[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
MODPROBECONF=/etc/modprobe.conf
# Strip trailing '/' characters from the standard test directory and
# mount-point variables (DIR*, MOUNT*) so that later prefix comparisons
# such as assert_DIR's [[ $DIR/ = $MOUNT/* ]] behave consistently.
# Only variables that currently name an existing directory are rewritten;
# the cleaned value is re-exported under the same name.
sanitize_parameters() {
	local i
	local path

	for i in DIR DIR1 DIR2 MOUNT MOUNT1 MOUNT2; do
		path=${!i}
		if [ -d "$path" ]; then
			# drop every trailing slash (GNU sed -r, as used
			# elsewhere in this script)
			path=$(echo "$path" | sed -r 's/\/+$//')
			# export "name=value" avoids the eval the old code used
			export "$i=$path"
		fi
	done
}
# Verify that the test directories DIR/DIR1/DIR2 live inside their
# corresponding Lustre mount points MOUNT/MOUNT1/MOUNT2.  Any mismatch is
# reported and the script exits with status 99 — running tests against a
# directory outside the mounted filesystem would be destructive.
# Unset pairs match trivially ("/" matches "/*"), so only configured
# directories are actually validated.
assert_DIR () {
	local failed=""
	[[ $DIR/ = $MOUNT/* ]] ||
		{ failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
	[[ $DIR1/ = $MOUNT1/* ]] ||
		{ failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
	[[ $DIR2/ = $MOUNT2/* ]] ||
		{ failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting"; }

	# trailing "|| true" keeps the function's status 0 on success
	[ -n "$failed" ] && exit 99 || true
}
usage() {
print_summary () {
trap 0
[ -z "$DEFAULT_SUITES"] && return 0
- [ "$TESTSUITE" == "lfsck" ] && return 0
[ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
local details
local form="%-13s %-17s %-9s %s %s\n"
export TEST_FAILED=false
export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
export RPC_MODE=${RPC_MODE:-false}
+ export DO_CLEANUP=${DO_CLEANUP:-true}
export MKE2FS=$MKE2FS
if [ -z "$MKE2FS" ]; then
fi
fi
- export LFSCK_BIN=${LFSCK_BIN:-lfsck}
- export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite
- export FSCK_MAX_ERR=4 # File system errors left uncorrected
+ export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after test suite
+ export FSCK_MAX_ERR=4 # File system errors left uncorrected
export ZFS=${ZFS:-zfs}
export ZPOOL=${ZPOOL:-zpool}
export DIR2
export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests}
export AT_MAX_PATH
+ export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"}
+ [ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null)
if [ "$ACCEPTOR_PORT" ]; then
export PORT_OPT="--port $ACCEPTOR_PORT"
export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
- # command line
+ # command line
- while getopts "rvwf:" opt $*; do
- case $opt in
- f) CONFIG=$OPTARG;;
- r) REFORMAT=--reformat;;
- v) VERBOSE=true;;
- w) WRITECONF=writeconf;;
- \?) usage;;
- esac
- done
+ while getopts "rvwf:" opt $*; do
+ case $opt in
+ f) CONFIG=$OPTARG;;
+ r) REFORMAT=yes;;
+ v) VERBOSE=true;;
+ w) WRITECONF=writeconf;;
+ \?) usage;;
+ esac
+ done
- shift $((OPTIND - 1))
- ONLY=${ONLY:-$*}
+ shift $((OPTIND - 1))
+ ONLY=${ONLY:-$*}
# print the durations of each test if "true"
DDETAILS=${DDETAILS:-false}
lustre_build_version() {
local facet=${1:-client}
- # lustre: 2.6.52
- # kernel: patchless_client
- # build: v2_6_92_0-gadb3ee4-2.6.32-431.29.2.el6_lustre.x86_64
+ # lustre: 2.8.52
local VER=$(do_facet $facet $LCTL get_param -n version 2> /dev/null |
awk '/lustre: / { print $2 }')
# lctl 2.6.50
load_module lov/lov
load_module mgc/mgc
load_module obdecho/obdecho
- if ! client_only; then
- SYMLIST=/proc/kallsyms
- grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
- grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
- grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+ if ! client_only; then
+ SYMLIST=/proc/kallsyms
+ grep -q crc16 $SYMLIST ||
+ { modprobe crc16 2>/dev/null || true; }
+ grep -q -w jbd2 $SYMLIST ||
+ { modprobe jbd2 2>/dev/null || true; }
load_module lfsck/lfsck
- [ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS
+ [ "$LQUOTA" != "no" ] &&
+ load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
modprobe zfs
load_module osd-zfs/osd_zfs
if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
grep -q exportfs_decode_fh $SYMLIST ||
{ modprobe exportfs 2> /dev/null || true; }
+ grep -q -w mbcache $SYMLIST ||
+ { modprobe mbcache 2>/dev/null || true; }
load_module ../ldiskfs/ldiskfs
load_module osd-ldiskfs/osd_ldiskfs
fi
load_module osp/osp
load_module ofd/ofd
load_module osp/osp
- fi
+ fi
load_module llite/lustre
llite_lloop_enabled && load_module llite/llite_lloop
echo -n $size
}
# Print the approximate per-inode overhead, in KiB, for the backing
# filesystem of the given facet (default: $SINGLEMDS).  Used when sizing
# test filesystems.  Unknown fstypes yield 0.
fs_inode_ksize() {
	local facet=${1:-$SINGLEMDS}
	local fstype=$(facet_fstype $facet)
	local size=0

	case $fstype in
		ldiskfs) size=4;;  # ~4KB per inode
		zfs)     size=11;; # 10 to 11KB per inode
	esac

	echo -n $size
}
+
check_gss_daemon_nodes() {
local list=$1
dname=$2
facet_number() {
local facet=$1
- if [ $facet == mgs ]; then
+ if [ $facet == mgs ] || [ $facet == client ]; then
return 1
fi
echo -n $device
}
# Detect whether we are running inside a virtual machine.  Prints the
# hypervisor name (lowercase) and returns, or prints nothing on bare metal.
# Tries virt-what first, then falls back to dmidecode's product name.
running_in_vm() {
	local virt

	# NB: declaration is separated from the assignment so that $? below
	# reflects virt-what's exit status, not 'local's (SC2155).
	virt=$(virt-what 2> /dev/null)
	[ $? -eq 0 ] && [ -n "$virt" ] && { echo $virt; return; }

	# fall back to DMI data; first word of the product name is enough
	virt=$(dmidecode -s system-product-name | awk '{print $1}')

	case $virt in
		VMware|KVM|VirtualBox|Parallels) echo ${virt,,} ;;
		*) ;;
	esac
}
+
#
# Re-read the partition table on failover partner host.
# After a ZFS storage pool is created on a shared device, the partition table
local name=$3
do_nodes $nodes "$LCTL get_param -n obdfilter.$device.$name \
- osd-*.$device.$name 2>&1" | grep -v 'Found no match'
+ osd-*.$device.$name 2>&1" | grep -v 'error:'
}
set_osd_param() {
local value=$4
do_nodes $nodes "$LCTL set_param -n obdfilter.$device.$name=$value \
- osd-*.$device.$name=$value 2>&1" | grep -v 'Found no match'
+ osd-*.$device.$name=$value 2>&1" | grep -v 'error:'
}
set_debug_size () {
if [ -f /sys/devices/system/cpu/possible ]; then
local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
else
- local cpus=$(getconf _NPROCESSORS_CONF)
+ local cpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
fi
# bug 19944, adjust size to be -gt num_possible_cpus()
set_default_debug_nodes $node
}
# Write a persistent host ID to /etc/hostid if one is not already present.
# ZFS/SPL uses the hostid for zpool import protection, so every server node
# needs a stable one before formatting.  The 8 hex digits are byte-swapped
# into the little-endian binary layout gethostid(3) expects.
set_hostid () {
	local hostid=${1:-$(hostid)}

	if [ ! -s /etc/hostid ]; then
		# sed turns "aabbccdd" into "\xdd\xcc\xbb\xaa" which printf
		# then emits as four raw bytes
		printf $(echo -n $hostid |
		    sed 's/\(..\)\(..\)\(..\)\(..\)/\\x\4\\x\3\\x\2\\x\1/') \
		    >/etc/hostid
	fi
}
+
# Facet functions
mount_facets () {
local facets=${1:-$(get_facets)}
local opt=${facet}_opt
local mntpt=$(facet_mntpt $facet)
local opts="${!opt} $@"
+ local fstype=$(facet_fstype $facet)
+ local devicelabel
+
+ module_loaded lustre || load_modules
if [ $(facet_fstype $facet) == ldiskfs ] &&
! do_facet $facet test -b ${!dev}; then
import_zpool $facet || return ${PIPESTATUS[0]}
fi
+ case $fstype in
+ ldiskfs)
+ devicelabel=$(do_facet ${facet} "$E2LABEL ${!dev}");;
+ zfs)
+ devicelabel=$(do_facet ${facet} "$ZFS get -H -o value \
+ lustre:svname ${!dev}");;
+ *)
+ error "unknown fstype!";;
+ esac
+
echo "Starting ${facet}: $opts ${!dev} $mntpt"
# for testing LU-482 error handling in mount_facets() and test_0a()
if [ -f $TMP/test-lu482-trigger ]; then
return $RC
fi
+ health=$(do_facet ${facet} "$LCTL get_param -n health_check")
+ if [[ "$health" != "healthy" ]]; then
+ error "$facet is in a unhealthy state"
+ fi
+
set_default_debug_facet $facet
if [[ $facet == mds* ]]; then
if [[ $opts =~ .*nosvc.* ]]; then
echo "Start ${!dev} without service"
else
- local fstype=$(facet_fstype $facet)
case $fstype in
ldiskfs)
esac
fi
+ # commit the device label change to disk
+ if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then
+ echo "Commit the device label on ${!dev}"
+ do_facet $facet "sync; sync; sync"
+ sleep 5
+ fi
+
+
label=$(devicelabel ${facet} ${!dev})
[ -z "$label" ] && echo no label for ${!dev} && exit 1
eval export ${facet}_svc=${label}
# start facet device options
start() {
- local facet=$1
- shift
- local device=$1
- shift
- eval export ${facet}_dev=${device}
- eval export ${facet}_opt=\"$@\"
+ local facet=$1
+ shift
+ local device=$1
+ shift
+ eval export ${facet}_dev=${device}
+ eval export ${facet}_opt=\"$@\"
- local varname=${facet}failover_dev
- if [ -n "${!varname}" ] ; then
- eval export ${facet}failover_dev=${!varname}
- else
- eval export ${facet}failover_dev=$device
- fi
+ local varname=${facet}failover_dev
+ if [ -n "${!varname}" ] ; then
+ eval export ${facet}failover_dev=${!varname}
+ else
+ eval export ${facet}failover_dev=$device
+ fi
local mntpt=$(facet_mntpt $facet)
do_facet ${facet} mkdir -p $mntpt
return $rc
}
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-# it should be removed whenever we drop the interoperability for such
-# server.
-restore_quota_old() {
- local mntpt=${1:-$MOUNT}
- local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2)
- if [ ! "$old_QUOTA_TYPE" ] ||
- [ "$quota_type" = "$old_QUOTA_TYPE" ]; then
- return
- fi
- quota_save_version $old_QUOTA_TYPE
-}
-
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-# it should be removed whenever we drop the interoperability for such
-# server.
-setup_quota_old(){
- local mntpt=$1
-
- # no quota enforcement for now and accounting works out of the box
- return
-
- # We need save the original quota_type params, and restore them after testing
-
- # Suppose that quota type the same on mds and ost
- local quota_type=$(quota_type | grep MDT | cut -d "=" -f2)
- [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!"
- echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]"
- if [ "$quota_type" != "$QUOTA_TYPE" ]; then
- export old_QUOTA_TYPE=$quota_type
- quota_save_version $QUOTA_TYPE
- else
- qtype=$(tr -c -d "ug" <<< $QUOTA_TYPE)
- $LFS quotacheck -$qtype $mntpt || error "quotacheck has failed for $type"
- fi
-
- local quota_usrs=$QUOTA_USERS
-
- # get_filesystem_size
- local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
- local blk_soft=$((disksz + 1024))
- local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
-
- local Inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
- local i_soft=$Inodes
- local i_hard=$((i_soft + i_soft / 20))
-
- echo "Total disk size: $disksz block-softlimit: $blk_soft block-hardlimit:
- $blk_hard inode-softlimit: $i_soft inode-hardlimit: $i_hard"
-
- local cmd
- for usr in $quota_usrs; do
- echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
- for type in u g; do
- cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt"
- echo "+ $cmd"
- eval $cmd || error "$cmd FAILED!"
- done
- # display the quota status
- echo "Quota settings for $usr : "
- $LFS quota -v -u $usr $mntpt || true
- done
-}
-
# get mdt quota type
mdt_quota_type() {
local varsvc=${SINGLEMDS}_svc
# restore old quota type settings
restore_quota() {
- if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
- restore_quota_old
- return
- fi
-
if [ "$old_MDT_QUOTA_TYPE" ]; then
do_facet mgs $LCTL conf_param \
$FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
}
setup_quota(){
- if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
- setup_quota_old $1
- return
- fi
-
local mntpt=$1
# save old quota type & set new quota type
}
# Mount the Lustre client filesystem on a single node.
#   $1 - client node name
#   $2 - mount point
#   $3 - mount options (default $MOUNT_OPTS)
#   $4 - mount flags (default $MOUNT_FLAGS)
# Honors $FILESET (subdirectory mount): the fileset is created via a
# temporary full-filesystem mount first, and the feature is silently
# disabled when the MDC import does not advertise subtree support.
zconf_mount() {
	local client=$1
	local mnt=$2
	local opts=${3:-$MOUNT_OPTS}
	opts=${opts:+-o $opts}
	local flags=${4:-$MOUNT_FLAGS}

	local device=$MGSNID:/$FSNAME$FILESET
	if [ -z "$mnt" -o -z "$FSNAME" ]; then
		echo "Bad mount command: opt=$flags $opts dev=$device " \
			"mnt=$mnt"
		exit 1
	fi

	echo "Starting client: $client: $flags $opts $device $mnt"
	do_node $client mkdir -p $mnt
	if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
		# mount the whole fs once so the fileset dir can be created
		do_node $client $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \
			$mnt || return 1
		#disable FILESET if not supported
		do_nodes $client lctl get_param -n \
			mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
			device=$MGSNID:/$FSNAME
		do_node $client mkdir -p $mnt/$FILESET
		do_node $client "! grep -q $mnt' ' /proc/mounts ||
			umount $mnt"
	fi
	do_node $client $MOUNT_CMD $flags $opts $device $mnt || return 1

	set_default_debug_nodes $client

	return 0
}
zconf_umount() {
local client=$1
local mnt=$2
local force
- local busy
+ local busy
local need_kill
[ "$3" ] && force=-f
echo \\\$(hostname) env are INSANE!;
exit 1;
fi"
- [ $? -eq 0 ] || rc=1
+ [ $? -eq 0 ] || rc=1
done
return $rc
}
sanity_mount_check_servers () {
- [ "$CLIENTONLY" ] &&
+ [ -n "$CLIENTONLY" ] &&
{ echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
echo Checking servers environments
# mount clients if not mouted
zconf_mount_clients() {
- local clients=$1
- local mnt=$2
- local opts=${3:-$MOUNT_OPTS}
- opts=${opts:+-o $opts}
- local flags=${4:-$MOUNT_FLAGS}
-
- local device=$MGSNID:/$FSNAME
- if [ -z "$mnt" -o -z "$FSNAME" ]; then
- echo Bad zconf mount command: opt=$flags $opts dev=$device mnt=$mnt
- exit 1
- fi
-
- echo "Starting client $clients: $flags $opts $device $mnt"
-
- do_nodes $clients "
+ local clients=$1
+ local mnt=$2
+ local opts=${3:-$MOUNT_OPTS}
+ opts=${opts:+-o $opts}
+ local flags=${4:-$MOUNT_FLAGS}
+
+ local device=$MGSNID:/$FSNAME$FILESET
+ if [ -z "$mnt" -o -z "$FSNAME" ]; then
+ echo "Bad conf mount command: opt=$flags $opts dev=$device " \
+ "mnt=$mnt"
+ exit 1
+ fi
+
+ echo "Starting client $clients: $flags $opts $device $mnt"
+ if [ -n "$FILESET" -a ! -n "$SKIP_FILESET" ]; then
+ do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
+ umount $mnt"
+ do_nodes $clients $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \
+ $mnt || return 1
+ #disable FILESET if not supported
+ do_nodes $clients lctl get_param -n \
+ mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
+ device=$MGSNID:/$FSNAME
+ do_nodes $clients mkdir -p $mnt/$FILESET
+ do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
+ umount $mnt"
+ fi
+
+ do_nodes $clients "
running=\\\$(mount | grep -c $mnt' ');
rc=0;
if [ \\\$running -eq 0 ] ; then
fi;
exit \\\$rc" || return ${PIPESTATUS[0]}
- echo "Started clients $clients: "
- do_nodes $clients "mount | grep $mnt' '"
+ echo "Started clients $clients: "
+ do_nodes $clients "mount | grep $mnt' '"
- set_default_debug_nodes $clients
+ set_default_debug_nodes $clients
- return 0
+ return 0
}
zconf_umount_clients() {
wait_for_function --quiet "! ping -w 3 -c 1 $host" 5 1 && return 0
echo "waiting for $host to fail attempts=$attempts"
[ $i -lt $attempts ] || \
- { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
+ { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
done
}
# only for remote client
# Verify that the configured test load (run_<load>.sh) is still running on
# a remote client and that the node itself is healthy.
#   $1 - client node name
# Returns 1 if the load script is no longer in the process table, the
# status from check_node_health on hard failure, or 254 (connection
# trouble) after the retries are exhausted.  See bug 18914.
check_client_load () {
	local client=$1
	local var=$(node_var_name $client)_load
	local testload=run_${!var}.sh

	ps -C $testload | grep $client || return 1

	# bug 18914: try to connect several times not only when
	# check ps, but while check_node_health also

	local tries=3
	local RC=254
	while [ $RC = 254 -a $tries -gt 0 ]; do
		let tries=$tries-1
		# assume success
		RC=0
		if ! check_node_health $client; then
			RC=${PIPESTATUS[0]}
			if [ $RC -eq 254 ]; then
				# FIXME: not sure how long we should sleep here
				sleep 10
				continue
			fi
			echo "check node health failed: RC=$RC "
			return $RC
		fi
	done
	# We can continue try to connect if RC=254
	# Just print the warning about this
	if [ $RC = 254 ]; then
		echo "got a return status of $RC from do_node while checking " \
			"node health on $client"
	fi

	# see if the load is still on the client
	tries=3
	RC=254
	while [ $RC = 254 -a $tries -gt 0 ]; do
		let tries=$tries-1
		# assume success
		RC=0
		if ! do_node $client \
			"ps auxwww | grep -v grep | grep -q $testload"; then
			RC=${PIPESTATUS[0]}
			sleep 30
		fi
	done
	if [ $RC = 254 ]; then
		echo "got a return status of $RC from do_node while checking " \
			"(node health and 'ps') the client load on $client"
		# see if we can diagnose a bit why this is
	fi

	return $RC
}
check_client_loads () {
local clients=${1//,/ }
}
# wait_update on the active host of a facet.
#   [--verbose] - forwarded to wait_update
#   $1          - facet name
#   remaining   - command and expected value, passed through to wait_update
wait_update_facet() {
	local verbose=
	[ "$1" = "--verbose" ] && verbose="$1" && shift

	local facet=$1
	shift
	wait_update $verbose $(facet_active_host $facet) "$@"
}
# Force all MDTs and OSTs to flush dirty data to stable storage by poking
# the osd force_sync parameter on every server.  "Found no match" noise
# from the OST-side set_param is filtered out.
sync_all_data() {
	do_nodes $(comma_list $(mdts_nodes)) \
	    "lctl set_param -n osd*.*MDT*.force_sync=1"
	do_nodes $(comma_list $(osts_nodes)) \
	    "lctl set_param -n osd*.*OS*.force_sync=1" 2>&1 |
		grep -v 'Found no match'
}
return $rc
}
# Run "lfs df" on the given clients (default $CLIENTS) as a liveness probe
# of the Lustre mount.  With no clients configured the check runs locally
# and its output is shown; remote output is discarded — only the exit
# status matters.
lfs_df_check() {
	local clients=${1:-$CLIENTS}

	if [ -z "$clients" ]; then
		$LFS df $MOUNT
	else
		$PDSH $clients "$LFS df $MOUNT" > /dev/null
	fi
}
+
+
# Probe that every configured client still has a working Lustre mount,
# via lfs_df_check.  The short sleep gives in-flight recovery a moment to
# settle before probing.
clients_up() {
	# not every config has many clients
	sleep 1
	lfs_df_check
}
# Probe a single client's Lustre mount (or the local one when $1 is empty)
# via lfs_df_check, after a short settle delay.
client_up() {
	# usually checked on particular client or locally
	sleep 1
	lfs_df_check $1
}
client_evicted() {
}
# Exercise the client connection by writing to a shared file on $MOUNT
# from this node (and every node in $CLIENTS, if set), then reading it
# back.  Prints the hostnames that managed to write — i.e. the clients
# that are connected — and removes the scratch file.
client_reconnect_try() {
	local f=$MOUNT/recon

	uname -n >> $f
	if [ -z "$CLIENTS" ]; then
		$LFS df $MOUNT; uname -n >> $f
	else
		do_nodes $CLIENTS "$LFS df $MOUNT; uname -n >> $f" > /dev/null
	fi
	echo "Connected clients: $(cat $f)"
	ls -l $f > /dev/null
	rm $f
}
client_reconnect() {
replay_barrier() {
local facet=$1
do_facet $facet "sync; sync; sync"
- df $MOUNT
+ $LFS df $MOUNT
# make sure there will be no seq change
local clients=${CLIENTS:-$HOSTNAME}
facet_failover $* || error "failover: $?"
wait_clients_import_state "$clients" "$facets" FULL
- clients_up || error "post-failover df: $?"
+ clients_up || error "post-failover stat: $?"
}
fail_nodf() {
change_active $facet
wait_for_facet $facet
mount_facet $facet -o abort_recovery
- clients_up || echo "first df failed: $?"
- clients_up || error "post-failover df: $?"
+ clients_up || echo "first stat failed: $?"
+ clients_up || error "post-failover stat: $?"
}
do_lmc() {
[[ $facet = mgs ]] && combined_mgs_mds && facet="mds1"
local var=${facet}_MOUNT
- eval mntpt=${!var:-${MOUNT%/*}/$facet}
+ eval mntpt=${!var:-${MOUNT}-$facet}
echo -n $mntpt
}
mount_ldiskfs() {
local facet=$1
local dev=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local opts
if ! do_facet $facet test -b $dev; then
# Unmount a facet's ldiskfs device.
#   $1 - facet name
#   $2 - optional mount point override (default: facet_mntpt $1)
unmount_ldiskfs() {
	local facet=$1
	local dev=$(facet_device $facet)
	local mnt=${2:-$(facet_mntpt $facet)}

	do_facet $facet $UMOUNT $mnt
}
mount_zfs() {
local facet=$1
local ds=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local canmnt
local mntpt
unmount_zfs() {
local facet=$1
local ds=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local var_mntpt=mz_$(var_name ${facet}_$ds)_mountpoint
local var_canmnt=mz_$(var_name ${facet}_$ds)_canmount
local mntpt=${!var_mntpt}
# Dispatch to the fstype-specific mount helper (mount_ldiskfs / mount_zfs)
# for a facet.
#   $1 - facet name
#   $2 - optional mount point, forwarded to the helper
mount_fstype() {
	local facet=$1
	local mnt=$2
	local fstype=$(facet_fstype $facet)

	mount_$fstype $facet $mnt
}
# Dispatch to the fstype-specific unmount helper (unmount_ldiskfs /
# unmount_zfs) for a facet.
#   $1 - facet name
#   $2 - optional mount point, forwarded to the helper
unmount_fstype() {
	local facet=$1
	local mnt=$2
	local fstype=$(facet_fstype $facet)

	unmount_$fstype $facet $mnt
}
########
zconf_umount_clients $clients $MOUNT "$*" || true
[ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
- [ "$CLIENTONLY" ] && return
+ [ -n "$CLIENTONLY" ] && return
+
# The add fn does rm ${facet}active file, this would be enough
# if we use do_facet <facet> only after the facet added, but
# currently we use do_facet mds in local.sh
}
# Clean up and detach every obdecho device listed by "lctl dl".
# "trap 0" clears any pending EXIT trap so cleanup is not re-entered.
cleanup_echo_devs () {
	trap 0
	local dev
	local devs=$($LCTL dl | grep echo | awk '{print $4}')

	for dev in $devs; do
		$LCTL --device $dev cleanup
		$LCTL --device $dev detach
	done
}
cleanupall() {
opts+=${LDLM_TIMEOUT:+" --param=sys.ldlm_timeout=$LDLM_TIMEOUT"}
if [ $type == MDS ]; then
- opts+=${SECLEVEL:+" --param=mdt.sec_level"}
opts+=${MDSCAPA:+" --param-mdt.capa=$MDSCAPA"}
opts+=${STRIPE_BYTES:+" --param=lov.stripesize=$STRIPE_BYTES"}
opts+=${STRIPES_PER_OBJ:+" --param=lov.stripecount=$STRIPES_PER_OBJ"}
fi
if [ $type == OST ]; then
- opts+=${SECLEVEL:+" --param=ost.sec_level"}
opts+=${OSSCAPA:+" --param=ost.capa=$OSSCAPA"}
if [ $fstype == ldiskfs ]; then
formatall() {
stopall
+ # Set hostid for ZFS/SPL zpool import protection
+ # (Assumes MDS version is also OSS version)
+ if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.8.54) ];
+ then
+ do_rpc_nodes "$(comma_list $(remote_nodes_list))" set_hostid
+ fi
+
# We need ldiskfs here, may as well load them all
load_modules
- [ "$CLIENTONLY" ] && return
+ [ -n "$CLIENTONLY" ] && return
echo Formatting mgs, mds, osts
if ! combined_mgs_mds ; then
format_mgs
}
setupall() {
- nfs_client_mode && return
+ nfs_client_mode && return
cifs_client_mode && return
- sanity_mount_check ||
- error "environments are insane!"
+ sanity_mount_check || error "environments are insane!"
- load_modules
+ load_modules
- if [ -z "$CLIENTONLY" ]; then
- echo Setup mgs, mdt, osts
- echo $WRITECONF | grep -q "writeconf" && \
- writeconf_all
- if ! combined_mgs_mds ; then
+ if [ -z "$CLIENTONLY" ]; then
+ echo Setup mgs, mdt, osts
+ echo $WRITECONF | grep -q "writeconf" && writeconf_all
+ if ! combined_mgs_mds ; then
start mgs $(mgsdevname) $MGS_MOUNT_OPTS
- fi
+ fi
for num in `seq $MDSCOUNT`; do
DEVNAME=$(mdsdevname $num)
}
init_facet_vars () {
- [ "$CLIENTONLY" ] && return 0
+ [ -n "$CLIENTONLY" ] && return 0
local facet=$1
shift
local device=$1
do_facet mgs "$LCTL conf_param $PARAM='$FINAL'" ||
error "conf_param $PARAM failed"
- wait_update $(facet_host $myfacet) "$TEST" "$FINAL" ||
+ wait_update_facet $myfacet "$TEST" "$FINAL" ||
error "check $PARAM failed!"
}
init_param_vars () {
- remote_mds_nodsh ||
- TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
+ TIMEOUT=$(lctl get_param -n timeout)
+ TIMEOUT=${TIMEOUT:-20}
+
+ remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
+ TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
log "Using TIMEOUT=$TIMEOUT"
osc_ensure_active $SINGLEMDS $TIMEOUT
local mntpt=$1
local mounted=$(mount | grep " $mntpt ")
- if [ "$CLIENTONLY" ]; then
+ if [ -n "$CLIENTONLY" ]; then
# bug 18021
# CLIENTONLY should not depend on *_HOST settings
local mgc=$($LCTL device_list | awk '/MGC/ {print $4}')
# in theory someone could create a new,
# client-only config file that assumed lustre was already
# configured and didn't set the MGSNID. If MGSNID is not set,
- # then we should use the mgs nid currently being used
+ # then we should use the mgs nid currently being used
# as the default value. bug 18021
[[ x$MGSNID = x ]] &&
MGSNID=${mgc//MGC/}
return 0
fi
- local myMGS_host=$mgs_HOST
+ local myMGS_host=$mgs_HOST
if [ "$NETTYPE" = "ptl" ]; then
- myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
+ myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
fi
echo Checking config lustre mounted on $mntpt
}
check_and_setup_lustre() {
- nfs_client_mode && return
+ sanitize_parameters
+ nfs_client_mode && return
cifs_client_mode && return
- local MOUNTED=$(mounted_lustre_filesystems)
-
- local do_check=true
- # 1.
- # both MOUNT and MOUNT2 are not mounted
- if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
- [ "$REFORMAT" ] && formatall
- # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
- setupall
- is_mounted $MOUNT || error "NAME=$NAME not mounted"
- export I_MOUNTED=yes
- do_check=false
+ local MOUNTED=$(mounted_lustre_filesystems)
+
+ local do_check=true
+ # 1.
+ # both MOUNT and MOUNT2 are not mounted
+ if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
+ [ "$REFORMAT" = "yes" ] && formatall
+ # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
+ setupall
+ is_mounted $MOUNT || error "NAME=$NAME not mounted"
+ export I_MOUNTED=yes
+ do_check=false
# 2.
# MOUNT2 is mounted
elif is_mounted $MOUNT2; then
restore_mount $MOUNT2
export I_MOUNTED2=yes
fi
- fi
+ fi
# 5.
# MOUNT is mounted MOUNT2 is not mounted
set_default_debug_nodes $(comma_list $(nodes_list))
fi
- if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then
+ if [ -z "$CLIENTONLY" -a $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ]; then
local facets=""
[ "$(facet_fstype ost1)" = "ldiskfs" ] &&
facets="$(get_facets OST)"
set_flavor_all $SEC
fi
- #Enable remote MDT create for testing
- for num in $(seq $MDSCOUNT); do
- do_facet mds$num \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- done
+ if [ -z "$CLIENTONLY" ]; then
+ # Enable remote MDT create for testing
+ for num in $(seq $MDSCOUNT); do
+ do_facet mds$num \
+ lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
+ 2>/dev/null
+ done
+ fi
if [ "$ONLY" == "setup" ]; then
exit 0
return 0
}
-# Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
-generate_db() {
- local i
- local ostidx
- local dev
- local node
-
- [[ $(lustre_version_code $SINGLEMDS) -ne $(version_code 2.2.0) ]] ||
- { skip "Lustre 2.2.0 lacks the patch for LU-1255"; exit 0; }
-
- check_shared_dir $SHARED_DIRECTORY ||
- error "$SHARED_DIRECTORY isn't a shared directory"
-
- export MDSDB=$SHARED_DIRECTORY/mdsdb
- export OSTDB=$SHARED_DIRECTORY/ostdb
-
- # DNE is not supported, so when running e2fsck on a DNE filesystem,
- # we only pass master MDS parameters.
- run_e2fsck $MDTNODE $MDTDEV "-n --mdsdb $MDSDB"
-
- i=0
- ostidx=0
- OSTDB_LIST=""
- for node in $(osts_nodes); do
- for dev in ${OSTDEVS[i]}; do
- run_e2fsck $node $dev "-n --mdsdb $MDSDB --ostdb $OSTDB-$ostidx"
- OSTDB_LIST="$OSTDB_LIST $OSTDB-$ostidx"
- ostidx=$((ostidx + 1))
- done
- i=$((i + 1))
- done
-}
-
-# Run lfsck on server node if lfsck can't be found on client (LU-2571)
-run_lfsck_remote() {
- local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
- local client=$1
- local mounted=true
- local rc=0
-
- #Check if lustre is already mounted
- do_rpc_nodes $client is_mounted $MOUNT || mounted=false
- if ! $mounted; then
- zconf_mount $client $MOUNT ||
- error "failed to mount Lustre on $client"
- fi
- #Run lfsck
- echo $cmd
- do_node $client $cmd || rc=$?
- #Umount if necessary
- if ! $mounted; then
- zconf_umount $client $MOUNT ||
- error "failed to unmount Lustre on $client"
- fi
-
- [ $rc -le $FSCK_MAX_ERR ] ||
- error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
- echo "lfsck finished with rc=$rc"
-
- return $rc
-}
-
run_lfsck() {
- local facets="client $SINGLEMDS"
- local found=false
- local facet
- local node
- local rc=0
-
- for facet in $facets; do
- node=$(facet_active_host $facet)
- if check_progs_installed $node $LFSCK_BIN; then
- found=true
- break
- fi
+ do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
+ $LCTL set_param printk=+lfsck
+ do_facet $SINGLEMDS "$LCTL lfsck_start -M $FSNAME-MDT0000 -r -A -t all"
+
+ for k in $(seq $MDSCOUNT); do
+ # wait up to 10+1 minutes for LFSCK to complete
+ wait_update_facet --verbose mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 600 ||
+ error "MDS${k} layout isn't the expected 'completed'"
+ wait_update_facet --verbose mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" 60 ||
+ error "MDS${k} namespace isn't the expected 'completed'"
done
- ! $found && error "None of \"$facets\" supports lfsck"
-
- run_lfsck_remote $node || rc=$?
-
- rm -rvf $MDSDB* $OSTDB* || true
- return $rc
+ local rep_mdt=$(do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL get_param -n mdd.$FSNAME-*.lfsck_* |
+ awk '/repaired/ { print $2 }' | calc_sum)
+ local rep_ost=$(do_nodes $(comma_list $(osts_nodes)) \
+ $LCTL get_param -n obdfilter.$FSNAME-*.lfsck_* |
+ awk '/repaired/ { print $2 }' | calc_sum)
+ local repaired=$((rep_mdt + rep_ost))
+ [ $repaired -eq 0 ] ||
+ error "lfsck repaired $rep_mdt MDT and $rep_ost OST errors"
}
dump_file_contents() {
}
check_and_cleanup_lustre() {
- if [ "$LFSCK_ALWAYS" = "yes" -a "$TESTSUITE" != "lfsck" ]; then
- get_svr_devs
- generate_db
- run_lfsck
- fi
+ if [ "$LFSCK_ALWAYS" = "yes" -a "$TESTSUITE" != "sanity-lfsck" -a \
+ "$TESTSUITE" != "sanity-scrub" ]; then
+ run_lfsck
+ fi
if is_mounted $MOUNT; then
- [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* ||
- error "remove sub-test dirs failed"
+ if $DO_CLEANUP; then
+ [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* ||
+ error "remove sub-test dirs failed"
+ else
+ echo "skip cleanup"
+ fi
[ "$ENABLE_QUOTA" ] && restore_quota || true
fi
}
set_nodes_failloc () {
- do_nodes $(comma_list $1) lctl set_param fail_val=0 fail_loc=$2
+ local fv=${3:-0}
+ do_nodes $(comma_list $1) lctl set_param fail_val=$fv fail_loc=$2
}
cancel_lru_locks() {
- $LCTL mark "cancel_lru_locks $1 start"
-
- if [ $1 != "MGC" ]; then
- for d in $(lctl get_param -N ldlm.namespaces.*.lru_size |
- egrep -i $1); do
- $LCTL set_param -n $d=clear
- done
- $LCTL get_param ldlm.namespaces.*.lock_unused_count | egrep -i $1 |
- grep -v '=0'
- else
- for d in $(find \
- /{proc,sys}/fs/lustre/ldlm/namespaces/*$1*/lru_size \
- 2> /dev/null); do
- echo "clear" > $d
- done
-
- for d in $(find \
- /{proc,sys}/fs/lustre/ldlm/namespaces/*$1*/lock_unused_count \
- 2> /dev/null); do
- if [ $(cat $d) != 0 ]; then
- echo "ldlm.namespaces.$(echo "$d" |
- cut -f 7 -d'/').lock_unused_count=$(cat $d)"
- fi
- done
- fi
-
- $LCTL mark "cancel_lru_locks $1 stop"
+ #$LCTL mark "cancel_lru_locks $1 start"
+ $LCTL set_param -n ldlm.namespaces.*$1*.lru_size=clear
+ $LCTL get_param ldlm.namespaces.*$1*.lock_unused_count | grep -v '=0'
+ #$LCTL mark "cancel_lru_locks $1 stop"
}
default_lru_size()
}
debugsave() {
- DEBUGSAVE="$(lctl get_param -n debug)"
+ DEBUGSAVE="$(lctl get_param -n debug)"
+ DEBUGSAVE_SERVER=$(do_facet $SINGLEMDS "$LCTL get_param -n debug")
}
debugrestore() {
- [ -n "$DEBUGSAVE" ] && \
- do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=\\\"${DEBUGSAVE}\\\";"
- DEBUGSAVE=""
+ [ -n "$DEBUGSAVE" ] &&
+ do_nodes $CLIENTS "$LCTL set_param debug=\\\"${DEBUGSAVE}\\\"" ||
+ true
+ DEBUGSAVE=""
+
+ [ -n "$DEBUGSAVE_SERVER" ] &&
+ do_nodes $(comma_list $(all_server_nodes)) \
+ "$LCTL set_param debug=\\\"${DEBUGSAVE_SERVER}\\\"" ||
+ true
+ DEBUGSAVE_SERVER=""
}
debug_size_save() {
# prints bash call stack
print_stack_trace() {
+ local skip=${1:-1}
echo " Trace dump:"
- for (( i=1; i < ${#BASH_LINENO[*]} ; i++ )) ; do
- local s=${BASH_SOURCE[$i]}
- local l=${BASH_LINENO[$i-1]}
- local f=${FUNCNAME[$i]}
- echo " = $s:$l:$f()"
+ for (( i=$skip; i < ${#BASH_LINENO[*]} ; i++ )) ; do
+ local src=${BASH_SOURCE[$i]}
+ local lineno=${BASH_LINENO[$i-1]}
+ local funcname=${FUNCNAME[$i]}
+ echo " = $src:$lineno:$funcname()"
done
}
-##################################
-# Test interface
-##################################
-
-error_noexit() {
+report_error() {
local TYPE=${TYPE:-"FAIL"}
local dump=true
dump=false
fi
-
log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
- print_stack_trace >&2
-
+ (print_stack_trace 2) >&2
mkdir -p $LOGDIR
# We need to dump the logs on all nodes
if $dump; then
echo "$@" > $LOGDIR/err
fi
fi
+
+ # cleanup the env for failed tests
+ reset_fail_loc
+}
+
+##################################
+# Test interface
+##################################
+
+error_noexit() {
+ report_error "$@"
}
exit_status () {
}
error() {
- error_noexit "$@"
+ report_error "$@"
exit 1
}
error_exit() {
- error "$@"
+ report_error "$@"
+ exit 1
}
# use only if we are ignoring failures for this test, bugno required.
error_ignore() {
local TYPE="IGNORE ($1)"
shift
- error_noexit "$@"
+ report_error "$@"
}
error_and_remount() {
- error_noexit "$@"
+ report_error "$@"
remount_client $MOUNT
exit 1
}
+# Throw an error if it's not running in vm - usually for performance
+# verification
+error_not_in_vm() {
+ local virt=$(running_in_vm)
+ if [[ -n "$virt" ]]; then
+ echo "running in VM '$virt', ignore error"
+ error_ignore env=$virt "$@"
+ else
+ error "$@"
+ fi
+}
+
skip_env () {
$FAIL_ON_SKIP_ENV && error false $@ || skip $@
}
}
reset_fail_loc () {
- echo -n "Resetting fail_loc on all nodes..."
- do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 \
- fail_val=0 2>/dev/null || true"
- echo done.
+ echo -n "Resetting fail_loc on all nodes..."
+ do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 \
+ fail_val=0 2>/dev/null" || true
+ echo done.
}
#
-# Log a message (on all nodes) padded with "=" before and after.
+# Log a message (on all nodes) padded with "=" before and after.
# Also appends a timestamp and prepends the testsuite name.
-#
+#
EQUALS="===================================================================================================="
banner() {
log "$msg== $(date +"%H:%M:%S (%s)")"
}
+check_dmesg_for_errors() {
+ local res
+ local errors="VFS: Busy inodes after unmount of\|\
+ldiskfs_check_descriptors: Checksum for group 0 failed\|\
+group descriptors corrupted"
+
+ res=$(do_nodes $(comma_list $(nodes_list)) "dmesg" | grep "$errors")
+ [ -z "$res" ] && return 0
+ echo "Kernel error detected: $res"
+ return 1
+}
+
#
# Run a single test function and cleanup after it.
#
local SAVE_UMASK=`umask`
umask 0022
+ if ! grep -q $DIR /proc/mounts; then
+ $SETUP
+ fi
+
banner "test $testnum: $message"
test_${testnum} || error "test_$testnum failed with $?"
cd $SAVE_PWD
reset_fail_loc
check_grant ${testnum} || error "check_grant $testnum failed with $?"
- check_catastrophe || error "LBUG/LASSERT detected"
+ check_node_health
+ check_dmesg_for_errors || error "Error in dmesg detected"
if [ "$PARALLEL" != "yes" ]; then
ps auxww | grep -v grep | grep -q multiop &&
error "multiop still running"
unset tdir
unset tfile
umask $SAVE_UMASK
+ $CLEANUP
return 0
}
remote_mds_nodsh()
{
- [ "$CLIENTONLY" ] && return 0 || true
- remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
+ [ -n "$CLIENTONLY" ] && return 0 || true
+ remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
}
require_dsh_mds()
{
- remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" && \
- MSKIPPED=1 && return 1
- return 0
+ remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" &&
+ MSKIPPED=1 && return 1
+ return 0
}
remote_ost ()
remote_ost_nodsh()
{
- [ "$CLIENTONLY" ] && return 0 || true
- remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+ [ -n "$CLIENTONLY" ] && return 0 || true
+ remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
require_dsh_ost()
remote_mgs_nodsh()
{
- [ "$CLIENTONLY" ] && return 0 || true
- local MGS
- MGS=$(facet_host mgs)
- remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+ [ -n "$CLIENTONLY" ] && return 0 || true
+ local MGS
+ MGS=$(facet_host mgs)
+ remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
local_mode ()
rnodes=${rnodes//,/ }
local -a nodes=($rnodes)
- local num=${#nodes[@]}
+ local num=${#nodes[@]}
local i=$((RANDOM * num * 2 / 65536))
echo ${nodes[i]}
}
client_only () {
- [ "$CLIENTONLY" ] || [ "$CLIENTMODSONLY" = yes ]
-}
-
-is_patchless ()
-{
- lctl get_param version | grep -q patchless
+ [ -n "$CLIENTONLY" ] || [ "x$CLIENTMODSONLY" = "xyes" ]
}
check_versions () {
done
}
-check_catastrophe() {
+check_node_health() {
local nodes=${1:-$(comma_list $(nodes_list))}
- do_nodes $nodes "rc=0;
-val=\\\$($LCTL get_param -n catastrophe 2>&1);
-if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then
- echo \\\$(hostname -s): \\\$val;
- rc=\\\$val;
-fi;
-exit \\\$rc"
+ for node in ${nodes//,/ }; do
+ check_network "$node" 5
+ if [ $? -eq 0 ]; then
+ do_node $node "rc=0;
+ val=\\\$($LCTL get_param -n catastrophe 2>&1);
+ if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then
+ echo \\\$(hostname -s): \\\$val;
+ rc=\\\$val;
+ fi;
+ exit \\\$rc" || error "$node:LBUG/LASSERT detected"
+ fi
+ done
}
mdsrate_cleanup () {
}
add_pool_to_list () {
- local fsname=${1%%.*}
- local poolname=${1##$fsname.}
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
- local listvar=${fsname}_CREATED_POOLS
- eval export ${listvar}=$(expand_list ${!listvar} $poolname)
+ local listvar=${fsname}_CREATED_POOLS
+ local temp=${listvar}=$(expand_list ${!listvar} $poolname)
+ eval export $temp
}
remove_pool_from_list () {
- local fsname=${1%%.*}
- local poolname=${1##$fsname.}
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
- local listvar=${fsname}_CREATED_POOLS
- eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+ local listvar=${fsname}_CREATED_POOLS
+ local temp=${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+ eval export $temp
}
destroy_pool_int() {
suffix="$ts.log"
echo "Dumping lctl log to ${prefix}.*.${suffix}"
- if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
+ if [ -n "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
echo "Dumping logs only on local client."
$LCTL dk > ${prefix}.debug_log.$(hostname -s).${suffix}
dmesg > ${prefix}.dmesg.$(hostname -s).${suffix}
echo -n $service_time
}
+recovery_time_min() {
+ local connection_switch_min=5
+ local connection_switch_inc=5
+ local connection_switch_max
+ local reconnect_delay_max
+ local initial_connect_timeout
+ local max
+ local timeout_20
+
+ #connection_switch_max=min(50, max($connection_switch_min, $TIMEOUT))
+ (($connection_switch_min > $TIMEOUT)) &&
+ max=$connection_switch_min || max=$TIMEOUT
+ (($max < 50)) && connection_switch_max=$max || connection_switch_max=50
+
+ #initial_connect_timeout = max(connection_switch_min, obd_timeout/20)
+ timeout_20=$((TIMEOUT/20))
+ (($connection_switch_min > $timeout_20)) &&
+ initial_connect_timeout=$connection_switch_min ||
+ initial_connect_timeout=$timeout_20
+
+ reconnect_delay_max=$((connection_switch_max + connection_switch_inc + \
+ initial_connect_timeout))
+ echo $((2 * reconnect_delay_max))
+}
+
get_clients_mount_count () {
local clients=${CLIENTS:-`hostname`}
}
init_logging() {
- if [[ -n $YAML_LOG ]]; then
- return
- fi
- local SAVE_UMASK=`umask`
- umask 0000
-
- export YAML_LOG=${LOGDIR}/results.yml
- mkdir -p $LOGDIR
- init_clients_lists
-
- if [ ! -f $YAML_LOG ]; then # If the yaml log already exists then we will just append to it
- if check_shared_dir $LOGDIR; then
- touch $LOGDIR/shared
- echo "Logging to shared log directory: $LOGDIR"
- else
- echo "Logging to local directory: $LOGDIR"
- fi
+ [[ -n $YAML_LOG ]] && return
+ local save_umask=$(umask)
+ umask 0000
- yml_nodes_file $LOGDIR >> $YAML_LOG
- yml_results_file >> $YAML_LOG
- fi
+ export YAML_LOG=${LOGDIR}/results.yml
+ mkdir -p $LOGDIR
+ init_clients_lists
+
+ # If the yaml log already exists then we will just append to it
+ if [ ! -f $YAML_LOG ]; then
+ if check_shared_dir $LOGDIR; then
+ touch $LOGDIR/shared
+ echo "Logging to shared log directory: $LOGDIR"
+ else
+ echo "Logging to local directory: $LOGDIR"
+ fi
+
+ yml_nodes_file $LOGDIR >> $YAML_LOG
+ yml_results_file >> $YAML_LOG
+ fi
- umask $SAVE_UMASK
+ umask $save_umask
+
+ # If modules are not yet loaded then older "lctl lustre_build_version"
+ # will fail. Use lctl build version instead.
+ log "Client: $($LCTL lustre_build_version)"
+ log "MDS: $(do_facet $SINGLEMDS $LCTL lustre_build_version 2>/dev/null ||
+ do_facet $SINGLEMDS $LCTL --version)"
+ log "OSS: $(do_facet ost1 $LCTL lustre_build_version 2> /dev/null ||
+ do_facet ost1 $LCTL --version)"
}
log_test() {
#
get_page_size() {
local facet=$1
- local size
+ local size=$(getconf PAGE_SIZE 2>/dev/null)
- size=$(do_facet $facet getconf PAGE_SIZE)
- [[ ${PIPESTATUS[0]} = 0 && -n "$size" ]] || size=4096
- echo -n $size
+ [ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
+ echo -n ${size:-4096}
}
#
local device=$2
local count
- count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+ [ -z "$CLIENTONLY" ] && count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
awk '/^Block count:/ {print $3}')
- echo -n $count
+ echo -n ${count:-0}
}
# Get the block size of the filesystem.
get_block_size() {
- local facet=$1
- local device=$2
- local size
+ local facet=$1
+ local device=$2
+ local size
- size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
- awk '/^Block size:/ {print $3}')
- echo $size
+ [ -z "$CLIENTONLY" ] && size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+ awk '/^Block size:/ {print $3}')
+ echo -n ${size:-0}
}
# Check whether the "large_xattr" feature is enabled or not.