IDENTITY_UPCALL=false
;;
esac
+ USE_OFD=${USE_OFD:-yes}
+ [ "$USE_OFD" = "yes" ] && LOAD_MODULES_REMOTE=true
+
export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
# Paths on remote nodes, if different
load_modules_local() {
[ $(facet_fstype ost1) == "zfs" ] && export USE_OFD=yes
- if [ "$USE_OFD" == yes ]; then
- if module_loaded obdfilter; then
- if ! $LUSTRE_RMMOD ldiskfs; then
- echo "$HOSTNAME may still be using obdfilter.ko"
- return 1
- fi
- fi
- else
- if module_loaded ofd; then
- if ! $LUSTRE_RMMOD ldiskfs; then
- echo "$HOSTNAME may still be using ofd.ko"
- return 1
- fi
- fi
- fi
if [ -n "$MODPROBE" ]; then
# use modprobe
fi
fi
- local ncpts=0
# if there is only one CPU core, libcfs can only create one partition
# if there is more than 4 CPU cores, libcfs should create multiple CPU
# partitions. So we just force libcfs to create 2 partitions for
if [ $ncpus -le 4 ] && [ $ncpus -gt 1 ]; then
# force to enable multiple CPU partitions
echo "Force libcfs to create 2 CPU partitions"
- ncpts=2
+ MODOPTS_LIBCFS="cpu_npartitions=2 $MODOPTS_LIBCFS"
else
echo "libcfs will create CPU partition based on online CPUs"
fi
- load_module ../libcfs/libcfs/libcfs cpu_npartitions=$ncpts
+ load_module ../libcfs/libcfs/libcfs
[ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG"
[ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }"
load_module ost/ost
load_module lod/lod
load_module osp/osp
- if [ "$USE_OFD" == yes ]; then
- load_module ofd/ofd
- else
- load_module obdfilter/obdfilter
- fi
+ load_module ofd/ofd
load_module osp/osp
fi
# bug 19124
# load modules on remote nodes optionally
# lustre-tests have to be installed on these nodes
- if $LOAD_MODULES_REMOTE ; then
+ if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
- echo loading modules on $list
- do_rpc_nodes $list load_modules_local
+ if [ -n "$list" ]; then
+ echo "loading modules on: '$list'"
+ do_rpc_nodes "$list" load_modules_local
+ fi
fi
}
$LUSTRE_RMMOD ldiskfs || return 2
- if $LOAD_MODULES_REMOTE ; then
+ if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
- if [ ! -z $list ]; then
- echo unloading modules on $list
- do_rpc_nodes $list $LUSTRE_RMMOD ldiskfs
- do_rpc_nodes $list check_mem_leak
+ if [ -n "$list" ]; then
+ echo "unloading modules on: '$list'"
+ do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
+ do_rpc_nodes "$list" check_mem_leak
fi
fi
}
# Apply the default debug settings ($PTLDEBUG, $SUBSYSTEM, $DEBUG_SIZE) to
# a list of nodes.  The local host is configured directly via
# set_default_debug and removed from the list; the remaining nodes are
# configured over RPC.  do_rpc_nodes is a no-op for an empty list, so no
# extra guard is needed here.
# Arguments: $1 - comma-separated node list
set_default_debug_nodes () {
	local nodes="$1"

	if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
		nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
		set_default_debug
	fi

	do_rpc_nodes "$nodes" set_default_debug \
		\\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
}
set_default_debug_facet () {
return
fi
- # XXX remove it once all quota code landed
- echo "skip quota setup"
- return
-
local mntpt=$1
# save old quota type & set new quota type
}
# Verify the given programs are installed on the given nodes by running the
# _check_progs_installed RPC there.
# Arguments: $1 - comma-separated node list; remaining args - program names
check_progs_installed () {
	local nodes=$1
	shift

	# "$@" (quoted) keeps each program name as a single argument
	do_rpc_nodes "$nodes" _check_progs_installed "$@"
}
# recovery-scale functions
check_mem_leak || exit 204
- [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl && \
- echo "$0: lustre didn't clean up..." 1>&2 && return 202 || true
+ [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl &&
+ echo "$TESTSUITE: lustre didn't clean up..." 1>&2 &&
+ return 202 || true
- if module_loaded lnet || module_loaded libcfs; then
- echo "$0: modules still loaded..." 1>&2
- /sbin/lsmod 1>&2
- return 203
- fi
- return 0
+ if module_loaded lnet || module_loaded libcfs; then
+ echo "$TESTSUITE: modules still loaded..." 1>&2
+ /sbin/lsmod 1>&2
+ return 203
+ fi
+ return 0
}
wait_update () {
fi
echo affected facets: $facets
- # we can use "for" here because we are waiting the slowest
- for facet in ${facets//,/ }; do
- local var_svc=${facet}_svc
- local param="*.${!var_svc}.recovery_status"
+ # we can use "for" here because we are waiting the slowest
+ for facet in ${facets//,/ }; do
+ local var_svc=${facet}_svc
+ local param="*.${!var_svc}.recovery_status"
- local host=$(facet_active_host $facet)
- do_rpc_nodes $host _wait_recovery_complete $param $MAX
- done
+ local host=$(facet_active_host $facet)
+ do_rpc_nodes "$host" _wait_recovery_complete $param $MAX
+ done
}
wait_mds_ost_sync () {
opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"}
if [ $fstype == ldiskfs ]; then
- opts+=${IAMDIR:+" --iam-dir"}
-
fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"}
fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"}
fi
}
# Set up the per-facet environment variables: ${facet}_dev, ${facet}_opt,
# ${facet}_svc (read from the device label), ${facet}failover_HOST,
# ${facet}failover_dev and ${facet}_MOUNT.
# Arguments: $1 - facet name; $2 - device; remaining args - mount options
# NOTE: exits the whole script (exit 1) if no valid label can be read.
init_facet_vars () {
	[ "$CLIENTONLY" ] && return 0
	local facet=$1
	shift
	local device=$1
	shift

	eval export ${facet}_dev=${device}
	eval export ${facet}_opt=\"$@\"

	local dev=${facet}_dev

	# We need to loop for the label
	# in case its not initialized yet.
	for wait_time in {0,1,3,5,10}; do
		if [ $wait_time -gt 0 ]; then
			echo "${!dev} not yet initialized,"\
				"waiting ${wait_time} seconds."
			sleep $wait_time
		fi

		local label=$(devicelabel ${facet} ${!dev})

		# A label ending in "ffff" indicates the device has not
		# been initialized yet.  Fixed: the original pattern
		# [f|F]{4}$ was a bracket expression that also matched
		# literal '|' characters; [fF]{4}$ matches only f/F runs.
		if [[ $label =~ [fF]{4}$ ]]; then
			# label is not initialized, unset the result
			# and either try again or fail
			unset label
		else
			break
		fi
	done

	[ -z "$label" ] && echo no label for ${!dev} && exit 1

	eval export ${facet}_svc=${label}

	# allow the cfg file to override the failover host
	local varname=${facet}failover_HOST
	if [ -z "${!varname}" ]; then
		eval $varname=$(facet_host $facet)
	fi

	# ${facet}failover_dev is set in cfg file
	varname=${facet}failover_dev
	if [ -n "${!varname}" ] ; then
		eval export ${facet}failover_dev=${!varname}
	else
		eval export ${facet}failover_dev=$device
	fi

	# get mount point of already mounted device
	# if facet_dev is already mounted then use the real
	# mount point of this facet; otherwise use $(facet_mntpt $facet)
	# i.e. ${facet}_MOUNT if specified by user or default
	local mntpt=$(do_facet ${facet} cat /proc/mounts | \
		awk '"'${!dev}'" == $1 && $3 == "lustre" { print $2 }')
	if [ -z $mntpt ]; then
		mntpt=$(facet_mntpt $facet)
	fi
	eval export ${facet}_MOUNT=$mntpt
}
init_facets_vars () {
}
# Verify the client-side mount configuration on all clients, then run the
# local sanity mount check.  Skipped entirely in NFS-client mode.
# Arguments: $1 - mount point to check
check_config_clients () {
	local clients=${CLIENTS:-$HOSTNAME}
	local mntpt=$1

	nfs_client_mode && return

	do_rpc_nodes "$clients" check_config_client $mntpt

	sanity_mount_check || error "environments are insane!"
}
check_timeout () {
# verify a directory is shared among nodes.
# Checks that $1 exists on every configured node (check_logdir RPC) and is
# writable (check_write_access).  Returns 1 on an empty argument or when the
# write-access check fails.
check_shared_dir() {
	local dir=$1

	[ -z "$dir" ] && return 1
	do_rpc_nodes "$(comma_list $(nodes_list))" check_logdir $dir
	check_write_access $dir || return 1
	return 0
}
# Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
eval $@
}
# Convert a space-delimited list to a comma-delimited list. If the input is
# only whitespace, ensure the output is empty (i.e. "") so [ -n $list ] works
# Duplicates are removed and the result is sorted.
comma_list() {
	# echo is used to convert newlines to spaces, since it doesn't
	# introduce a trailing space as using "tr '\n' ' '" does
	echo $(tr -s " " "\n" <<< $* | sort -b -u) | tr ' ' ','
}
list_member () {
}
drop_ldlm_cancel() {
-#define OBD_FAIL_LDLM_CANCEL 0x304
+#define OBD_FAIL_LDLM_CANCEL_NET 0x304
local RC=0
local list=$(comma_list $(mdts_nodes) $(osts_nodes))
do_nodes $list lctl set_param fail_loc=0x304
}
drop_bl_callback() {
-#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
RC=0
do_facet client lctl set_param fail_loc=0x305
do_facet client "$@" || RC=$?
gather_logs $(comma_list $(nodes_list))
fi
- debugrestore
- [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG
- echo "$@" > $LOGDIR/err
+ debugrestore
+ [ "$TESTSUITELOG" ] &&
+ echo "$TESTSUITE: $TYPE: $TESTNAME $@" >> $TESTSUITELOG
+ echo "$@" > $LOGDIR/err
}
exit_status () {
return $?
}
-equals_msg() {
- banner "$*"
-}
-
log() {
echo "$*"
module_loaded lnet || load_modules
}
# Print the end-of-suite banner, echo any .FAIL lines already recorded in
# $TESTSUITELOG, and append a "duration N" line to it.
# Arguments: $1 - duration of the suite run, in seconds
complete () {
	local duration=$1

	banner test complete, duration $duration sec
	[ -f "$TESTSUITELOG" ] && egrep .FAIL $TESTSUITELOG || true
	echo duration $duration >>$TESTSUITELOG
}
pass() {
}
# Print all configured nodes except the local host, space-separated on one
# line.  \< \> are GNU sed word boundaries, so only whole-word matches of
# $HOSTNAME are removed.
remote_nodes_list () {
	echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
}
init_clients_lists () {
}
# Check the LBUG/LASSERT "catastrophe" flag ($CATASTROPHE) locally and on
# the given remote nodes.  Returns non-zero if the flag is set anywhere.
# Arguments: $1 - optional comma-separated node list
#            (default: all remote nodes; an empty list succeeds trivially)
check_catastrophe() {
	local rnodes=${1:-$(comma_list $(remote_nodes_list))}
	local C=$CATASTROPHE
	[ -f $C ] && [ $(cat $C) -ne 0 ] && return 1

	[ -z "$rnodes" ] && return 0

	do_nodes "$rnodes" "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0);
	if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
	exit \\\$rc;"
}
# CMD: determine mds index where directory inode presents
# 2. wait the deadline of client 2nd request
local maxtime=$(( 2 * $(request_timeout $facet)))
- if ! do_rpc_nodes $(facet_host $facet) \
- _wait_import_state $expected $param $maxtime; then
- error "import is not in ${expected} state"
- return 1
- fi
+ if ! do_rpc_nodes "$(facet_host $facet)" \
+ _wait_import_state $expected $param $maxtime; then
+ error "import is not in ${expected} state"
+ return 1
+ fi
- return 0
+ return 0
}
get_clientmdc_proc_path() {
}
# Run a test-framework RPC (a function dispatched through rpc.sh) on a list
# of nodes.  An empty node list is not an error: it is simply a no-op, which
# lets callers pass $(comma_list ...) results without guarding.
# Arguments: $1 - comma-separated node list; remaining args - RPC name + args
do_rpc_nodes () {
	local list=$1
	shift

	[ -z "$list" ] && return 0

	# Add paths to lustre tests for 32 and 64 bit systems.
	local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
	do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ "
}
wait_clients_import_state () {
local params=$(expand_list $params $proc_path)
done
- if ! do_rpc_nodes $list wait_import_state $expected $params; then
- error "import is not in ${expected} state"
- return 1
- fi
+ if ! do_rpc_nodes "$list" wait_import_state $expected $params; then
+ error "import is not in ${expected} state"
+ return 1
+ fi
}
oos_full() {
local metaea=${TMP}/backup_restore.ea
local metadata=${TMP}/backup_restore.tgz
local opts=${MDS_MOUNT_OPTS}
+ local svc=${SINGLEMDS}_svc
if ! ${rcmd} test -b ${devname}; then
opts=$(csa_add "$opts" -o loop)
${rcmd} umount -d $mntpt || return 10
# step 14: cleanup tmp backup
${rcmd} rm -f $metaea $metadata
+ # step 15: reset device label - it's not virgin on
+ ${rcmd} e2label $devname ${!svc}
}
# remove OI files
${rcmd} umount -d $mntpt || return 2
# OI files will be recreated when mounted as lustre next time.
}
+
# generate maloo upload-able log file name
# \param logname specify unique part of file name
# Output: "$TESTLOG_PREFIX.$TESTNAME.<logname>.<short hostname>.log"
generate_logname() {
	local logname=${1:-"default_logname"}

	echo "$TESTLOG_PREFIX.$TESTNAME.$logname.$(hostname -s).log"
}