#!/bin/bash
# On any unhandled error: print the test summary, drop the $TF_FAIL marker
# file so callers can detect the failure, and log which suite failed.
trap 'print_summary && touch $TF_FAIL && \
	echo "$TESTSUITE: FAIL: test-framework exiting on error"' ERR
set -e
#set -x

#export PDSH="pdsh -S -Rssh -w"
export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
export UMOUNT=${UMOUNT:-"umount -d"}

# Configuration and log locations used by the lsnapshot_* helpers below.
export LSNAPSHOT_CONF="/etc/ldev.conf"
export LSNAPSHOT_LOG="/var/log/lsnapshot.log"
# sles12 umount has a issue with -d option
[ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
export UMOUNT="umount"
#
# All Lustre versions support "lctl get_param" to report the version of the
# code running in the kernel (what our tests are interested in), but it
# doesn't work without modules loaded. After 2.9.53 and in upstream kernels
# the "version" parameter doesn't include "lustre: " at the beginning.
# If that fails, call "lctl lustre_build_version" which prints either (or both)
# the userspace and kernel build versions, but until 2.8.55 required root
# access to get the Lustre kernel version. If that also fails, fall back to
# using "lctl --version", which is easy to parse and works without the kernel
# modules, but was only added in 2.6.50 and only prints the lctl tool version,
# not the module version, though they are usually the same.
#
# Various commands and their output format for different Lustre versions:
# lctl get_param version:	2.9.55
# lctl get_param version:	lustre: 2.8.53
# lctl get_param version:	lustre: 2.6.52
#				kernel: patchless_client
#				build: v2_6_92_0-2.6.32-431.el6_lustre.x86_64
# lctl lustre_build_version:	Lustre version: 2.8.53_27_gae67fc01
# lctl lustre_build_version:	error: lustre_build_version: Permission denied
#	(as non-root user)	lctl version: v2_6_92_0-2.6.32-431.el6.x86_64
# lctl lustre_build_version:	Lustre version: 2.5.3-2.6.32.26-175.fc12.x86_64
# lctl version:			2.5.3-2.6.32..26-175fc12.x86_64
# lctl --version:		lctl 2.6.50
#
# output: prints version string to stdout in (up to 4) dotted-decimal values
lustre_build_version() {
	local facet=${1:-client}

	# Try the methods from most to least preferred (see table above).
	local ver=$(do_facet $facet "$LCTL get_param -n version 2>/dev/null ||
				$LCTL lustre_build_version 2>/dev/null ||
				$LCTL --version 2>/dev/null | cut -d' ' -f2")
	# Prefer a line that actually names a Lustre version over the
	# "kernel:"/"build:" lines some versions also print.
	local lver=$(egrep -i "lustre: |version: " <<<$ver | head -n 1)
	[ -n "$lver" ] && ver="$lver"

	# Strip the label, a leading "v" and any "-" suffix, turn "_" into
	# ".", and keep at most four dotted-decimal components.
	sed -e 's/.*: //' -e 's/^v//' -e 's/-.*//' -e 's/_/./g' <<<$ver |
		cut -d. -f1-4
}
# Report the Lustre numeric build version code for the supplied facet.
/sbin/lsmod | grep -q "^\<$1\>"
}
# Parallels VMs cannot insmod modules directly from the prlfs filesystem;
# once detected, remember it so later loads skip the direct attempt.
PRLFS=false

# Load a kernel module, working around prlfs (Parallels shared folders).
#
# usage: lustre_insmod <module_path> [module args...]
# Returns insmod's exit status; prints insmod's error message on failure.
lustre_insmod() {
	local module=$1
	shift
	local args="$@"
	local msg
	local rc=0

	if ! $PRLFS; then
		msg="$(insmod $module $args 2>&1)" && return 0 || rc=$?
	fi

	# parallels can't load modules directly from prlfs, use /tmp instead
	# (7c7c6673 is the prlfs filesystem magic number reported by stat -f)
	if $PRLFS || [[ "$(stat -f -c%t $module)" == "7c7c6673" ]]; then
		local target="$(mktemp)"

		cp "$module" "$target"
		insmod $target $args
		rc=$?
		# direct load failed but the /tmp copy worked: this is prlfs
		[[ $rc == 0 ]] && PRLFS=true
		rm -f $target
	else
		echo "$msg"
	fi
	return $rc
}
+
# Load a module on the system where this is running.
#
# usage: load_module module_name [module arguments for insmod/modprobe]
# we're passing options on the command-line.
if [[ "$BASE" == "lnet_selftest" ]] &&
[[ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]]; then
- insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
+ lustre_insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
elif [[ -f ${LUSTRE}/${module}${EXT} ]]; then
[[ "$BASE" != "ptlrpc_gss" ]] || modprobe sunrpc
- insmod ${LUSTRE}/${module}${EXT} "$@"
+ lustre_insmod ${LUSTRE}/${module}${EXT} "$@"
else
# must be testing a "make install" or "rpm" installation
# note failed to load ptlrpc_gss is considered not fatal
set_default_debug
load_module ../lnet/lnet/lnet
- case $NETTYPE in
- o2ib)
- LNETLND="o2iblnd/ko2iblnd"
- ;;
- *)
- ;;
- esac
- LNETLND=${LNETLND:-"socklnd/ksocklnd"}
+
+ LNDPATH=${LNDPATH:-"../lnet/klnds"}
+ if [ -z "$LNETLND" ]; then
+ case $NETTYPE in
+ o2ib*) LNETLND="o2iblnd/ko2iblnd" ;;
+ tcp*) LNETLND="socklnd/ksocklnd" ;;
+ *) local lnd="${NETTYPE%%[0-9]}lnd"
+ [ -f "$LNDPATH/$lnd/k$lnd.ko" ] &&
+ LNETLND="$lnd/k$lnd" ||
+ LNETLND="socklnd/ksocklnd"
+ esac
+ fi
load_module ../lnet/klnds/$LNETLND
load_module obdclass/obdclass
load_module ptlrpc/ptlrpc
[ "$LQUOTA" != "no" ] &&
load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
- modprobe zfs
+ lsmod | grep zfs >&/dev/null || modprobe zfs
load_module osd-zfs/osd_zfs
fi
if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
}
#
#
# Get ZFS local fsname: the last path component of the facet's device,
# i.e. the dataset name within its zpool.
#
zfs_local_fsname() {
	local facet=$1
	local lfsname=$(basename $(facet_device $facet))

	echo -n $lfsname
}
+
+#
# Create ZFS storage pool.
#
create_zpool() {
shift 3
local opts=${@:-"-o cachefile=none"}
- do_facet $facet "modprobe zfs;
+ do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
$ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL create -f $opts $poolname $vdev"
}
if [[ -n "$poolname" ]]; then
opts+=" -d $(dirname $(facet_vdevice $facet))"
- do_facet $facet "modprobe zfs;
+ do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
$ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f $opts $poolname"
fi
# commit the device label change to disk
if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then
echo "Commit the device label on ${!dev}"
- do_facet $facet "sync; sync; sync"
- sleep 5
+ do_facet $facet "sync; sleep 1; sync"
fi
echo $free_inodes
}
#
# Get the OST device status from 'lfs df' with a given OST index.
#
ost_dev_status() {
	local ost_idx=$1
	local mnt_pnt=${2:-$MOUNT}
	local ost_uuid

	# map the index to its UUID, then pull column 7 (the status flags)
	# from the matching 'lfs df' line
	ost_uuid=$(ostuuid_from_index $ost_idx $mnt_pnt)
	lfs_df $mnt_pnt | awk '/'$ost_uuid'/ { print $7 }'
}
+
setup_quota(){
local mntpt=$1
fi
do_nodes $clients "
-running=\\\$(mount | grep -c $mnt' ');
-rc=0;
-if [ \\\$running -eq 0 ] ; then
- mkdir -p $mnt;
- $MOUNT_CMD $flags $opts $device $mnt;
- rc=\\\$?;
-fi;
-exit \\\$rc" || return ${PIPESTATUS[0]}
+ running=\\\$(mount | grep -c $mnt' ');
+ rc=0;
+ if [ \\\$running -eq 0 ] ; then
+ mkdir -p $mnt;
+ $MOUNT_CMD $flags $opts $device $mnt;
+ rc=\\\$?;
+ else
+ lustre_mnt_count=\\\$(mount | grep $mnt' ' | \
+ grep 'type lustre' | wc -l);
+ if [ \\\$running -ne \\\$lustre_mnt_count ] ; then
+ echo zconf_mount_clients FAILED: \
+ mount count \\\$running, not matching \
+ with mount count of 'type lustre' \
+ \\\$lustre_mnt_count;
+ rc=1;
+ fi;
+ fi;
+ exit \\\$rc" || return ${PIPESTATUS[0]}
echo "Started clients $clients: "
do_nodes $clients "mount | grep $mnt' '"
}
# Start one background client load (run_<load>.sh) on the given client and
# record its PID and children PIDs in CLIENT_LOAD_PIDS for later monitoring.
start_client_load() {
	local client=$1
	local load=$2
	local var=$(node_var_name $client)_load
	eval export ${var}=$load

	# export everything run_<load>.sh needs into its remote environment
	do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
			BREAK_ON_ERROR=$BREAK_ON_ERROR \
			END_RUN_FILE=$END_RUN_FILE \
			LOAD_PID_FILE=$LOAD_PID_FILE \
			TESTLOG_PREFIX=$TESTLOG_PREFIX \
			TESTNAME=$TESTNAME \
			DBENCH_LIB=$DBENCH_LIB \
			DBENCH_SRC=$DBENCH_SRC \
			CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
			LFS=$LFS \
			LCTL=$LCTL \
			FSNAME=$FSNAME \
			run_${load}.sh" &
	local ppid=$!
	log "Started client load: ${load} on $client"

	# get the children process IDs
	local pids=$(ps --ppid $ppid -o pid= | xargs)
	CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids"
	return 0
}
start_client_loads () {
wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
error "wait_update OSTs up on client failed"
- cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
- awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
+ cmd="$LCTL get_param osp.$FSNAME-OST*-MDT0000.prealloc_last_id |
+ awk '/=[1-9][0-9]/ { c += 1 } END { printf \\\"%d\\\", c }'"
wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
- error "wait_update OSTs up on MDT failed"
+ error "wait_update OSTs up on MDT0000 failed"
}
wait_destroy_complete () {
clients_up || error "post-failover stat: $?"
}
-do_lmc() {
- echo There is no lmc. This is mountconf, baby.
- exit 1
-}
-
# Print the address parts (before '@') of the NIDs on the given nodes,
# optionally filtered by network name (default "." matches any network).
host_nids_address() {
	local nodes=$1
	local net=${2:-"."}

	do_nodes $nodes "$LCTL list_nids | grep $net | cut -f 1 -d @"
}
h2name_or_ip() {
fi
}
# Map a hostname to a NID, using $NETTYPE if set, otherwise the network
# passed as the second argument.
h2nettype() {
	if [[ -n "$NETTYPE" ]]; then
		h2name_or_ip "$1" "$NETTYPE"
	else
		h2name_or_ip "$1" "$2"
	fi
}
declare -fx h2nettype
# Wrapper function to print the deprecation warning
h2tcp() {
	echo "h2tcp: deprecated, use h2nettype instead" 1>&2
	if [[ -n "$NETTYPE" ]]; then
		h2nettype "$@"
	else
		h2nettype "$1" "tcp"
	fi
}
-declare -fx h2elan
# Wrapper function to print the deprecation warning
h2o2ib() {
	echo "h2o2ib: deprecated, use h2nettype instead" 1>&2
	if [[ -n "$NETTYPE" ]]; then
		h2nettype "$@"
	else
		h2nettype "$1" "o2ib"
	fi
}
# This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST
# expressions format. As a bonus we can then just pass in those variables
group=${group%%]*}
for range in ${group//,/ }; do
+ local order
+
begin=${range%-*}
end=${range#*-}
begin=$(echo $begin | sed 's/0*//')
[ -z $begin ] && begin=0
- for num in $(seq -f "%0${padlen}g" $begin $end); do
+ if [ ! -z "${begin##[!0-9]*}" ]; then
+ order=$(seq -f "%0${padlen}g" $begin $end)
+ else
+ order=$(eval echo {$begin..$end});
+ fi
+
+ for num in $order; do
value="${name#*,}${num}${back}"
[ "$value" != "${value/\[/}" ] && {
value=$(hostlist_expand "$value")
myList="${list%% *}"
while [[ "$list" != ${myList##* } ]]; do
- list=${list//${list%% *} /}
- myList="$myList ${list%% *}"
+ local tlist=" $list"
+ list=${tlist// ${list%% *} / }
+ list=${list:1}
+ myList="$myList ${list%% *}"
done
myList="${myList%* }";
var=${facet}failover_HOST
if [ -n "${!var}" ] && [ ${!var} != $(facet_host $facet) ]; then
- opts+=" --failnode=$(h2$NETTYPE ${!var})"
+ opts+=" --failnode=$(h2nettype ${!var})"
fi
opts+=${TIMEOUT:+" --param=sys.timeout=$TIMEOUT"}
echo -n "$opts"
}
# Print "--mountfsoptions=..." for the facet if the facet-type-specific
# <TYPE>_MOUNT_FS_OPTS variable is set, otherwise print nothing.
mountfs_opts() {
	local facet=$1
	local type=$(facet_type $facet)
	local var=${type}_MOUNT_FS_OPTS
	local opts=""

	if [ -n "${!var}" ]; then
		opts+=" --mountfsoptions=${!var}"
	fi
	echo -n "$opts"
}
+
check_ost_indices() {
local index_count=${#OST_INDICES[@]}
[[ $index_count -eq 0 || $OSTCOUNT -le $index_count ]] && return 0
done
}
# Create the facet's backing "device" file if it is a loopback file that
# does not exist yet; error out for missing real block devices.
#
# usage: __touch_device <facet_type: mgs|mds|ost> <facet_num>
__touch_device()
{
	local facet_type=$1 # mgs || mds || ost
	local facet_num=$2
	local facet=${1}${2}
	local device

	case "$(facet_fstype $facet)" in
	ldiskfs)
		device=$(${facet_type}devname $facet_num)
		;;
	zfs)
		device=$(${facet_type}vdevname $facet_num)
		;;
	*)
		error "Unhandled filesystem type"
		;;
	esac

	do_facet $facet "[ -e \"$device\" ]" && return

	# Note: the following check only works with absolute paths
	[[ ! "$device" =~ ^/dev/ ]] || [[ "$device" =~ ^/dev/shm/ ]] ||
		error "$facet: device '$device' does not exist"

	# mkfs.lustre refuses to create missing files itself (to avoid
	# accidentally writing to bogus /dev paths), so create them here
	do_facet $facet "touch \"${device}\""
}
+
format_mgs() {
local quiet
fi
echo "Format mgs: $(mgsdevname)"
reformat_external_journal mgs
- add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \
+
+ # touch "device" in case it is a loopback file for testing and needs to
+ # be created. mkfs.lustre doesn't do this to avoid accidentally writing
+ # to non-existent files in /dev if the admin made a typo during setup
+ __touch_device mgs
+
+ add mgs $(mkfs_opts mgs $(mgsdevname)) $(mountfs_opts mgs) --reformat \
$(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} || exit 10
}
fi
echo "Format mds$num: $(mdsdevname $num)"
reformat_external_journal mds$num
+
+ __touch_device mds $num
+
add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \
- --reformat $(mdsdevname $num) $(mdsvdevname $num) \
- ${quiet:+>/dev/null} || exit 10
+ $(mountfs_opts mds$num) --reformat $(mdsdevname $num) \
+ $(mdsvdevname $num) ${quiet:+>/dev/null} || exit 10
}
format_ost() {
fi
echo "Format ost$num: $(ostdevname $num)"
reformat_external_journal ost$num
+
+ __touch_device ost $num
+
add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \
- --reformat $(ostdevname $num) $(ostvdevname ${num}) \
- ${quiet:+>/dev/null} || exit 10
+ $(mountfs_opts ost$num) --reformat $(ostdevname $num) \
+ $(ostvdevname ${num}) ${quiet:+>/dev/null} || exit 10
}
formatall() {
return 0
fi
- local myMGS_host=$mgs_HOST
- if [ "$NETTYPE" = "ptl" ]; then
- myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
- fi
-
echo Checking config lustre mounted on $mntpt
local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}')
mgshost=$(echo $mgshost | awk -F: '{print $1}')
-# if [ "$mgshost" != "$myMGS_host" ]; then
-# log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
-# Please use correct config or set mds_HOST correctly!"
-# fi
-
}
check_config_clients () {
export base=$(basetest $1)
[ "$CHECK_GRANT" == "no" ] && return 0
- testname=GCHECK_ONLY_${base}
- [ ${!testname}x == x ] && return 0
+ testnamebase=GCHECK_ONLY_${base}
+ testname=GCHECK_ONLY_$1
+ [ ${!testnamebase}x == x -a ${!testname}x == x ] && return 0
echo -n "checking grant......"
awk '{ total += $1 } END { printf("%0.0f", total) }')
# get server grant
+ # which is tot_granted less grant_precreate
server_grant=$(do_nodes $(comma_list $(osts_nodes)) \
- "$LCTL get_param -n obdfilter.${FSNAME}-OST*.tot_granted" |
- awk '{ total += $1 } END { printf("%0.0f", total) }')
+ "$LCTL get_param "\
+ "obdfilter.${FSNAME}-OST*.{tot_granted,tot_pending,grant_precreate}" |
+ sed 's/=/ /'| awk '/tot_granted/{ total += $2 };
+ /tot_pending/{ total -= $2 };
+ /grant_precreate/{ total -= $2 };
+ END { printf("%0.0f", total) }')
# check whether client grant == server grant
if [[ $client_grant -ne $server_grant ]]; then
- echo "failed: client:${client_grant} server: ${server_grant}."
do_nodes $(comma_list $(osts_nodes)) \
- "$LCTL get_param obdfilter.${FSNAME}-OST*.tot*"
+ "$LCTL get_param obdfilter.${FSNAME}-OST*.tot*" \
+ "obdfilter.${FSNAME}-OST*.grant_*"
do_nodes $clients "$LCTL get_param osc.${FSNAME}-*.cur_*_bytes"
- return 1
+ error "failed: client:${client_grant} server: ${server_grant}."
else
echo "pass: client:${client_grant} server: ${server_grant}"
fi
# Run "lfs setstripe" on the NFS server exporting the given local directory.
#
# usage: setstripe_nfsserver <local_nfs_mountpoint> <dir_on_server> \
#        <setstripe args...>
setstripe_nfsserver () {
	local dir=$1
	local nfsexportdir=$2
	shift
	shift

	# all NFS servers whose export covers $dir, from /proc/mounts
	local -a nfsexport=($(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \
		{ print $1 }' /proc/mounts | cut -f 1 -d :))

	# check that only one nfs mounted
	[[ -z $nfsexport ]] && echo "$dir is not nfs mounted" && return 1
	(( ${#nfsexport[@]} == 1 )) ||
		error "several nfs mounts found for $dir: ${nfsexport[@]} !"

	do_nodev ${nfsexport[0]} lfs setstripe $nfsexportdir "$@"
}
# Check and add a test group.
}
# Print 99% of the currently available inode count, leaving some headroom
# so mdsrate runs do not exhaust the filesystem completely.
mdsrate_inodes_available () {
	local min_inodes=$(inodes_available)
	echo $((min_inodes * 99 / 100))
}
# reset stat counters
#
# usage: clear_stats <lctl stats parameter glob>
clear_stats() {
	local paramfile="$1"

	lctl set_param -n $paramfile=0
}
# sum stat items
#
# usage: calc_stats <lctl stats parameter glob> <stat name>
# Prints the sum of the second column of all matching stat lines.
calc_stats() {
	local paramfile="$1"
	local stat="$2"

	lctl get_param -n $paramfile |
		awk '/^'$stat'/ { sum += $2 } END { printf("%0.0f", sum) }'
}
calc_sum () {
}
# Sum the given per-OSC space parameter (e.g. "kbytesfree") across all OSCs.
calc_osc_kbytes () {
	# refresh the client's cached view of OST statfs data first
	df $MOUNT > /dev/null
	$LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
}
# save_lustre_params(comma separated facet list, parameter_mask)
if [[ $facet == client* ]]; then
# During setup time, the osc might not be setup, it need wait
# until list_param can return valid value.
- param="osc.${ost%?}[^mM]*.ost_server_uuid"
params=$($LCTL list_param $param 2>/dev/null || true)
while [ -z "$params" ]; do
if [ $i -ge $maxtime ]; then
if ! do_rpc_nodes "$(facet_active_host $facet)" \
wait_import_state $expected "$params" $maxtime; then
- error "import is not in ${expected} state"
+ error "$facet: import is not in $expected state after $maxtime"
return 1
fi
fi
}
# Wait until the osp (facet "mds") or osc import for the given target
# reaches the expected active state (0 or 1) on every MDT, polling every
# 5 seconds with a 30-second timeout per MDT.
#
# usage: wait_osp_active <facet: mds|ost> <tgt_name> <tgt_idx> <expected>
#
# BUGFIX: "max" and "wait" were declared inside the polling loop, so
# "wait" was reset to 0 on every iteration and the timeout never fired;
# they must live at per-MDT scope.
wait_osp_active() {
	local facet=$1
	local tgt_name=$2
	local tgt_idx=$3
	local expected=$4
	local num

	# wait until all MDTs are in the expected state
	for ((num = 1; num <= $MDSCOUNT; num++)); do
		local mdtosp=$(get_mdtosc_proc_path mds${num} ${tgt_name})
		local mproc
		local max=30
		local wait=0

		if [ $facet = "mds" ]; then
			mproc="osp.$mdtosp.active"
			# an MDT has no osp device pointing at itself
			[ $num -eq $((tgt_idx + 1)) ] && continue
		else
			mproc="osc.$mdtosp.active"
		fi

		echo "check $mproc"
		while [ 1 ]; do
			sleep 5
			local result=$(do_facet mds${num} \
					"$LCTL get_param -n $mproc")

			[ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc"
			if [ $result -eq $expected ]; then
				echo -n "target updated after"
				echo "$wait sec (got $result)"
				break
			fi
			wait=$((wait + 5))
			if [ $wait -eq $max ]; then
				error "$tgt_name: wanted $expected got $result"
			fi
			echo "Waiting $((max - wait)) secs for $tgt_name"
		done
	done
}
+
oos_full() {
local -a AVAILA
local -a GRANTA
}
# Print the smallest available space (KB, column 4 of "lfs df") of any OST.
min_ost_size () {
	$LFS df | grep OST | awk '{print $4}' | sort -un | head -1
}
#
fi
}
# free_fd: find the smallest and not in use file descriptor [above @last_fd]
#
# If called many times, passing @last_fd will avoid repeatedly searching
# already-open FDs if we know they are still in use.
#
# usage: free_fd [last_fd]
free_fd()
{
	local max_fd=$(ulimit -n)
	local fd=$((${1:-2} + 1))

	while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do
		((++fd))
	done
	[ $fd -lt $max_fd ] || error "finding free file descriptor failed"
	echo $fd
}
check_mount_and_prep()
do_nodes $clients "killall $signal $name"
}

# Thin wrappers running "lctl snapshot_*" on the MGS against $FSNAME;
# any extra arguments are passed through to the underlying lctl command.

lsnapshot_create()
{
	do_facet mgs "$LCTL snapshot_create -F $FSNAME $*"
}

lsnapshot_destroy()
{
	do_facet mgs "$LCTL snapshot_destroy -F $FSNAME $*"
}

lsnapshot_modify()
{
	do_facet mgs "$LCTL snapshot_modify -F $FSNAME $*"
}

lsnapshot_list()
{
	do_facet mgs "$LCTL snapshot_list -F $FSNAME $*"
}

lsnapshot_mount()
{
	do_facet mgs "$LCTL snapshot_mount -F $FSNAME $*"
}

lsnapshot_umount()
{
	do_facet mgs "$LCTL snapshot_umount -F $FSNAME $*"
}
+
# Dump the lsnapshot log from the MGS for diagnosis, then fail with $msg.
lss_err()
{
	local msg=$1

	do_facet mgs "cat $LSNAPSHOT_LOG"
	error $msg
}
+
# Force-destroy all leftover test snapshots (names starting with "lss_"),
# one at a time.
lss_cleanup()
{
	echo "Cleaning test environment ..."

	# Every lsnapshot command takes exclusive lock with others,
	# so can NOT destroy the snapshot during list with 'xargs'.
	while true; do
		local ssname=$(lsnapshot_list | grep snapshot_name |
			grep lss_ | awk '{ print $2 }' | head -n 1)
		[ -z "$ssname" ] && break

		lsnapshot_destroy -n $ssname -f ||
			lss_err "Fail to destroy $ssname by force"
	done
}
+
# Append one ldev.conf line for the given facet to $LSNAPSHOT_CONF on the
# MGS node.
#
# usage: lss_gen_conf_one <facet> <role: MDT|OST> <0-based index>
lss_gen_conf_one()
{
	local facet=$1
	local role=$2
	local idx=$3

	local host=$(facet_active_host $facet)
	local dir=$(dirname $(facet_vdevice $facet))
	local pool=$(zpool_name $facet)
	local lfsname=$(zfs_local_fsname $facet)
	# label is e.g. "lustre-MDT0000" (index rendered as 4 hex digits)
	local label=${FSNAME}-${role}$(printf '%04x' $idx)

	do_facet mgs \
		"echo '$host - $label zfs:${dir}/${pool}/${lfsname} - -' >> \
		$LSNAPSHOT_CONF"
}
+
# Generate $LSNAPSHOT_CONF on the MGS, covering the MGS (when separate
# from the MDS), all MDTs and all OSTs. Skips the whole test when any
# facet is not ZFS-backed, since "lctl snapshot_*" only works on ZFS.
lss_gen_conf()
{
	local num

	do_facet mgs "rm -f $LSNAPSHOT_CONF"
	echo "Generating $LSNAPSHOT_CONF on MGS ..."

	if ! combined_mgs_mds ; then
		[ $(facet_fstype mgs) != zfs ] &&
			skip "Lustre snapshot 1 only works for ZFS backend" &&
			exit 0

		local host=$(facet_active_host mgs)
		local dir=$(dirname $(facet_vdevice mgs))
		local pool=$(zpool_name mgs)
		local lfsname=$(zfs_local_fsname mgs)

		do_facet mgs \
			"echo '$host - MGS zfs:${dir}/${pool}/${lfsname} - -' \
			>> $LSNAPSHOT_CONF" || lss_err "generate lss conf (mgs)"
	fi

	for num in $(seq $MDSCOUNT); do
		[ $(facet_fstype mds$num) != zfs ] &&
			skip "Lustre snapshot 1 only works for ZFS backend" &&
			exit 0

		lss_gen_conf_one mds$num MDT $((num - 1)) ||
			lss_err "generate lss conf (mds$num)"
	done

	for num in $(seq $OSTCOUNT); do
		[ $(facet_fstype ost$num) != zfs ] &&
			skip "Lustre snapshot 1 only works for ZFS backend" &&
			exit 0

		lss_gen_conf_one ost$num OST $((num - 1)) ||
			lss_err "generate lss conf (ost$num)"
	done

	do_facet mgs "cat $LSNAPSHOT_CONF"
}
+
# Translate one line of plain "lfs getstripe" output into the matching
# "lfs setstripe" option (-c/-S/-i); prints nothing for other lines.
parse_plain_param()
{
	local line=$1
	# quote the here-string so embedded whitespace is preserved
	local val=$(awk '{print $2}' <<< "$line")

	if [[ $line =~ ^"lmm_stripe_count:" ]]; then
		echo "-c $val"
	elif [[ $line =~ ^"lmm_stripe_size:" ]]; then
		echo "-S $val"
	elif [[ $line =~ ^"lmm_stripe_offset:" ]]; then
		echo "-i $val"
	fi
}
+
# Convert "lfs getstripe [-d]" output (read from stdin) into equivalent
# "lfs setstripe" arguments. Handles plain directory layouts, plain file
# layouts, and composite (PFL) layouts.
parse_layout_param()
{
	local mode=""
	local val=""
	local param=""

	while read line; do
		# the first recognizable line determines the layout flavor
		if [[ -z $mode ]]; then
			if [[ $line =~ ^"stripe_count:" ]]; then
				mode="plain_dir"
			elif [[ $line =~ ^"lmm_stripe_count:" ]]; then
				mode="plain_file"
			elif [[ $line =~ ^"lcm_layout_gen:" ]]; then
				mode="pfl"
			fi
		fi

		if [[ $mode = "plain_dir" ]]; then
			# one line: "stripe_count: N stripe_size: N
			# stripe_offset: N"
			param=$(echo $line |
				awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}')
		elif [[ $mode = "plain_file" ]]; then
			val=$(parse_plain_param "$line")
			[[ ! -z $val ]] && param="$param $val"
		elif [[ $mode = "pfl" ]]; then
			val=$(echo $line | awk '{print $2}')
			if [[ $line =~ ^"lcme_extent.e_end:" ]]; then
				if [[ $val = "EOF" ]]; then
					param="$param -E -1"
				else
					param="$param -E $val"
				fi
			elif [[ $line =~ ^"stripe_count:" ]]; then
				# pfl dir
				val=$(echo $line |
				awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}')
				param="$param $val"
			else
				#pfl file
				val=$(parse_plain_param "$line")
				[[ ! -z $val ]] && param="$param $val"
			fi
		fi
	done
	echo "$param"
}
+
# Print the "lfs setstripe" arguments matching the layout of file/dir $1.
get_layout_param()
{
	local param=$($LFS getstripe -d $1 | parse_layout_param)
	echo "$param"
}