fi
export RSYNC_RSH=${RSYNC_RSH:-rsh}
+ export LNETCTL=${LNETCTL:-"$LUSTRE/../lnet/utils/lnetctl"}
+ [ ! -f "$LNETCTL" ] && export LNETCTL=$(which lnetctl 2> /dev/null)
export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
[ ! -f "$LCTL" ] && export LCTL=$(which lctl)
export LFS=${LFS:-"$LUSTRE/utils/lfs"}
get_lustre_env
# use localrecov to enable recovery for local clients, LU-12722
- [[ $MDS1_VERSION -lt $(version_code 2.13.52) ]] ||
+ [[ $MDS1_VERSION -lt $(version_code 2.13.52) ]] || {
export MDS_MOUNT_OPTS=${MDS_MOUNT_OPTS:-"-o localrecov"}
+ export MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-"-o localrecov"}
+ }
+
[[ $OST1_VERSION -lt $(version_code 2.13.52) ]] ||
export OST_MOUNT_OPTS=${OST_MOUNT_OPTS:-"-o localrecov"}
}
fi
}
-unload_modules() {
- wait_exit_ST client # bug 12845
-
+# unload_modules_local()
+#
+# Unload the Lustre/ldiskfs kernel modules on the local node, reload the
+# udev rules when the lustre-test rules file is installed, and check for
+# memory leaks afterwards.
+# Returns 0 on success, 2 if module removal fails, 254 on a memory leak.
+unload_modules_local() {
$LUSTRE_RMMOD ldiskfs || return 2
[ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
udevadm control --reload-rules
udevadm trigger
+	check_mem_leak || return 254
+
+	return 0
+}
+
+# unload_modules()
+#
+# Wait for the client to exit, then unload Lustre modules on the local node
+# and, when $LOAD_MODULES_REMOTE is true, on all remote nodes as well.
+# Returns the status of the local unload (0 on success).
+unload_modules() {
+	local rc=0
+
+	wait_exit_ST client # bug 12845
+
+	unload_modules_local || rc=$?
+
if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
if [ -n "$list" ]; then
echo "unloading modules on: '$list'"
-	do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
-	do_rpc_nodes "$list" check_mem_leak
-	do_rpc_nodes "$list" "rm -f /etc/udev/rules.d/99-lustre-test.rules"
-	do_rpc_nodes "$list" "udevadm control --reload-rules"
-	do_rpc_nodes "$list" "udevadm trigger"
+	# remote nodes reuse the single unload_modules_local helper
+	do_rpc_nodes "$list" unload_modules_local
fi
fi
rm -f $sbin_mount
fi
-	check_mem_leak || return 254
+	[[ $rc -eq 0 ]] && echo "modules unloaded."
-	echo "modules unloaded."
-	return 0
+	return $rc
}
fs_log_size() {
echo -n $label
}
-mdsdevlabel() {
- local num=$1
- local device=$(mdsdevname $num)
- local label=$(devicelabel mds$num ${device} | grep -v "CMD: ")
- echo -n $label
-}
-
-ostdevlabel() {
- local num=$1
- local device=$(ostdevname $num)
- local label=$(devicelabel ost$num ${device} | grep -v "CMD: ")
- echo -n $label
-}
-
#
# Get the device of a facet.
#
}
# End recovery-scale functions
-# verify that lustre actually cleaned up properly
-cleanup_check() {
- VAR=$(lctl get_param -n catastrophe 2>&1)
- if [ $? = 0 ] ; then
- if [ $VAR != 0 ]; then
- error "LBUG/LASSERT detected"
- fi
- fi
- BUSY=$(dmesg | grep -i destruct || true)
- if [ -n "$BUSY" ]; then
- echo "$BUSY" 1>&2
- [ -e $TMP/debug ] && mv $TMP/debug $TMP/debug-busy.$(date +%s)
- exit 205
- fi
-
- check_mem_leak || exit 204
-
- [[ $($LCTL dl 2>/dev/null | wc -l) -gt 0 ]] && $LCTL dl &&
- echo "$TESTSUITE: lustre didn't clean up..." 1>&2 &&
- return 202 || true
-
- if module_loaded lnet || module_loaded libcfs; then
- echo "$TESTSUITE: modules still loaded..." 1>&2
- /sbin/lsmod 1>&2
- return 203
- fi
- return 0
-}
-
##
# wait for a command to return the expected result
#
done
}
-obd_name() {
- local facet=$1
-}
-
replay_barrier() {
local facet=$1
do_facet $facet "sync; sync; sync"
echo $mounted' ' | grep -w -q $mntpt' '
}
-is_empty_dir() {
- [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0
- return 1
-}
-
-# empty lustre filesystem may have empty directories lost+found and .lustre
-is_empty_fs() {
- # exclude .lustre & lost+found
- [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
- -print | wc -l) = 1 ] || return 1
- [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found || return 1
- if [ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.4.0) ]; then
- # exclude .lustre/fid (LU-2780)
- [ $(find $1/.lustre -maxdepth 1 -name fid -prune -o \
- -print | wc -l) = 1 ] || return 1
- else
- [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre || return 1
- fi
- return 0
-}
-
check_and_setup_lustre() {
sanitize_parameters
nfs_client_mode && return
check_and_setup_lustre
}
-# Get all of the server target devices from a given server node and type.
-get_mnt_devs() {
- local node=$1
- local type=$2
- local devs
- local dev
-
- if [ "$type" == ost ]; then
- devs=$(get_osd_param $node "" mntdev)
- else
- devs=$(do_node $node $LCTL get_param -n osd-*.$FSNAME-M*.mntdev)
- fi
- for dev in $devs; do
- case $dev in
- *loop*) do_node $node "losetup $dev" | \
- sed -e "s/.*(//" -e "s/).*//" ;;
- *) echo $dev ;;
- esac
- done
-}
-
-# Get all of the server target devices.
-get_svr_devs() {
- local node
- local i
-
- # Master MDS parameters used by lfsck
- MDTNODE=$(facet_active_host $SINGLEMDS)
- MDTDEV=$(echo $(get_mnt_devs $MDTNODE mdt) | awk '{print $1}')
-
- # MDT devices
- i=0
- for node in $(mdts_nodes); do
- MDTDEVS[i]=$(get_mnt_devs $node mdt)
- i=$((i + 1))
- done
-
- # OST devices
- i=0
- for node in $(osts_nodes); do
- OSTDEVS[i]=$(get_mnt_devs $node ost)
- i=$((i + 1))
- done
-}
-
# Run e2fsck on MDT or OST device.
run_e2fsck() {
local node=$1
at_get $1 at_max
}
-at_min_get() {
- at_get $1 at_min
-}
-
at_max_set() {
local at_max=$1
shift
log_sub_test_end "SKIP" "0" "0" "$@"
}
-canonical_path() {
- (cd $(dirname $1); echo $PWD/$(basename $1))
-}
-
grant_from_clients() {
local nodes="$1"
# Description:
# Returns list of ip addresses for each interface
local_addr_list() {
-	ip addr | awk '/inet\ / {print $2}' | awk -F\/ '{print $1}'
+	# "/inet /" (note the trailing blank) selects IPv4 lines only, skipping
+	# "inet6"; the second awk strips the /prefix-length from each address
+	ip addr | awk '/inet / {print $2}' | awk -F/ '{print $1}'
}
is_local_addr() {
echo -n $(facets_nodes $(get_facets OST))
}
-# Get all of the active AGT (HSM agent) nodes.
-agts_nodes () {
- echo -n $(facets_nodes $(get_facets AGT))
-}
-
# Get all of the client nodes and active server nodes.
nodes_list () {
local nodes=$HOSTNAME
echo $nodes | wc -w || true
}
-mixed_ost_devs () {
- local nodes=$(osts_nodes)
- local osscount=$(get_node_count "$nodes")
- [ ! "$OSTCOUNT" = "$osscount" ]
-}
-
mixed_mdt_devs () {
local nodes=$(mdts_nodes)
local mdtcount=$(get_node_count "$nodes")
rm -f $file
}
-setstripe_nfsserver () {
- local dir=$1
- local nfsexportdir=$2
- shift
- shift
-
- local -a nfsexport=($(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \
- { print $1 }' /proc/mounts | cut -f 1 -d :))
-
- # check that only one nfs mounted
- [[ -z $nfsexport ]] && echo "$dir is not nfs mounted" && return 1
- (( ${#nfsexport[@]} == 1 )) ||
- error "several nfs mounts found for $dir: ${nfsexport[@]} !"
-
- do_nodev ${nfsexport[0]} lfs setstripe $nfsexportdir "$@"
-}
-
# Check and add a test group.
add_group() {
local group_id=$1
}
+# check_runas_id_ret() <uid> <gid> <runas-command...>
+#
+# Verify that <runas-command> can execute as <uid>:<gid> and can create a
+# file under $DIR. Returns the status of the file-creation attempt.
check_runas_id_ret() {
-	local myRC=0
-	local myRUNAS_UID=$1
-	local myRUNAS_GID=$2
-	shift 2
-	local myRUNAS=$@
-	if [ -z "$myRUNAS" ]; then
-	error_exit "myRUNAS command must be specified for check_runas_id"
-	fi
-	if $GSS_KRB5; then
-	$myRUNAS krb5_login.sh || \
-	error "Failed to refresh Kerberos V5 TGT for UID $myRUNAS_ID."
-	fi
-	mkdir $DIR/d0_runas_test
-	chmod 0755 $DIR
-	chown $myRUNAS_UID:$myRUNAS_GID $DIR/d0_runas_test
-	$myRUNAS touch $DIR/d0_runas_test/f$$ || myRC=$?
-	rm -rf $DIR/d0_runas_test
-	return $myRC
+	local myRC=0
+	local myRUNAS_UID=$1
+	local myRUNAS_GID=$2
+	shift 2
+	local myRUNAS=$@
+
+	if [ -z "$myRUNAS" ]; then
+	error_exit "check_runas_id_ret requires myRUNAS argument"
+	fi
+
+	# make sure the runas wrapper itself is executable before testing perms
+	$myRUNAS true ||
+	error "Unable to execute $myRUNAS"
+
+	# the UID must exist locally or the chown/touch below cannot succeed
+	id $myRUNAS_UID > /dev/null ||
+	error "Invalid RUNAS_ID $myRUNAS_UID. Please set RUNAS_ID to " \
+	"some UID which exists on MDS and client or add user " \
+	"$myRUNAS_UID:$myRUNAS_GID on these nodes."
+
+	if $GSS_KRB5; then
+	$myRUNAS krb5_login.sh ||
+	error "Failed to refresh krb5 TGT for UID $myRUNAS_ID."
+	fi
+	# create a scratch dir owned by the test user and verify write access
+	mkdir $DIR/d0_runas_test
+	chmod 0755 $DIR
+	chown $myRUNAS_UID:$myRUNAS_GID $DIR/d0_runas_test
+	$myRUNAS -u $myRUNAS_UID -g $myRUNAS_GID touch $DIR/d0_runas_test/f$$ ||
+	myRC=$?
+	rm -rf $DIR/d0_runas_test
+	return $myRC
}
+# check_runas_id() <uid> <gid> <runas-command...>
+#
+# Wrapper around check_runas_id_ret() that turns any failure into a
+# test error.
check_runas_id() {
-	local myRUNAS_UID=$1
-	local myRUNAS_GID=$2
-	shift 2
-	local myRUNAS=$@
-	check_runas_id_ret $myRUNAS_UID $myRUNAS_GID $myRUNAS || \
-	error "unable to write to $DIR/d0_runas_test as UID $myRUNAS_UID.
-	Please set RUNAS_ID to some UID which exists on MDS and client or
-	add user $myRUNAS_UID:$myRUNAS_GID on these nodes."
+	local myRUNAS_UID=$1
+	local myRUNAS_GID=$2
+	shift 2
+	local myRUNAS=$@
+
+	check_runas_id_ret $myRUNAS_UID $myRUNAS_GID $myRUNAS || \
+	error "unable to write to $DIR/d0_runas_test as " \
+	"UID $myRUNAS_UID."
}
# obtain the UID/GID for MPI_USER
fi
}
-delayed_recovery_enabled () {
- local var=${SINGLEMDS}_svc
- do_facet $SINGLEMDS lctl get_param -n mdd.${!var}.stale_export_age > /dev/null 2>&1
-}
-
########################
convert_facet2label() {
echo "${1}-osc-[-0-9a-f]*"
}
-# If the 2.0 MDS was mounted on 1.8 device, then the OSC and LOV names
-# used by MDT would not be changed.
-# mdt lov: fsname-mdtlov
-# mdt osc: fsname-OSTXXXX-osc
-mds_on_old_device() {
- local mds=${1:-"$SINGLEMDS"}
-
- if [ $(lustre_version_code $mds) -gt $(version_code 1.9.0) ]; then
- do_facet $mds "lctl list_param osc.$FSNAME-OST*-osc \
- > /dev/null 2>&1" && return 0
- fi
- return 1
-}
-
get_mdtosc_proc_path() {
local mds_facet=$1
local ost_label=${2:-"*OST*"}
params=$param
fi
+ local plist=$(comma_list $params)
if ! do_rpc_nodes "$(facet_active_host $facet)" \
- wait_import_state $expected "$params" $maxtime; then
+ wait_import_state $expected $plist $maxtime; then
error "$facet: import is not in $expected state after $maxtime"
return 1
fi
params=$($LCTL list_param $param 2>/dev/null || true)
done
fi
+ local plist=$(comma_list $params)
if ! do_rpc_nodes "$(facet_active_host $facet)" \
- wait_import_state $expected "$params" $maxtime \
+ wait_import_state $expected $plist $maxtime \
$error_on_failure; then
if [ $error_on_failure -ne 0 ]; then
error "import is not in ${expected} state"
llverfs $partial_arg $llverfs_opts $dir
}
-#Remove objects from OST
-remove_ost_objects() {
- local facet=$1
- local ostdev=$2
- local group=$3
- shift 3
- local objids="$@"
- local mntpt=$(facet_mntpt $facet)
- local opts=$OST_MOUNT_OPTS
- local i
- local rc
-
- echo "removing objects from $ostdev on $facet: $objids"
- if ! test -b $ostdev; then
- opts=$(csa_add "$opts" -o loop)
- fi
- mount -t $(facet_fstype $facet) $opts $ostdev $mntpt ||
- return $?
- rc=0
- for i in $objids; do
- rm $mntpt/O/$group/d$((i % 32))/$i || { rc=$?; break; }
- done
- umount -f $mntpt || return $?
- return $rc
-}
-
-#Remove files from MDT
-remove_mdt_files() {
- local facet=$1
- local mdtdev=$2
- shift 2
- local files="$@"
- local mntpt=$(facet_mntpt $facet)
- local opts=$MDS_MOUNT_OPTS
-
- echo "removing files from $mdtdev on $facet: $files"
- if [ $(facet_fstype $facet) == ldiskfs ] &&
- ! do_facet $facet test -b $mdtdev; then
- opts=$(csa_add "$opts" -o loop)
- fi
- mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
- return $?
- rc=0
- for f in $files; do
- rm $mntpt/ROOT/$f || { rc=$?; break; }
- done
- umount -f $mntpt || return $?
- return $rc
-}
-
-duplicate_mdt_files() {
- local facet=$1
- local mdtdev=$2
- shift 2
- local files="$@"
- local mntpt=$(facet_mntpt $facet)
- local opts=$MDS_MOUNT_OPTS
-
- echo "duplicating files on $mdtdev on $facet: $files"
- mkdir -p $mntpt || return $?
- if [ $(facet_fstype $facet) == ldiskfs ] &&
- ! do_facet $facet test -b $mdtdev; then
- opts=$(csa_add "$opts" -o loop)
- fi
- mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
- return $?
-
- do_umount() {
- trap 0
- popd > /dev/null
- rm $tmp
- umount -f $mntpt
- }
- trap do_umount EXIT
-
- tmp=$(mktemp $TMP/setfattr.XXXXXXXXXX)
- pushd $mntpt/ROOT > /dev/null || return $?
- rc=0
- for f in $files; do
- touch $f.bad || return $?
- getfattr -n trusted.lov $f | sed "s#$f#&.bad#" > $tmp
- rc=${PIPESTATUS[0]}
- [ $rc -eq 0 ] || return $rc
- setfattr --restore $tmp || return $?
- done
- do_umount
-}
-
run_sgpdd () {
local devs=${1//,/ }
shift
echo -n ${count:-0}
}
-# Get the block size of the filesystem.
-get_block_size() {
- local facet=$1
- local device=$2
- local size
-
- [ -z "$CLIENTONLY" ] && size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
- awk '/^Block size:/ {print $3}')
- echo -n ${size:-0}
-}
-
# Check whether the "ea_inode" feature is enabled or not, to allow
# ldiskfs xattrs over one block in size. Allow both the historical
# Lustre feature name (large_xattr) and the upstream name (ea_inode).
local rc=0
for osc in $oscs; do
- ((rc++))
echo "Check state for $osc"
local evicted=$(do_facet client $LCTL get_param osc.$osc.state |
- tail -n 3 | awk -F"[ [,]" \
- '/EVICTED ]$/ { if (mx<$5) {mx=$5;} } END { print mx }')
+ tail -n 5 | awk -F"[ ,]" \
+ '/EVICTED/ { if (mx<$4) { mx=$4; } } END { print mx }')
if (($? == 0)) && (($evicted > $before)); then
echo "$osc is evicted at $evicted"
- ((rc--))
+ else
+ ((rc++))
+ echo "$osc was not evicted after $before:"
+ do_facet client $LCTL get_param osc.$osc.state |
+ tail -n 8
fi
done
export HSMTOOL_UPDATE_INTERVAL=${HSMTOOL_UPDATE_INTERVAL:=""}
export HSMTOOL_EVENT_FIFO=${HSMTOOL_EVENT_FIFO:=""}
export HSMTOOL_TESTDIR
+ export HSMTOOL_ARCHIVE_FORMAT=${HSMTOOL_ARCHIVE_FORMAT:-v2}
if ! [[ $HSMTOOL =~ hsmtool ]]; then
echo "HSMTOOL = '$HSMTOOL' does not contain 'hsmtool', GLWT" >&2
__lhsmtool_rebind()
{
-	do_facet $facet $HSMTOOL -p "$hsm_root" --rebind "$@" "$mountpoint"
+	# common options (--hsm-root, archive format, verbosity) now come
+	# from the hsmtool_options array built during argument parsing
+	do_facet $facet $HSMTOOL "${hsmtool_options[@]}" --rebind "$@" "$mountpoint"
}
__lhsmtool_import()
{
mkdir -p "$(dirname "$2")" ||
error "cannot create directory '$(dirname "$2")'"
-	do_facet $facet $HSMTOOL -p "$hsm_root" --import "$@" "$mountpoint"
+	# common options (--hsm-root, archive format, verbosity) now come
+	# from the hsmtool_options array built during argument parsing
+	do_facet $facet $HSMTOOL "${hsmtool_options[@]}" --import "$@" "$mountpoint"
}
__lhsmtool_setup()
{
local host="$(facet_host "$facet")"
-	local cmd="$HSMTOOL $HSMTOOL_VERBOSE --daemon --pid-file=$HSMTOOL_PID_FILE --hsm-root \"$hsm_root\""
+	# hsm-root/verbosity/archive-format are folded into hsmtool_options
+	local cmd="$HSMTOOL ${hsmtool_options[@]} --daemon --pid-file=$HSMTOOL_PID_FILE"
[ -n "$bandwidth" ] && cmd+=" --bandwidth $bandwidth"
[ -n "$archive_id" ] && cmd+=" --archive $archive_id"
-	[ ${#misc_options[@]} -gt 0 ] &&
-	cmd+=" $(IFS=" " echo "$@")"
-	cmd+=" \"$mountpoint\""
+# [ ${#misc_options[@]} -gt 0 ] &&
+# cmd+=" $(IFS=" " echo "$@")"
+	# remaining (copytool-specific) action options precede the mountpoint
+	cmd+=" $@ \"$mountpoint\""
-	echo "Starting copytool '$facet' on '$host'"
+	echo "Starting copytool '$facet' on '$host' with cmdline '$cmd'"
stack_trap "pkill_copytools $host TERM || true" EXIT
do_node "$host" "$cmd < /dev/null > \"$(copytool_logfile $facet)\" 2>&1"
}
# Parse arguments
local fail_on_error=true
- local -a misc_options
+ local -a hsmtool_options=("--hsm-root=$hsm_root")
+ local -a action_options=()
+
+ if [[ -n "$HSMTOOL_ARCHIVE_FORMAT" ]]; then
+ hsmtool_options+=("--archive-format=$HSMTOOL_ARCHIVE_FORMAT")
+ fi
+
+ if [[ -n "$HSMTOOL_VERBOSE" ]]; then
+ hsmtool_options+=("$HSMTOOL_VERBOSE")
+ fi
+
while [ $# -gt 0 ]; do
case "$1" in
-f|--facet)
;;
*)
# Uncommon(/copytool dependent) option
- misc_options+=("$1")
+ action_options+=("$1")
;;
esac
shift
;;
esac
- __${copytool}_${action} "${misc_options[@]}"
+ __${copytool}_${action} "${action_options[@]}"
if [ $? -ne 0 ]; then
local error_msg
error_msg="Failed to start copytool $facet on '$host'"
;;
import)
- local src="${misc_options[0]}"
- local dest="${misc_options[1]}"
+ local src="${action_options[0]}"
+ local dest="${action_options[1]}"
error_msg="Failed to import '$src' to '$dest'"
;;
rebind)
local saved_debug=$($LCTL get_param -n debug)
local list=$(comma_list $(all_nodes))
- do_nodes $list $LCTL set_param debug=0
+ do_nodes $list $LCTL set_param -n debug=0
}
$LUSTRE/tests/createmany $*
local rc=$?
(( count > 100 )) &&
- do_nodes $list "$LCTL set_param debug=\\\"$saved_debug\\\""
+ do_nodes $list "$LCTL set_param -n debug=\\\"$saved_debug\\\""
return $rc
}
local saved_debug=$($LCTL get_param -n debug)
local list=$(comma_list $(all_nodes))
- do_nodes $list $LCTL set_param debug=0
+ do_nodes $list $LCTL set_param -n debug=0
}
$LUSTRE/tests/unlinkmany $*
local rc=$?
(( count > 100 )) &&
- do_nodes $list "$LCTL set_param debug=\\\"$saved_debug\\\""
+ do_nodes $list "$LCTL set_param -n debug=\\\"$saved_debug\\\""
return $rc
}
check_set_fallocate || skip "need at least 2.13.57 for fallocate"
}
+# disable_opencache()
+#
+# Turn off the client open cache (llite.*.opencache_threshold_count),
+# saving the previous value in $saved_OPENCACHE_value so that
+# restore_opencache() can put it back.
+function disable_opencache()
+{
+	local state=$($LCTL get_param -n "llite.*.opencache_threshold_count" | head -1)
+
+	# remember the original setting only on the first call
+	test -z "${saved_OPENCACHE_value}" &&
+	export saved_OPENCACHE_value="$state"
+
+	# nothing to do if the cache is already disabled
+	[[ "$state" = "off" ]] && return
+
+	$LCTL set_param -n "llite.*.opencache_threshold_count"=off
+}
+
+# set_opencache() <value>
+#
+# Set the client open cache threshold to <value>, saving the current
+# setting in $saved_OPENCACHE_value (unless one was already saved) so it
+# can be restored by restore_opencache(). No-op when <value> is empty.
+function set_opencache()
+{
+	local newvalue="$1"
+	local state=$($LCTL get_param -n "llite.*.opencache_threshold_count")
+
+	[[ -n "$newvalue" ]] || return
+
+	# keep the earliest saved value; do not overwrite on repeated calls
+	[[ -n "${saved_OPENCACHE_value}" ]] ||
+	export saved_OPENCACHE_value="$state"
+
+	$LCTL set_param -n "llite.*.opencache_threshold_count"=$newvalue
+}
+
+# restore_opencache()
+#
+# Restore the open cache threshold saved by disable_opencache() or
+# set_opencache(). No-op if no value was saved.
+function restore_opencache()
+{
+	[[ -z "${saved_OPENCACHE_value}" ]] ||
+	$LCTL set_param -n "llite.*.opencache_threshold_count"=${saved_OPENCACHE_value}
+}