export MDS1_VERSION=$(lustre_version_code mds1)
export OST1_VERSION=$(lustre_version_code ost1)
export CLIENT_VERSION=$(lustre_version_code client)
+
+ # Prefer using "mds1" directly instead of SINGLEMDS.
+ # Keep this for compat until it is removed from scripts.
+ export SINGLEMDS=${SINGLEMDS:-mds1}
}
init_test_env() {
[ ! -f "$LCTL" ] && export LCTL=$(which lctl)
export LFS=${LFS:-"$LUSTRE/utils/lfs"}
[ ! -f "$LFS" ] && export LFS=$(which lfs)
- SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"}
- GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"}
export PERM_CMD=${PERM_CMD:-"$LCTL conf_param"}
echo "unloading modules on: '$list'"
do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
do_rpc_nodes "$list" check_mem_leak
- do_rpc_nodes "$list" "rm /etc/udev/rules.d/99-lustre-test.rules"
+ do_rpc_nodes "$list" "rm -f /etc/udev/rules.d/99-lustre-test.rules"
do_rpc_nodes "$list" "udevadm control --reload-rules"
do_rpc_nodes "$list" "udevadm trigger"
fi
virt=$(dmidecode -s system-product-name | awk '{print $1}')
case $virt in
- VMware|KVM|VirtualBox|Parallels)
+ VMware|KVM|VirtualBox|Parallels|Bochs)
echo $virt | tr '[A-Z]' '[a-z]' ;;
*) ;;
esac
}
set_default_debug () {
- local debug=${1:-"$PTLDEBUG"}
- local subsys=${2:-"$SUBSYSTEM"}
- local debug_size=${3:-$DEBUG_SIZE}
+ local debug=${1:-"$PTLDEBUG"}
+ local subsys=${2:-"$SUBSYSTEM"}
+ local debug_size=${3:-$DEBUG_SIZE}
- [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
- [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
+ [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
+ [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
- [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
+ [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
}
# Set the default debug/subsystem masks and debug buffer size on a list
# of nodes, including the local host if it appears in the list.
# Arguments:
#   $1 - comma-separated node list
#   $2 - debug mask (default: $PTLDEBUG)
#   $3 - subsystem debug mask (default: $SUBSYSTEM)
#   $4 - debug buffer size in MB (default: $DEBUG_SIZE)
set_default_debug_nodes () {
	local nodes="$1"
	local debug="${2:-"$PTLDEBUG"}"
	local subsys="${3:-"$SUBSYSTEM"}"
	local debug_size="${4:-$DEBUG_SIZE}"

	# Handle the local host directly and drop it from the RPC list.
	# Pass the caller-supplied overrides along: previously the local
	# call used only the global defaults, so per-call debug settings
	# were silently ignored on $HOSTNAME.
	if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
		nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
		set_default_debug "$debug" "$subsys" "$debug_size"
	fi

	[[ -z "$nodes" ]] ||
		do_rpc_nodes "$nodes" set_default_debug \
			\\\"$debug\\\" \\\"$subsys\\\" $debug_size || true
}
set_default_debug_facet () {
- local facet=$1
- local node=$(facet_active_host $facet)
- [ -z "$node" ] && echo "No host defined for facet $facet" && exit 1
+ local facet=$1
+ local debug="${2:-"$PTLDEBUG"}"
+ local subsys="${3:-"$SUBSYSTEM"}"
+ local debug_size="${4:-$DEBUG_SIZE}"
+ local node=$(facet_active_host $facet)
+
+ [ -n "$node" ] || error "No host defined for facet $facet"
- set_default_debug_nodes $node
+ set_default_debug_nodes $node "$debug" "$subsys" $debug_size
}
set_hostid () {
exit 1
fi
+ if $GSS_SK; then
+ # update mount option with skpath
+ opts=$(add_sk_mntflag $opts)
+ fi
+
echo "Starting client: $client: $flags $opts $device $mnt"
do_node $client mkdir -p $mnt
if [ -n "$FILESET" -a -z "$SKIP_FILESET" ];then
}
# Unmount a Lustre client mount point on a (possibly remote) node.
# Arguments:
#   $1 - client node name
#   $2 - mount point
#   $3 - if non-empty, force the umount and kill processes using $2
zconf_umount() {
	local client=$1
	local mnt=$2
	local force
	local busy
	local need_kill
	# declare pids local so we don't clobber a caller's variable
	local pids
	local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true

	[ "$3" ] && force=-f
	# nothing mounted there: nothing to do
	[ $running -eq 0 ] && return 0

	echo "Stopping client $client $mnt (opts:$force)"
	# lsof fails when no process has files open below $mnt
	do_node $client lsof -t $mnt || need_kill=no
	if [ "x$force" != "x" ] && [ "x$need_kill" != "xno" ]; then
		pids=$(do_node $client lsof -t $mnt | sort -u);
		if [ -n "$pids" ]; then
			do_node $client kill -9 $pids || true
		fi
	fi

	busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") ||
		true
	if [ $busy -ne 0 ] ; then
		echo "$mnt is still busy, wait one second" && sleep 1
		do_node $client umount $force $mnt
	fi
}
# nodes is comma list
change_active ${affecteds[index]}
wait_for_facet ${affecteds[index]}
+ if $GSS_SK; then
+ init_gss
+ init_facets_vars_simple
+ fi
# start mgs first if it is affected
if ! combined_mgs_mds &&
list_member ${affecteds[index]} mgs; then
affected=$(exclude_items_from_list ${affecteds[index]} mgs)
echo mount facets: ${affecteds[index]}
mount_facets ${affecteds[index]}
+ if $GSS_SK; then
+ do_nodes $(comma_list $(all_nodes)) \
+ "keyctl show | grep lustre | cut -c1-11 |
+ sed -e 's/ //g;' |
+ xargs -IX keyctl setperm X 0x3f3f3f3f"
+ fi
done
}
local facets=$1
local clients=${CLIENTS:-$HOSTNAME}
+ SK_NO_KEY_save=$SK_NO_KEY
+ if $GSS_SK; then
+ export SK_NO_KEY=false
+ fi
facet_failover $* || error "failover: $?"
+ export SK_NO_KEY=$SK_NO_KEY_save
# to initiate all OSC idling connections
clients_up
wait_clients_import_state "$clients" "$facets" "\(FULL\|IDLE\)"
echo -n "$1" | tr '[:lower:]' '[:upper:]'
}
# Merge repeated "-O <feature>" and "-E <option>" mkfs flags into a single
# "-O a,b,..." and a single "-E x,y,..." respectively, since mke2fs only
# honors the last occurrence of each flag.  All other tokens are passed
# through unchanged, in their original order, ahead of the merged flags.
# Arguments: the option string, as one or more words
# Outputs:   the squashed option string on stdout
# Returns:   0 always
squash_opt() {
	local var="$*"
	local other=""
	local opt_o=""
	local opt_e=""
	local first_e=0
	local first_o=0
	local take=""

	# normalize "a, b" to "a,b" so a comma-list stays a single word
	var=$(echo "$var" | sed -e 's/,\( \)*/,/g')
	for i in $(echo "$var"); do
		if [ "$i" == "-O" ]; then
			take="o";
			first_o=$(($first_o + 1))
			continue;
		fi
		if [ "$i" == "-E" ]; then
			take="e";
			first_e=$(($first_e + 1 ))
			continue;
		fi
		case $take in
		"o")
			# from the second -O on, append with a comma separator
			[ $first_o -gt 1 ] && opt_o+=",";
			opt_o+="$i";
			;;
		"e")
			[ $first_e -gt 1 ] && opt_e+=",";
			opt_e+="$i";
			;;
		*)
			other+=" $i";
			;;
		esac
		take=""
	done

	echo -n "$other"
	[ -n "$opt_o" ] && echo " -O $opt_o"
	[ -n "$opt_e" ] && echo " -E $opt_e"
	# don't leak the status of the last [ -n ... ] test: without this
	# the function returned 1 whenever no -E option was present
	return 0
}
+
mkfs_opts() {
local facet=$1
local dev=$2
opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"}
if [ $fstype == ldiskfs ]; then
- fs_mkfs_opts+="-O ea_inode"
+ fs_mkfs_opts+="-O ea_inode,large_dir"
var=${facet}_JRN
if [ -n "${!var}" ]; then
var=${type}_FS_MKFS_OPTS
fs_mkfs_opts+=${!var:+" ${!var}"}
+ [ $fstype == ldiskfs ] && fs_mkfs_opts=$(squash_opt $fs_mkfs_opts)
+
if [ -n "${fs_mkfs_opts## }" ]; then
opts+=" --mkfsoptions=\\\"${fs_mkfs_opts## }\\\""
fi
fi
}
# Lightweight variant of init_facets_vars: export the device name and
# mount options for each local MDS/MGS/OST facet (mdsN_dev/mdsN_opt,
# mgs_dev/mgs_opt, ostN_dev/ostN_opt) without querying the servers.
init_facets_vars_simple () {
	local devname

	if ! remote_mds_nodsh; then
		for num in $(seq $MDSCOUNT); do
			devname=$(mdsdevname $num)
			eval export mds${num}_dev=${devname}
			eval export mds${num}_opt=\"${MDS_MOUNT_OPTS}\"
		done
	fi

	# a separate MGS device exists only when the MGS is not co-located
	# with the first MDS
	if ! combined_mgs_mds ; then
		eval export mgs_dev=$(mgsdevname)
		eval export mgs_opt=\"${MGS_MOUNT_OPTS}\"
	fi

	if ! remote_ost_nodsh; then
		for num in $(seq $OSTCOUNT); do
			devname=$(ostdevname $num)
			eval export ost${num}_dev=${devname}
			eval export ost${num}_opt=\"${OST_MOUNT_OPTS}\"
		done
	fi
}
+
osc_ensure_active () {
local facet=$1
local timeout=$2
TIMEOUT=$(lctl get_param -n timeout)
TIMEOUT=${TIMEOUT:-20}
- if [ -n $arg1 ]; then
+ if [ -n "$arg1" ]; then
[ "$arg1" = "server_only" ] && return
fi
}
# Check whether the given mount point currently holds a mounted Lustre
# filesystem.
# Arguments: $1 - mount point path
# Returns:   0 if mounted, 1 otherwise (or if no mount point was given)
is_mounted () {
	local mntpt=$1

	# quoted: an unquoted [ -z $mntpt ] degenerates to the one-argument
	# form of test when $mntpt is empty and only works by accident
	[ -z "$mntpt" ] && return 1

	local mounted=$(mounted_lustre_filesystems)

	echo $mounted' ' | grep -w -q $mntpt' '
}
is_empty_dir() {
# Report whether the client mount at $1 (default: $MOUNT) was mounted
# with flock support enabled.
flock_is_enabled()
{
	local mountpath=${1:-$MOUNT}

	# enabled iff some mount entry for $mountpath carries a flock
	# option other than "noflock"
	mount | grep "$mountpath .*flock" | grep -qv noflock
}
}
start_full_debug_logging() {
- debugsave
- debug_size_save
+ debugsave
+ debug_size_save
- local FULLDEBUG=-1
- local DEBUG_SIZE=150
+ local fulldebug=-1
+ local debug_size=150
+ local nodes=$(comma_list $(nodes_list))
- do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE"
- do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=$FULLDEBUG;"
+ do_nodes $nodes "$LCTL set_param debug=$fulldebug debug_mb=$debug_size"
}
stop_full_debug_logging() {
- debug_size_restore
- debugrestore
+ debug_size_restore
+ debugrestore
}
# prints bash call stack
# usage: stack_trap arg sigspec
#
# stack_trap() behaves like bash's built-in trap, except that it "stacks" the
-# command ``arg`` on top of previously defined commands for ``sigspec`` instead
+# command "arg" on top of previously defined commands for "sigspec" instead
# of overwriting them.
# stacked traps are executed in reverse order of their registration
#
stack_trap()
{
local arg="$1"
- local sigspec="$2"
+ local sigspec="${2:-EXIT}"
# Use "trap -p" to get the quoting right
local old_trap="$(trap -p "$sigspec")"
if [[ $1 = [a-z]* ]]; then
echo $1
else
- echo ${1%%[a-z]*}
+ echo ${1%%[a-zA-Z]*}
fi
}
check_node_health
check_dmesg_for_errors || error "Error in dmesg detected"
if [ "$PARALLEL" != "yes" ]; then
- ps auxww | grep -v grep | grep -q multiop &&
+ ps auxww | grep -v grep | grep -q "multiop " &&
error "multiop still running"
fi
unset TESTNAME
########################
# Map a facet name (e.g. mds1, ost2) to its target label (e.g.
# lustre-MDT0000) via the ${facet}_svc variable.
# Arguments: $1 - facet name; "ost" is an alias for ost1, and "mgs" maps
#                 to mds1 when the MGS is combined with the MDS
# Outputs:   the label on stdout; calls error() if none is defined
convert_facet2label() {
	local facet=$1

	if [ x$facet = xost ]; then
		facet=ost1
	elif [ x$facet = xmgs ] && combined_mgs_mds ; then
		facet=mds1
	fi

	local varsvc=${facet}_svc

	# quoted: an unquoted [ -n ${!varsvc} ] is always true, which made
	# the error branch unreachable
	if [ -n "${!varsvc}" ]; then
		echo ${!varsvc}
	else
		error "No label for $facet!"
	fi
}
get_clientosc_proc_path() {
for ost in $OSTS; do
do_facet mgs lctl pool_remove $1 $ost
done
+ wait_update_facet $SINGLEMDS "lctl pool_list $1 | wc -l" "1" ||
+ error "MDS: pool_list $1 failed"
do_facet mgs lctl pool_destroy $1
}
# ldiskfs xattrs over one block in size. Allow both the historical
# Lustre feature name (large_xattr) and the upstream name (ea_inode).
large_xattr_enabled() {
- [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 0
+ [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 1
local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
# Get the maximum xattr size supported by the filesystem.
max_xattr_size() {
	# the MDS advertises the maximum xattr size it supports to clients
	# through the llite max_easize parameter, so query it directly
	# instead of computing the value from the backing filesystem
	$LCTL get_param -n llite.*.max_easize
}
# Dump the value of the named xattr from a file.
local file=$1
local pool=$2
local tlist="$3"
- local res=$($GETSTRIPE $file | grep 0x | cut -f2)
+ local res=$($LFS getstripe $file | grep 0x | cut -f2)
for i in $res
do
for t in $tlist ; do
local tdir=$2
echo "Setting pool on directory $tdir"
- $SETSTRIPE -c 2 -p $pool $tdir && return 0
+ $LFS setstripe -c 2 -p $pool $tdir && return 0
error_noexit "Cannot set pool $pool to $tdir"
return 1
local tdir=$2
echo "Checking pool on directory $tdir"
- local res=$($GETSTRIPE --pool $tdir | sed "s/\s*$//")
+ local res=$($LFS getstripe --pool $tdir | sed "s/\s*$//")
[ "$res" = "$pool" ] && return 0
error_noexit "Pool on '$tdir' is '$res', not '$pool'"
for i in $(seq -w 1 $count)
do
local file=$tdir/spoo-$i
- $SETSTRIPE -p $pool $file
+ $LFS setstripe -p $pool $file
check_file_in_pool $file $pool "$tlist" || \
failed=$((failed + 1))
done
mkdir -p $tdir ||
{ error_noexit "unable to create $tdir"; return 1 ; }
local file="/..$tdir/$tfile-1"
- $SETSTRIPE -p $pool $file ||
+ $LFS setstripe -p $pool $file ||
{ error_noexit "unable to create $file" ; return 2 ; }
cd $tdir
- $SETSTRIPE -p $pool $tfile-2 || {
+ $LFS setstripe -p $pool $tfile-2 || {
error_noexit "unable to create $tfile-2 in $tdir"
return 3
}
return 2
}
# setstripe on an empty pool should fail
- $SETSTRIPE -p $pool $file 2>/dev/null && {
+ $LFS setstripe -p $pool $file 2>/dev/null && {
error_noexit "expected failure when creating file" \
"with empty pool"
return 3
return 1
}
# setstripe on an empty pool should fail
- $SETSTRIPE -p $pool $file 2>/dev/null && {
+ $LFS setstripe -p $pool $file 2>/dev/null && {
error_noexit "expected failure when creating file" \
"with missing pool"
return 2
[[ -z "$file" || -z "$expected" ]] &&
error "check_obdidx: invalid argument!"
- obdidx=$(comma_list $($GETSTRIPE $file | grep -A $OSTCOUNT obdidx |
+ obdidx=$(comma_list $($LFS getstripe $file | grep -A $OSTCOUNT obdidx |
grep -v obdidx | awk '{print $1}' | xargs))
[[ $obdidx = $expected ]] ||
[[ -z "$file" || -z "$expected" ]] &&
error "check_start_ost_idx: invalid argument!"
- start_ost_idx=$($GETSTRIPE $file | grep -A 1 obdidx | grep -v obdidx |
- awk '{print $1}')
+ start_ost_idx=$($LFS getstripe $file | grep -A 1 obdidx |
+ grep -v obdidx | awk '{print $1}')
[[ $start_ost_idx = $expected ]] ||
error "OST index of the first stripe on $file is" \
# Check whether the test setup supports project quotas.
# Returns 0 when $ENABLE_PROJECT_QUOTAS is set and the MDS backend is
# new enough, 1 otherwise.
is_project_quota_supported() {
	# project quotas must be explicitly enabled by the test config
	$ENABLE_PROJECT_QUOTAS || return 1

	# ldiskfs: supported since Lustre 2.9.55, provided the lfs tool on
	# mds1 knows about projects
	[[ "$(facet_fstype $SINGLEMDS)" == "ldiskfs" &&
	   $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.55) ]] &&
		do_facet mds1 lfs --help |& grep -q project && return 0

	# zfs: supported since Lustre 2.10.53, provided the zpool has the
	# project_quota feature
	[[ "$(facet_fstype $SINGLEMDS)" == "zfs" &&
	   $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.10.53) ]] &&
		do_facet mds1 $ZPOOL get all | grep -q project_quota && return 0

	return 1
}
# ZFS project quota enable/disable:
#   This feature becomes active as soon as it is enabled and can never be
#   disabled again. Each filesystem is upgraded automatically when remounted
#   or when a new file is created under that filesystem. The upgrade can also
#   be triggered manually on a filesystem via 'zfs set version=current
#   <pool/fs>'. The upgrade runs in the background and may take a while to
#   complete for filesystems containing a large number of files.
# Enable the project quota feature on all MDT/OST backing devices.
# For ldiskfs targets this runs "tune2fs -O project"; for ZFS targets it
# enables the pool's project_quota feature (a one-way upgrade).  The
# whole filesystem is stopped and restarted around the change.
enable_project_quota() {
	is_project_quota_supported || return 0

	# keep the zpools imported while the targets are stopped, and
	# restore the previous KEEP_ZPOOL setting even on error exit
	local zkeeper=${KEEP_ZPOOL}
	stack_trap "KEEP_ZPOOL=$zkeeper" EXIT
	KEEP_ZPOOL="true"
	stopall || error "failed to stopall (1)"

	local zfeat_en="feature@project_quota=enabled"
	for facet in $(seq -f mds%g $MDSCOUNT) $(seq -f ost%g $OSTCOUNT); do
		# ${facet:0:3} is "mds" or "ost"; the backend fstype is
		# recorded in mds1_FSTYPE / ost1_FSTYPE
		local facet_fstype=${facet:0:3}1_FSTYPE
		local devname

		if [ "${!facet_fstype}" = "zfs" ]; then
			devname=$(zpool_name ${facet})
			do_facet ${facet} $ZPOOL set "$zfeat_en" $devname ||
				error "$ZPOOL set $zfeat_en $devname"
		else
			[ ${facet:0:3} == "mds" ] &&
				devname=$(mdsdevname ${facet:3}) ||
				devname=$(ostdevname ${facet:3})
			do_facet ${facet} $TUNE2FS -O project $devname ||
				error "tune2fs $devname failed"
		fi
	done

	KEEP_ZPOOL="${zkeeper}"
	mount
	setupall
}
disable_project_quota() {
is_project_quota_supported || return 0
- [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && return 0
+ [ "$mds1_FSTYPE" != "ldiskfs" ] && return 0
stopall || error "failed to stopall (1)"
for num in $(seq $MDSCOUNT); do
local action=$1
shift
+ # Use default values
+ local facet=$SINGLEAGT
+ local mountpoint="${MOUNT2:-$MOUNT}"
+ local hsm_root="${hsm_root:-$(hsm_root "$facet")}"
+
# Parse arguments
local fail_on_error=true
local -a misc_options
case "$1" in
-f|--facet)
shift
- local facet="$1"
+ facet="$1"
;;
-m|--mountpoint)
shift
- local mountpoint="$1"
+ mountpoint="$1"
;;
-a|--archive-id)
shift
;;
-h|--hsm-root)
shift
- local hsm_root="$1"
+ hsm_root="$1"
;;
-b|--bwlimit)
shift
shift
done
- # Use default values if needed
- local facet=${facet:-$SINGLEAGT}
- local mountpoint="${mountpoint:-${MOUNT2:-$MOUNT}}"
- local hsm_root="${hsm_root:-$(hsm_root "$facet")}"
-
stack_trap "do_facet $facet rm -rf '$hsm_root'" EXIT
do_facet $facet mkdir -p "$hsm_root" ||
error "mkdir '$hsm_root' failed"
awk '{ print $1 * 2; exit; }')
sleep $delay
}
+
# Assert that file $1 has exactly $2 layout components.
check_component_count() {
	local comp_cnt=$($LFS getstripe --component-count $1)
	[ $comp_cnt -eq $2 ] || error "$1, component count $comp_cnt != $2"
}
+
# Assert that file $1 has no component that is both initialized and
# carries the "extension" flag (self-extending layout extension
# components must remain uninitialized).
verify_no_init_extension() {
	local flg_opts="--component-flags init,extension"
	local found=$($LFS find $flg_opts $1 | wc -l)
	[ $found -eq 0 ] || error "$1 has component with initialized extension"
}
+
# Assert that file $1 has exactly one initialized component starting at
# offset 0.
verify_comp_at_zero() {
	# declare locals so we don't clobber the caller's variables, and to
	# stay consistent with the sibling verify_* helpers
	local flg_opts="--component-flags init"
	local found=$($LFS find --component-start 0M $flg_opts $1 | wc -l)

	[ $found -eq 1 ] ||
		error "No component starting at zero(!)"
}
+
+# version after which Self-Extending Layouts are available
+SEL_VER="2.12.55"
+
# Run the standard Self-Extending Layout sanity checks on a file: no
# initialized "extension" components, a component starting at offset
# zero, and the expected total component count.
# Arguments: $1 - file to check, $2 - expected component count
sel_layout_sanity() {
	local path=$1
	local expected=$2

	verify_no_init_extension $path
	verify_comp_at_zero $path
	check_component_count $path $expected
}
+