export SK_S2S=${SK_S2S:-false}
export SK_S2SNM=${SK_S2SNM:-TestFrameNM}
export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli}
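+# skip the first SSK flavor wait (the flag is consumed and reset when flavors are set)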
+export SK_SKIPFIRST=${SK_SKIPFIRST:-true}
export IDENTITY_UPCALL=default
export QUOTA_AUTO=1
export FLAKEY=${FLAKEY:-true}
fi
}
+# Gather information about the Lustre environment and export it for use
+# by the Lustre tests.
+# usage: get_lustre_env
+# input: no arguments
+# output: no return value; environment variables are exported
+
+get_lustre_env() {
+
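+ # backing filesystem type (ldiskfs or zfs) of the first MDT and OST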
+ export mds1_FSTYPE=${mds1_FSTYPE:-$(facet_fstype mds1)}
+ export ost1_FSTYPE=${ost1_FSTYPE:-$(facet_fstype ost1)}
+
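+ # Lustre version codes of the MGS, MDS, OST and client facets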
+ export MGS_VERSION=$(lustre_version_code mgs)
+ export MDS1_VERSION=$(lustre_version_code mds1)
+ export OST1_VERSION=$(lustre_version_code ost1)
+ export CLIENT_VERSION=$(lustre_version_code client)
+}
+
init_test_env() {
export LUSTRE=$(absolute_path $LUSTRE)
export TESTSUITE=$(basename $0 .sh)
export DO_CLEANUP=${DO_CLEANUP:-true}
export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
export CLEANUP_DM_DEV=false
+ export PAGE_SIZE=$(get_page_size client)
export MKE2FS=$MKE2FS
if [ -z "$MKE2FS" ]; then
return 0
fi
+ # Create special udev test rules on every node
+ if [ -f $LUSTRE/lustre/conf/99-lustre.rules ]; then
+ sed -e "s|/usr/sbin/lctl|$LCTL|g" \
+ $LUSTRE/lustre/conf/99-lustre.rules \
+ > /etc/udev/rules.d/99-lustre-test.rules
+ else
+ echo "SUBSYSTEM==\"lustre\", ACTION==\"change\", ENV{PARAM}==\"?*\", RUN+=\"$LCTL set_param '\$env{PARAM}=\$env{SETTING}'\"" \
+ > /etc/udev/rules.d/99-lustre-test.rules
+ fi
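+ # reload udev rules and retrigger events so the test rule takes effect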
+ udevadm control --reload-rules
+ udevadm trigger
+
echo Loading modules from $LUSTRE
local ncpus
$LUSTRE_RMMOD ldiskfs || return 2
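+ # remove the udev test rules added during test setup and reload the rules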
+ [ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
+ rm /etc/udev/rules.d/99-lustre-test.rules
+ udevadm control --reload-rules
+ udevadm trigger
+
if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
if [ -n "$list" ]; then
echo "unloading modules on: '$list'"
do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
do_rpc_nodes "$list" check_mem_leak
+ do_rpc_nodes "$list" "rm /etc/udev/rules.d/99-lustre-test.rules"
+ do_rpc_nodes "$list" "udevadm control --reload-rules"
+ do_rpc_nodes "$list" "udevadm trigger"
fi
fi
# security ctx config for keyring
SK_NO_KEY=false
mkdir -p $SK_OM_PATH
- mount -o bind $SK_OM_PATH /etc/request-key.d/
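+ # bind mount SK_OM_PATH over /etc/request-key.d unless it is already mounted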
+ if grep -q request-key /proc/mounts; then
+ echo "SSK: Request key already mounted."
+ else
+ mount -o bind $SK_OM_PATH /etc/request-key.d/
+ fi
local lgssc_conf_line='create lgssc * * '
lgssc_conf_line+=$(which lgss_keyring)
lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
-m $SK_PATH/$FSNAME-nmclient.key \
>/dev/null 2>&1"
fi
+ fi
+ if $GSS_SK; then
# mount options for servers and clients
MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS)
MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS)
$RPC_MODE || echo "Cleaning up Shared Key.."
do_nodes $(comma_list $(all_nodes)) "rm -f \
$SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key"
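+ # unlink any lustre keys left in the keyrings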
+ do_nodes $(comma_list $(all_nodes)) "keyctl show | \
+ awk '/lustre/ { print \\\$1 }' | xargs -IX keyctl unlink X"
# Remove the mount and clean up the files we added to SK_PATH
- do_nodes $(comma_list $(all_nodes)) "umount \
- /etc/request-key.d/"
+ do_nodes $(comma_list $(all_nodes)) "while grep -q \
+ request-key.d /proc/mounts; do umount \
+ /etc/request-key.d/; done"
do_nodes $(comma_list $(all_nodes)) "rm -f \
$SK_OM_PATH/lgssc.conf"
do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
set_default_debug_facet $facet
- if [[ $facet == mds* ]]; then
- do_facet $facet \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 2>/dev/null
- fi
-
if [[ $opts =~ .*nosvc.* ]]; then
echo "Start $dm_dev without service"
else
mount_facet ${facet}
RC=$?
- if [[ $facet == mds* ]]; then
- do_facet $facet \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- fi
-
return $RC
}
# restore old quota type settings
restore_quota() {
if [ "$old_MDT_QUOTA_TYPE" ]; then
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs $PERM_CMD \
osd-*.$FSNAME-MDT*.quota_slave.enable = \
$old_MDT_QUOTA_TYPE
fi
fi
if [ "$old_OST_QUOTA_TYPE" ]; then
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs $PERM_CMD \
osd-*.$FSNAME-OST*.quota_slave.enable = \
$old_OST_QUOTA_TYPE
export old_MDT_QUOTA_TYPE=$mdt_qtype
export old_OST_QUOTA_TYPE=$ost_qtype
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
do_facet mgs $PERM_CMD \
osd-*.$FSNAME-MDT*.quota_slave.enable=$QUOTA_TYPE
do_facet mgs $PERM_CMD \
LFS=$LFS \
LCTL=$LCTL \
FSNAME=$FSNAME \
+ MPIRUN=$MPIRUN \
+ MPIRUN_OPTIONS=\\\"$MPIRUN_OPTIONS\\\" \
+ MACHINEFILE_OPTION=\\\"$MACHINEFILE_OPTION\\\" \
+ num_clients=$(get_node_count ${CLIENTS//,/ }) \
+ ior_THREADS=$ior_THREADS ior_iteration=$ior_iteration \
+ ior_blockSize=$ior_blockSize \
+ ior_blockUnit=$ior_blockUnit \
+ ior_xferSize=$ior_xferSize ior_type=$ior_type \
+ ior_DURATION=$ior_DURATION \
+ ior_stripe_params=\\\"$ior_stripe_params\\\" \
+ ior_custom_params=\\\"$ior_custom_params\\\" \
+ mpi_ior_custom_threads=$mpi_ior_custom_threads \
run_${load}.sh" &
local ppid=$!
log "Started client load: ${load} on $client"
if $GSS_SK; then
set_rule $FSNAME any cli2mdt $SK_FLAVOR
set_rule $FSNAME any cli2ost $SK_FLAVOR
- wait_flavor cli2mdt $SK_FLAVOR
- wait_flavor cli2ost $SK_FLAVOR
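+ # on the first pass skip the flavor wait; give the clients time and force idle connections to reconnect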
+ if $SK_SKIPFIRST; then
+ export SK_SKIPFIRST=false
+
+ sleep 30
+ do_nodes $CLIENTS \
+ "lctl set_param osc.*.idle_connect=1"
+ return
+ else
+ wait_flavor cli2mdt $SK_FLAVOR
+ wait_flavor cli2ost $SK_FLAVOR
+ fi
else
set_flavor_all $SEC
fi
final=$((orig + 5))
fi
- if [[ $PERM_CMD = *"set_param -P"* ]]; then
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
echo "Setting $test_param from $orig to $final"
do_facet mgs "$PERM_CMD $test_param='$final'" ||
error "$PERM_CMD $test_param failed"
TIMEOUT=$(lctl get_param -n timeout)
TIMEOUT=${TIMEOUT:-20}
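+ # when called with "server_only", keep the locally read timeout and return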
+ if [ -n "$arg1" ]; then
+ [ "$arg1" = "server_only" ] && return
+ fi
+
remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
fi
fi
- init_gss
if $GSS_SK; then
set_flavor_all null
elif $GSS; then
set_flavor_all $SEC
fi
- if [ -z "$CLIENTONLY" ]; then
- # Enable remote MDT create for testing
- for num in $(seq $MDSCOUNT); do
- do_facet mds$num \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- done
- fi
-
if [ "$ONLY" == "setup" ]; then
exit 0
fi
drop_request() {
# OBD_FAIL_MDS_ALL_REQUEST_NET
RC=0
- do_facet $SINGLEMDS lctl set_param fail_loc=0x123
+ do_facet $SINGLEMDS lctl set_param fail_val=0 fail_loc=0x123
do_facet client "$1" || RC=$?
do_facet $SINGLEMDS lctl set_param fail_loc=0
return $RC
local status=0
local log=$TESTSUITELOG
- [ -f "$log" ] && grep -q FAIL $log && status=1
+ [ -f "$log" ] && grep -qw FAIL $log && status=1
exit $status
}
all_mdts_nodes () {
local host
local failover_host
- local nodes="${mds_HOST} ${mdsfailover_HOST}"
+ local nodes
local nodes_sort
local i
nodes="$nodes ${!host} ${!failover_host}"
done
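+ # fall back to mds_HOST when no per-MDT hosts were found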
+ [ -n "$nodes" ] || nodes="${mds_HOST} ${mdsfailover_HOST}"
nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
echo -n $nodes_sort
}
all_osts_nodes () {
local host
local failover_host
- local nodes="${ost_HOST} ${ostfailover_HOST}"
+ local nodes
local nodes_sort
local i
nodes="$nodes ${!host} ${!failover_host}"
done
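+ # fall back to ost_HOST when no per-OST hosts were found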
+ [ -n "$nodes" ] || nodes="${ost_HOST} ${ostfailover_HOST}"
nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
echo -n $nodes_sort
}
}
get_clientosc_proc_path() {
- echo "${1}-osc-ffff*"
+ echo "${1}-osc-[-0-9a-f]*"
}
# If the 2.0 MDS was mounted on 1.8 device, then the OSC and LOV names
[ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc"
if [ $result -eq $expected ]; then
- echo -n "target updated after"
+ echo -n "target updated after "
echo "$wait sec (got $result)"
break
fi
local clients=${CLIENTS:-$HOSTNAME}
for c in ${clients//,/ }; do
+ # reconnect if idle
+ do_node $c lctl set_param osc.*.idle_connect=1 >/dev/null 2>&1
local output=$(do_node $c lctl get_param -n \
osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null)
local tmpcnt=$(count_flvr "$output" $flavor)
#
# Get the page size (bytes) on a given facet node.
+# For the local client, the page size is already exported in PAGE_SIZE.
#
get_page_size() {
local facet=$1
- local size=$(getconf PAGE_SIZE 2>/dev/null)
+ local page_size=$(getconf PAGE_SIZE 2>/dev/null)
- [ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
- echo -n ${size:-4096}
+ [ -z "$CLIENTONLY" -a "$facet" != "client" ] &&
+ page_size=$(do_facet $facet getconf PAGE_SIZE)
+ echo -n ${page_size:-4096}
}
#
[[ -z "$file" || -z "$expected" ]] &&
error "check_stripe_count: invalid argument"
- local cmd="$GETSTRIPE -c $file"
+ local cmd="$LFS getstripe -c $file"
actual=$($cmd) || error "$cmd failed"
actual=${actual%% *}
if [[ $actual -ne $expected ]]; then
- [[ $expected -eq -1 ]] ||
- error "$cmd wrong: found $actual, expected $expected"
- [[ $actual -eq $OSTCOUNT ]] ||
- error "$cmd wrong: found $actual, expected $OSTCOUNT"
+ [[ $expected -eq -1 ]] || { $LFS getstripe $file;
+ error "$cmd wrong: found $actual, expected $expected"; }
+ [[ $actual -eq $OSTCOUNT ]] || { $LFS getstripe $file;
+ error "$cmd wrong: found $actual, expected OSTCOUNT=$OSTCOUNT"; }
fi
}