Make facet_failover work for the mgs facet, and include failover
nodes in the list of nodes on which to load modules.
When mgs/mds are combined, assign the failover host of mds to
the failover host of mgs.
When mounting facets with mgs/mds combined, assign mds1failover_dev
to mgsfailover_dev, since mds1failover_dev is already defined in
that configuration.
Fix start() to export mgs_dev and mgsfailover_dev for
combined_mgs_mds.
Do not wait for recovery to complete on the mgs.
Test-Parameters: trivial failover=true osscount=2 mdscount=2 mdtcount=1 austeroptions=-R iscsi=1 env="ONLY=121" testlist=conf-sanity
Test-Parameters: testlist=conf-sanity
Change-Id: Ie698814c530c8deb98aa0010f2a0fa8e261b4b69
HPE-bug-id: MRP-3374, LUS-4858, LUS-2361
Signed-off-by: Elena Gryaznova <c17455@cray.com>
Signed-off-by: Alexander Boyko <c17825@cray.com>
Signed-off-by: Noopur Maheshwari <noopur.maheshwari@seagate.com>
Signed-off-by: Vladimir Saveliev <c17830@cray.com>
Signed-off-by: Alexander Zarochentsev <alexander.zarochentsev@hpe.com>
Reviewed-on: https://review.whamcloud.com/26235
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Elena Gryaznova <elena.gryaznova@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
}
run_test 120 "cross-target rename should not create bad symlinks"
}
run_test 120 "cross-target rename should not create bad symlinks"
+# test_121: exercise MGS failover with combined MGS/MDS.
+# Restarts everything via start_mgsmds, fails the mgs facet over to its
+# failover node, then fails it back, verifying the MDSes stop cleanly
+# after each failover.
+test_121(){
+	stopall
+	start_mgsmds || error "MGS MDS Start failed"
+	fail mgs
+	stop_mds || error "Stopping MDSes failed"
+	# failback: restart the MDSes and fail the mgs back to its primary
+	start_mds
+	fail mgs
+	stop_mds || error "Stopping MDSes failed"
+}
+run_test 121 "failover MGS"
+
test_122a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
[[ "$OST1_VERSION" -ge $(version_code 2.11.53) ]] ||
test_122a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
[[ "$OST1_VERSION" -ge $(version_code 2.11.53) ]] ||
echo "Writeconf"
writeconf_all
echo "Remounting"
echo "Writeconf"
writeconf_all
echo "Remounting"
- mountmgs
- mountmds
- mountoss
- mountcli
# Reapply the config from before
echo "Setting configuration parameters"
# Reapply the config from before
echo "Setting configuration parameters"
+ local facets
+ local facet
+ local failover
load_modules_local
# bug 19124
# load modules on remote nodes optionally
# lustre-tests have to be installed on these nodes
if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
load_modules_local
# bug 19124
# load modules on remote nodes optionally
# lustre-tests have to be installed on these nodes
if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
+
+ # include failover nodes in case they are not in the list yet
+ facets=$(get_facets)
+ for facet in ${facets//,/ }; do
+ failover=$(facet_failover_host $facet)
+ [ -n "$list" ] && [[ ! "$list" =~ "$failover" ]] &&
+ list="$list,$failover"
+ done
+
if [ -n "$list" ]; then
echo "loading modules on: '$list'"
do_rpc_nodes "$list" load_modules_local
if [ -n "$list" ]; then
echo "loading modules on: '$list'"
do_rpc_nodes "$list" load_modules_local
local devicelabel
local dm_dev=${!dev}
local devicelabel
local dm_dev=${!dev}
+ [[ $dev == "mgsfailover_dev" ]] && combined_mgs_mds &&
+ dev=mds1failover_dev
+
module_loaded lustre || load_modules
case $fstype in
module_loaded lustre || load_modules
case $fstype in
eval export ${dev_alias}_dev=${device}
eval export ${facet}_opt=\"$@\"
eval export ${dev_alias}_dev=${device}
eval export ${facet}_opt=\"$@\"
+ combined_mgs_mds && [[ ${dev_alias} == mds1 ]] &&
+ eval export mgs_dev=${device}
+
local varname=${dev_alias}failover_dev
if [ -n "${!varname}" ] ; then
eval export ${dev_alias}failover_dev=${!varname}
else
eval export ${dev_alias}failover_dev=$device
local varname=${dev_alias}failover_dev
if [ -n "${!varname}" ] ; then
eval export ${dev_alias}failover_dev=${!varname}
else
eval export ${dev_alias}failover_dev=$device
+ combined_mgs_mds && [[ ${dev_alias} == mds1 ]] &&
+ eval export mgsfailover_dev=${device}
+
fi
local mntpt=$(facet_mntpt $facet)
fi
local mntpt=$(facet_mntpt $facet)
fi
echo affected facets: $facets
fi
echo affected facets: $facets
- # we can use "for" here because we are waiting the slowest
- for facet in ${facets//,/ }; do
+ facets=${facets//,/ }
+ # We can use "for" here because we are waiting the slowest.
+ # The mgs not having the recovery_status proc entry, exclude it
+ # from the facet list.
+ for facet in ${facets//mgs/ }; do
local var_svc=${facet}_svc
local param="*.${!var_svc}.recovery_status"
local var_svc=${facet}_svc
local param="*.${!var_svc}.recovery_status"
if ! combined_mgs_mds &&
list_member ${affecteds[index]} mgs; then
mount_facet mgs || error "Restart of mgs failed"
if ! combined_mgs_mds &&
list_member ${affecteds[index]} mgs; then
mount_facet mgs || error "Restart of mgs failed"
+ affecteds[index]=$(exclude_items_from_list \
+ ${affecteds[index]} mgs)
fi
# FIXME; has to be changed to mount all facets concurrently
fi
# FIXME; has to be changed to mount all facets concurrently
- affected=$(exclude_items_from_list ${affecteds[index]} mgs)
- echo mount facets: ${affecteds[index]}
- mount_facets ${affecteds[index]}
+ if [ -n "${affecteds[index]}" ]; then
+ echo mount facets: ${affecteds[index]}
+ mount_facets ${affecteds[index]}
+ fi
if $GSS_SK; then
do_nodes $(comma_list $(all_nodes)) \
"keyctl show | grep lustre | cut -c1-11 |
if $GSS_SK; then
do_nodes $(comma_list $(all_nodes)) \
"keyctl show | grep lustre | cut -c1-11 |
elif [ "${facet:0:3}" == "mdt" -o \
"${facet:0:3}" == "mds" -o \
"${facet:0:3}" == "mgs" ]; then
elif [ "${facet:0:3}" == "mdt" -o \
"${facet:0:3}" == "mds" -o \
"${facet:0:3}" == "mgs" ]; then
- eval export ${facet}_HOST=${mds_HOST}
+ local temp
+ if [ "${facet}" == "mgsfailover" ] &&
+ [ -n "$mds1failover_HOST" ]; then
+ temp=$mds1failover_HOST
+ else
+ temp=${mds_HOST}
+ fi
+ eval export ${facet}_HOST=$temp
fi
fi
echo -n ${!varname}
fi
fi
echo -n ${!varname}
+ if combined_mgs_mds && [ $facet == "mgs" ] &&
+ [ -z $mds1failover_HOST ]; then
+ temp=mds1failover_HOST
+ echo ${!temp}
+ return
+ fi
+
if [ "${facet:0:3}" == "mdt" -o "${facet:0:3}" == "mds" -o \
"${facet:0:3}" == "mgs" ]; then
if [ "${facet:0:3}" == "mdt" -o "${facet:0:3}" == "mds" -o \
"${facet:0:3}" == "mgs" ]; then
local facetlist=$1
local facet
local facetlist=$1
local facet
- facetlist=$(exclude_items_from_list $facetlist mgs)
-
for facet in ${facetlist//,/ }; do
local failover=${facet}failover
local host=`facet_host $failover`
for facet in ${facetlist//,/ }; do
local failover=${facet}failover
local host=`facet_host $failover`
local varname=${facet}failover_HOST
if [ -z "${!varname}" ]; then
local varname=${facet}failover_HOST
if [ -z "${!varname}" ]; then
- eval export $varname=$(facet_host $facet)
+ local temp
+ if combined_mgs_mds && [ $facet == "mgs" ] &&
+ [ -n "$mds1failover_HOST" ]; then
+ temp=$mds1failover_HOST
+ else
+ temp=$(facet_host $facet)
+ fi
+ eval export $varname=$temp
if ! remote_mds_nodsh; then
for num in $(seq $MDSCOUNT); do
if ! remote_mds_nodsh; then
for num in $(seq $MDSCOUNT); do
- DEVNAME=`mdsdevname $num`
+ DEVNAME=$(mdsdevname $num)
init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS
done
fi
init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS
done
fi
- combined_mgs_mds || init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS
+ init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS
if ! remote_ost_nodsh; then
for num in $(seq $OSTCOUNT); do
if ! remote_ost_nodsh; then
for num in $(seq $OSTCOUNT); do