gen_config() {
rm -f $XMLCONFIG
- add_mds mds --dev $MDSDEV --size $MDSSIZE
- add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
+ add_mds mds1 --dev $MDSDEV --size $MDSSIZE
+ add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES\
--stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
- add_client client mds --lov lov1 --path $MOUNT
+ add_client client mds1 --lov lov1 --path $MOUNT
}
gen_second_config() {
add_lov lov2 mds2 --stripe_sz $STRIPE_BYTES\
--stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
add_ost ost2 --lov lov2 --dev $OSTDEV --size $OSTSIZE
- add_client client mds2 --lov lov2 --path $MOUNT2
+ add_client client --mds mds2 --lov lov2 --path $MOUNT2
}
start_mds() {
- echo "start mds service on `facet_active_host mds`"
- start mds --reformat $MDSLCONFARGS || return 94
+ echo "start mds1 service on `facet_active_host mds1`"
+ start mds1 --reformat $MDSLCONFARGS || return 94
}
stop_mds() {
- echo "stop mds service on `facet_active_host mds`"
- stop mds $@ || return 97
+ echo "stop mds1 service on `facet_active_host mds1`"
+ stop mds1 $@ || return 97
}
start_ost() {
cleanup() {
umount_client $MOUNT || return 200
- stop_mds || return 201
+ stop_mds || return 201
stop_ost || return 202
# catch case where these return just fine, but modules are still not unloaded
/sbin/lsmod | grep -q portals
start_ost
start_mds
echo "start mds second time.."
- start mds --reformat $MDSLCONFARGS
+ start mds1 --reformat $MDSLCONFARGS
mount_client $MOUNT
check_mount || return 43
[ -d $MOUNT ] || mkdir -p $MOUNT
$LCONF --nosetup --node client_facet $XMLCONFIG > /dev/null
- llmount $mds_HOST://mds_svc/client_facet $MOUNT && exit 1
+ llmount $mds_HOST://mds1_svc/client_facet $MOUNT && exit 1
# cleanup client modules
$LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
stop_mds || return 2
stop_ost || return 3
- lsmod | grep -q portals && return 3
+ lsmod | grep -q portals && return 4
return 0
}
[ -d $MOUNT ] || mkdir -p $MOUNT
$LCONF --nosetup --node client_facet $XMLCONFIG > /dev/null
- llmount $mds_HOST://wrong_mds_svc/client_facet $MOUNT && exit 1
+ llmount $mds_HOST://wrong_mds1_svc/client_facet $MOUNT && return 1
# cleanup client modules
$LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
stop_mds || return 2
stop_ost || return 3
- lsmod | grep -q portals && return 3
+ lsmod | grep -q portals && return 4
return 0
}
run_test 5c "cleanup after failed mount (bug 2712)"
+test_5d() {
+ start_ost
+ start_mds
+ stop_ost --force
+
+ [ -d $MOUNT ] || mkdir -p $MOUNT
+ $LCONF --nosetup --node client_facet $XMLCONFIG > /dev/null
+ llmount $mds_HOST://mds1_svc/client_facet $MOUNT || return 1
+
+ umount $MOUNT || return 2
+ # cleanup client modules
+ $LCONF --cleanup --nosetup --node client_facet $XMLCONFIG > /dev/null
+
+ stop_mds || return 3
+
+ lsmod | grep -q portals && return 4
+ return 0
+
+}
+run_test 5d "ost down, don't crash during mount attempt"
+
test_6() {
setup
manual_umount_client
# check lconf --ptldebug/subsystem overriding lmc --ptldebug/subsystem
start_ost
start_mds
- CHECK_PTLDEBUG="`do_facet mds cat /proc/sys/portals/debug`"
+ CHECK_PTLDEBUG="`do_facet mds1 cat /proc/sys/portals/debug`"
if [ $CHECK_PTLDEBUG = "3" ]; then
echo "lconf --debug success"
else
echo "lconf --debug: want 3, have $CHECK_PTLDEBUG"
return 1
fi
- CHECK_SUBSYSTEM="`do_facet mds cat /proc/sys/portals/subsystem_debug`"
+ CHECK_SUBSYSTEM="`do_facet mds1 cat /proc/sys/portals/subsystem_debug`"
if [ $CHECK_SUBSYSTEM = "20" ]; then
echo "lconf --subsystem success"
else
OLDXMLCONFIG=$XMLCONFIG
XMLCONFIG="broken.xml"
[ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
- facet="mds"
+ facet="mds1"
rm -f ${facet}active
add_facet $facet
echo "the name for node and mds is the same"
add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
facet="client"
add_facet $facet --lustre_upcall $UPCALL
- do_lmc --add mtpt --node ${facet}_facet --mds mds_facet \
+ do_lmc --add mtpt --node ${facet}_facet --mds mds1_facet \
--lov lov1 --path $MOUNT
echo "mount lustre"
XMLCONFIG="conf11.xml"
[ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
- add_mds mds --dev $MDSDEV --size $MDSSIZE
+ add_mds mds1 --dev $MDSDEV --size $MDSSIZE
add_ost ost --dev $OSTDEV --size $OSTSIZE
- add_client client mds --path $MOUNT --ost ost_svc || return $?
+ add_client client mds1 --path $MOUNT --ost ost_svc || return $?
echo "Default lov config success!"
[ -f "$XMLCONFIG" ] && rm -f $XMLCONFIG
- add_mds mds --dev $MDSDEV --size $MDSSIZE
+ add_mds mds1 --dev $MDSDEV --size $MDSSIZE
add_ost ost --dev $OSTDEV --size $OSTSIZE
- add_client client mds --path $MOUNT && return $?
+ add_client client mds1 --path $MOUNT && return $?
echo "--add mtpt with neither --lov nor --ost will return error"
echo ""
fi
EXPECTEDMDS1UUID="e_longer_than_31characters_UUID"
EXPECTEDMDS2UUID="longer_than_31characters_UUID_2"
- FOUNDMDS1UUID=`awk -F"'" '/<mds uuid=/{print $2}' $XMLCONFIG | sed -n '1p'`
- FOUNDMDS2UUID=`awk -F"'" '/<mds uuid=/{print $2}' $XMLCONFIG | sed -n '2p'`
- if [ $EXPECTEDMDS1UUID != $FOUNDMDS1UUID ]; then
- echo "Error:expected uuid for mds1: $EXPECTEDMDS1UUID; found: $FOUNDMDS1UUID"
- return 1
- fi
- if [ $EXPECTEDMDS2UUID != $FOUNDMDS2UUID ]; then
- echo "Error:expected uuid for mds2: $EXPECTEDMDS2UUID; found: $FOUNDMDS2UUID"
+ FOUNDMDS1UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '1p' \
+ | sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
+ FOUNDMDS2UUID=`awk -F"'" '/<mds .*uuid=/' $XMLCONFIG | sed -n '2p' \
+ | sed "s/ /\n\r/g" | awk -F"'" '/uuid=/{print $2}'`
+ if ([ $EXPECTEDMDS1UUID = $FOUNDMDS1UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS2UUID ]) || \
+ ([ $EXPECTEDMDS1UUID = $FOUNDMDS2UUID ] && [ $EXPECTEDMDS2UUID = $FOUNDMDS1UUID ]); then
+ echo "Success:long uuid truncated successfully and being unique."
+ else
+ echo "Error:expected uuid for mds1 and mds2: $EXPECTEDMDS1UUID; $EXPECTEDMDS2UUID"
+ echo "but: found uuid for mds1 and mds2: $FOUNDMDS1UUID; $FOUNDMDS2UUID"
return 1
fi
- echo "Success:long uuid truncated successfully and being unique."
# check multiple invocations for lmc generate same XML configuration file
rm -f $XMLCONFIG
# create xml file with --mkfsoptions for ost
echo "create xml file with --mkfsoptions for ost"
- add_mds mds --dev $MDSDEV --size $MDSSIZE
- add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
+ add_mds mds1 --dev $MDSDEV --size $MDSSIZE
+ add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES\
--stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE \
--mkfsoptions "-Llabel_conf_14"
- add_client client mds --lov lov1 --path $MOUNT
+ add_client client mds1 --lov lov1 --path $MOUNT
FOUNDSTRING=`awk -F"<" '/<mkfsoptions>/{print $2}' $XMLCONFIG`
EXPECTEDSTRING="mkfsoptions>-Llabel_conf_14"
# load llite module on the client if it isn't in /lib/modules
do_node `hostname` lconf --nosetup --node client_facet $XMLCONFIG
do_node `hostname` mount -t lustre -o nettype=$NETTYPE \
- `facet_active_host mds`:/mds_svc/client_facet $MOUNT ||return $?
+ `facet_active_host mds1`:/mds1_svc/client_facet $MOUNT ||return $?
echo "mount lustre on $MOUNT with $MOUNTLUSTRE: success"
[ -d /r ] && $LCTL modules > /r/tmp/ogdb-`hostname`
check_mount || return 41
[ -f "$MOUNTLUSTRE" ] && rm -f $MOUNTLUSTRE
echo "mount lustre on ${MOUNT} without $MOUNTLUSTRE....."
do_node `hostname` mount -t lustre -o nettype=$NETTYPE \
- `facet_active_host mds`:/mds_svc/client_facet $MOUNT &&return $?
+ `facet_active_host mds1`:/mds1_svc/client_facet $MOUNT &&return $?
echo "mount lustre on $MOUNT without $MOUNTLUSTRE failed as expected"
cleanup || return $?
cleanup_15
}
run_test 16 "verify that lustre will correct the mode of OBJECTS/LOGS/PENDING"
+test_17() {
+ TMPMTPT="/mnt/conf17"
+
+ if [ ! -f "$MDSDEV" ]; then
+ echo "no $MDSDEV existing, so mount Lustre to create one"
+ start_ost
+ start_mds
+ mount_client $MOUNT
+ check_mount || return 41
+ cleanup || return $?
+ fi
+
+ echo "Remove mds config log"
+ [ -d $TMPMTPT ] || mkdir -p $TMPMTPT
+ mount -o loop -t ext3 $MDSDEV $TMPMTPT || return $?
+ rm -f $TMPMTPT/LOGS/mds1_svc || return $?
+ umount $TMPMTPT || return $?
+
+ start_ost
+ start mds1 $MDSLCONFARGS && return 42
+ cleanup || return $?
+}
+run_test 17 "Verify failed mds_postsetup won't fail assertion (2936)"
+
+test_18() {
+ [ -f $MDSDEV ] && echo "remove $MDSDEV" && rm -f $MDSDEV
+ echo "mount mds with large journal..."
+ OLDMDSSIZE=$MDSSIZE
+ MDSSIZE=2000000
+ gen_config
+
+ echo "mount lustre system..."
+ start_ost
+ start_mds
+ mount_client $MOUNT
+ check_mount || return 41
+
+ echo "check journal size..."
+ FOUNDJOURNALSIZE=`debugfs -R "stat <8>" $MDSDEV | awk '/Size: / { print $6; exit;}'`
+ if [ $FOUNDJOURNALSIZE = "79691776" ]; then
+ echo "Success:lconf creates large journals"
+ else
+ echo "Error:lconf not create large journals correctly"
+ echo "expected journal size: 79691776(76M), found journal size: $FOUNDJOURNALSIZE"
+ return 1
+ fi
+
+ cleanup || return $?
+
+ MDSSIZE=$OLDMDSSIZE
+ gen_config
+}
+run_test 18 "check lconf creates large journals"
+
equals_msg "Done"