return
fi
- local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+ combined_mgs_mds ||
+ { skip "needs combined MGT and MDT device" && return 0; }
start_mdt 1 -o nosvc -n
if [ $MDSCOUNT -ge 2 ]; then
test_41c() {
local server_version=$(lustre_version_code $SINGLEMDS)
+ local oss_list=$(comma_list $(osts_nodes))
[[ $server_version -ge $(version_code 2.6.52) ]] ||
[[ $server_version -ge $(version_code 2.5.26) &&
cleanup
# MDT concurrent start
+
+ LOAD_MODULES_REMOTE=true load_modules
+ do_facet $SINGLEMDS "lsmod | grep -q libcfs" ||
+ error "MDT concurrent start: libcfs module not loaded"
+
#define OBD_FAIL_TGT_MOUNT_RACE 0x716
do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716"
start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
# OST concurrent start
+ do_rpc_nodes $oss_list "lsmod | grep -q libcfs" ||
+ error "OST concurrent start: libcfs module not loaded"
+
#define OBD_FAIL_TGT_MOUNT_RACE 0x716
do_facet ost1 "$LCTL set_param fail_loc=0x716"
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
for i in 1023 2048
do
+ if ! combined_mgs_mds; then
+ stop_mgs || error "stopping MGS service failed"
+ format_mgs || error "formatting MGT failed"
+ fi
add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \
$mdsvdev || exit 10
add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
MDSJOURNALSIZE=16
for num in $(seq 1 $MDSCOUNT); do
- reformat_mdt $num
+ format_mdt $num
done
add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=10000 --reformat \
$(ostdevname 1) $(ostvdevname 1)
}
test_58() { # bug 22658
+ combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
setup_noconfig
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
createmany -o $DIR/$tdir/$tfile-%d 100
done
fi
+ combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
setup_noconfig || error "setting up the filesystem failed"
client_up || error "starting client failed"
test_77() { # LU-3445
local server_version=$(lustre_version_code $SINGLEMDS)
-
- [[ $server_version -ge $(version_code 2.2.60) ]] &&
- [[ $server_version -le $(version_code 2.4.0) ]] &&
- skip "Need MDS version < 2.2.60 or > 2.4.0" && return
+ [[ $server_version -ge $(version_code 2.8.55) ]] ||
+ { skip "Need MDS version 2.8.55+ "; return; }
if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then
is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
error "start fs2mds failed"
mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,)
- [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid"
+ mgsnid="$mgsnid,$mgsnid:$mgsnid"
add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \
--failnode=$failnid --fsname=$fsname \
}
run_test 85 "osd_ost init: fail ea_fid_set"
+# Trap handler for test_86: restore a usable filesystem after the
+# destructive OST reformat. "trap 0" clears the handler so a normal
+# exit does not run the cleanup twice.
+cleanup_86() {
+ trap 0
+
+ # ost1 has already registered to the MGS before the reformat.
+ # So after reformatting it with option "-G", it could not be
+ # mounted to the MGS. Cleanup the system for subsequent tests.
+ reformat_and_config
+}
+
test_86() {
+ local server_version=$(lustre_version_code $SINGLEMDS)
[ "$(facet_fstype ost1)" = "zfs" ] &&
skip "LU-6442: no such mkfs params for ZFS OSTs" && return
+ [[ $server_version -ge $(version_code 2.7.56) ]] ||
+ { skip "Need server version newer than 2.7.55"; return 0; }
local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \
--reformat $(ostdevname 1) $(ostvdevname 1)"
echo "params: $opts"
+ trap cleanup_86 EXIT ERR
+
+ stopall
add ost1 $opts || error "add ost1 failed with new params"
local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
[[ $FOUNDSIZE == $NEWSIZE ]] ||
error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE"
- return 0
+
+ cleanup_86
}
run_test 86 "Replacing mkfs.lustre -G option"
}
run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+test_97() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo -e "\nMDT role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute for mdt role!"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mdt role!"
+ fi
+
+ echo -e "\nOST role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for ost role!"
+ fi
+
+ rm $EXPECTED_OUTPUT
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for ost role!"
+ fi
+
+ echo -e "\nMGS role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for mgs role!"
+ fi
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mgs role!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct ouput when querying based on role"
+
if ! combined_mgs_mds ; then
stop mgs
fi