ONLY=${ONLY:-"$*"}
-# bug number for skipped test: LU-7428
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 84"
+# bug number for skipped test: LU-8972
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
return 1
}
-if is_sles11; then # LU-2181
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b"
-fi
-
if [ "$FAILURE_MODE" = "HARD" ]; then
CONFIG_EXCEPTIONS="24a " &&
echo "Except the tests: $CONFIG_EXCEPTIONS for " \
OSTDEV2_2=$fs3ost_DEV
if ! combined_mgs_mds; then
- # bug number for skipped test: 23954
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b"
+ # bug number for skipped test: LU-9860 LU-9860 LU-9860
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 43b 53b 54b"
+ # bug number for skipped test: LU-9875 LU-9879 LU-9879 LU-9879 LU-9879
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 70e 80 84 87 100"
+ # bug number for skipped test: LU-8110 LU-9400 LU-9879 LU-9879 LU-9879
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 102 103 104 105 107"
fi
# pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
fi
[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
-# bug number for skipped test: LU-4444
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 69"
+# bug number for skipped test:
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT"
init_logging
#
require_dsh_mds || exit 0
require_dsh_ost || exit 0
-#
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45 69"
+
+# 8 22 (min)"
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69"
assert_DIR
}
cleanup() {
- umount_client $MOUNT || return 200
+ local force=""
+ [ "x$1" != "x" ] && force='-f'
+ umount_client $MOUNT $force || return 200
cleanup_nocli || return $?
}
test_5d() {
grep " $MOUNT " /etc/mtab &&
- error false "unexpected entry in mtab before mount" && return 10
+ error "unexpected entry in mtab before mount"
start_ost || error "OST start failed"
start_mds || error "MDS start failed"
- stop_ost || error "Unable to stop OST1"
+ stop_ost -f || error "Unable to stop OST1"
mount_client $MOUNT || error "mount_client $MOUNT failed"
umount_client $MOUNT -f || error "umount_client $MOUNT failed"
cleanup_nocli || error "cleanup_nocli failed with $?"
- grep " $MOUNT " /etc/mtab &&
+ ! grep " $MOUNT " /etc/mtab ||
error "$MOUNT entry in mtab after unmount"
- pass
}
run_test 5d "mount with ost down"
test_5f() {
if combined_mgs_mds ; then
- skip "combined mgs and mds"
+ skip "needs separate mgs and mds"
return 0
fi
}
run_test 5f "mds down, cleanup after failed mount (bug 2712)"
+test_5g() {
+ modprobe lustre
+ [ $(lustre_version_code client) -lt $(version_code 2.9.53) ] &&
+ { skip "automount of debugfs missing before 2.9.53" && return 0; }
+ umount /sys/kernel/debug
+ $LCTL get_param -n devices | egrep -v "error" && \
+ error "lctl can't access debugfs data"
+ grep " debugfs " /etc/mtab || error "debugfs failed to remount"
+}
+run_test 5g "handle missing debugfs"
+
test_6() {
setup
manual_umount_client
test_17() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
test_18() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
test_19b() {
start_ost || error "Unable to start OST1"
- stop_ost || error "Unable to stop OST1"
+ stop_ost -f || error "Unable to stop OST1"
}
run_test 19b "start/stop OSTs without MDS"
local fs2ostdev=$(ostdevname 1_2)
local fs2mdsvdev=$(mdsvdevname 1_2)
local fs2ostvdev=$(ostvdevname 1_2)
+ local cl_user
- # test 8-char fsname as well
- local FSNAME2=test1234
+ # LU-9733 test fsname started with numbers as well
+ local FSNAME2=969362ae
add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --nomgs --mgsnode=$MGSNID \
--fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed"
$MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed"
+
+ # LU-9733 test fsname started with numbers
+ cl_user=$(do_facet $SINGLEMDS lctl --device $FSNAME2-MDT0000 \
+ changelog_register -n) ||
+ error "register changelog failed"
+
+ do_facet $SINGLEMDS lctl --device $FSNAME2-MDT0000 \
+ changelog_deregister $cl_user ||
+ error "deregister changelog failed"
# 1 still works
check_mount || error "check_mount failed"
# files written on 1 should not show up on 2
run_test 28a "set symlink parameters permanently with conf_param"
test_29() {
- [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return
setup > /dev/null 2>&1
start_ost2 || error "Unable to start OST2"
sleep 10
fi
# check MDTs too
- for num in $(seq $MDSCOUNT); do
- local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001)
- local MPROC="osc.$mdtosc.active"
- local MAX=30
- local WAIT=0
- while [ 1 ]; do
- sleep 5
- RESULT=$(do_facet mds${num} "$LCTL get_param -n $MPROC")
- [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
- if [ $RESULT -eq $DEAC ]; then
- echo -n "MDT deactivated also after"
- echo "$WAIT sec (got $RESULT)"
- break
- fi
- WAIT=$((WAIT + 5))
- if [ $WAIT -eq $MAX ]; then
- error "MDT active: wanted $DEAC got $RESULT"
- fi
- echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated"
- done
- done
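+ # wait_osp_active replaces the open-coded polling loop above: wait
+ # until the MDTs also see OST0001 (index 1) in the deactivated (0)
+ # state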
+ wait_osp_active ost ${FSNAME}-OST0001 1 0
+
# test new client starts deactivated
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
# format ost with comma-separated NIDs to verify LU-4460
- local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
+ local failnid="$(h2nettype 1.2.3.4),$(h2nettype 4.3.2.1)"
MGSNID="$MGSNID,$MGSNID" OSTOPT="--failnode=$failnid" formatall
setupall
local IMGTYPE=$(facet_fstype $SINGLEMDS)
- tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
+ tarballs=$($r find $RLUSTRE/tests -maxdepth 1 \
+ -name \'disk*-$IMGTYPE.tar.bz2\')
if [ -z "$tarballs" ]; then
skip "No applicable tarballs found"
destroy_zpool $facet $poolname
done
fi
+ combined_mgs_mds || start_mgs || rc=$?
return $rc
}
local qval
local cmd
+ # LU-2435: if the underlying zfs doesn't support userobj_accounting,
+ # lustre will estimate the object count usage. This fails quota
+ # verification in 32b. The object quota usage should be accurate after
+ # zfs-0.7.0 is released.
+ [ $fstype == "zfs" ] && {
+ local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+
+ [ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
+ echo "Skip quota verify for zfs: $zfs_version"
+ return 0
+ }
+ }
+
$LFS quota -u $T32_QID -v $mnt
qval=$($LFS quota -v -u $T32_QID $mnt |
local mdt2_is_available=false
local node=$(facet_active_host $SINGLEMDS)
local r="do_node $node"
- local node2=$(facet_active_host mds2)
local tmp=$TMP/t32
local img_commit
local img_kernel
local img_blimit
local img_ilimit
local fsname=t32fs
- local nid=$($r $LCTL list_nids | head -1)
+ local nid
local mopts
local uuid
local nrpcs_orig
local stripe_count
local dir
+ combined_mgs_mds || stop_mgs || error "Unable to stop MGS"
trap 'trap - RETURN; t32_test_cleanup' RETURN
load_modules
+ nid=$($r $LCTL list_nids | head -1)
+
mkdir -p $tmp/mnt/lustre || error "mkdir $tmp/mnt/lustre failed"
$r mkdir -p $tmp/mnt/{mdt,mdt1,ost}
$r tar xjvf $tarball -S -C $tmp || {
! $mdt2_is_available || poolname_list+=" t32fs-mdt2"
for poolname in $poolname_list; do
- $r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $r "modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f -d $tmp $poolname"
done
+ # upgrade zpool to latest supported features, including
+ # dnode quota accounting in 0.7.0
+ $r "$ZPOOL upgrade -a"
+
mdt_dev=t32fs-mdt1/mdt1
ost_dev=t32fs-ost1/ost1
! $mdt2_is_available || mdt2_dev=t32fs-mdt2/mdt2
mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
fi
- add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
+ add $SINGLEMDS $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \
$mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || {
error_noexit "Mkfs new MDT failed"
return 1
}
- [[ $(facet_fstype mds1) != zfs ]] || import_zpool fs2mds
+ [[ $(facet_fstype mds1) != zfs ]] || import_zpool mds1
$r $TUNEFS --dryrun $fs2mdsdev || {
error_noexit "tunefs.lustre before mounting the MDT"
mopts="loop,$mopts"
fi
fi
- $r $MOUNT_CMD -o $mopts $ost_dev $tmp/mnt/ost || {
+
+ $r $MOUNT_CMD -onomgs -o$mopts $ost_dev $tmp/mnt/ost || {
error_noexit "Mounting the OST"
return 1
}
pushd $tmp/mnt/lustre
fi
$r cat $list_file | sort -k 6 >$tmp/list.orig
- ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || {
+ ls -Rni --time-style=+%s | sort -k 6 |
+ sed 's/\. / /' >$tmp/list || {
error_noexit "ls"
return 1
}
if [[ $fstype == zfs ]]; then
local poolname=t32fs-mdt1
- $r "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $r "modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f -d $tmp $poolname"
+
+ # upgrade zpool to latest supported features,
+ # including dnode quota accounting in 0.7.0
+ $r "$ZPOOL upgrade $poolname"
fi
# mount a second time to make sure we didn't leave the upgrade flag on
mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
fi
- add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \
- --reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10
+ if combined_mgs_mds; then
+ local mgs_flag="--mgs"
+ fi
+
+ add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --fsname=${FSNAME2} \
+ --reformat $mgs_flag $mkfsoptions $fs2mdsdev $fs2mdsvdev ||
+ exit 10
add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
--fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \
$fs2ostvdev || exit 10
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
- do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" ||
+ do_facet mgs "$LCTL conf_param $FSNAME2.sys.timeout=200" ||
error "$LCTL conf_param $FSNAME2.sys.timeout=200 failed"
mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed"
$MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
do_facet mgs "$LCTL conf_param \
- ${device}.failover.node=$(h2$NETTYPE $FAKENID)" ||
+ ${device}.failover.node=$(h2nettype $FAKENID)" ||
error "Setting ${device}.failover.node=\
- $(h2$NETTYPE $FAKENID) failed."
+ $(h2nettype $FAKENID) failed."
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
do_facet mgs "$LCTL conf_param \
- ${device}.failover.node=$(h2$NETTYPE $FAKENID)" ||
+ ${device}.failover.node=$(h2nettype $FAKENID)" ||
error "Set ${device}.failover.node=\
- $(h2$NETTYPE $FAKENID) failed"
+ $(h2nettype $FAKENID) failed"
local at_max_saved=0
# adaptive timeouts may prevent seeing the issue
run_test 35b "Continue reconnection retries, if the active server is busy"
test_36() { # 12743
- [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return
+ [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return
[ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] ||
{ skip "remote OST" && return 0; }
local rc=0
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Currently only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
return
fi
- local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+ combined_mgs_mds ||
+ { skip "needs combined MGT and MDT device" && return 0; }
start_mdt 1 -o nosvc -n
if [ $MDSCOUNT -ge 2 ]; then
stop ost1 -f || error "unable to stop OST1"
stop_mds || error "Unable to stop MDS"
stop_mds || error "Unable to stop MDS on second try"
- unload_modules_conf || error "unload_modules_conf failed"
}
run_test 41a "mount mds with --nosvc and --nomgs"
echo "blah blah" > $MOUNT/$tfile
cat $MOUNT/$tfile || error "cat $MOUNT/$tfile failed"
- umount_client $MOUNT || error "umount_client $MOUNT failed"
+ umount_client $MOUNT -f || error "umount_client $MOUNT failed"
stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
stop_mds || error "Unable to stop MDS on second try"
test_41c() {
local server_version=$(lustre_version_code $SINGLEMDS)
+ local oss_list=$(comma_list $(osts_nodes))
[[ $server_version -ge $(version_code 2.6.52) ]] ||
[[ $server_version -ge $(version_code 2.5.26) &&
$server_version -lt $(version_code 2.5.11) ]] ||
{ skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; }
+ # ensure mds1 ost1 have been created even if running sub-test standalone
cleanup
+ setup
+ cleanup || error "cleanup failed"
+
+ # use the mount command directly instead of the start() function, to
+ # avoid any side effect of running concurrently with other external
+ # tools/features ("zpool import", ...)
+
# MDT concurrent start
+
+ LOAD_MODULES_REMOTE=true load_modules
+ do_facet $SINGLEMDS "lsmod | grep -q libcfs" ||
+ error "MDT concurrent start: libcfs module not loaded"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+ if [ $mds1fstype == ldiskfs ] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
#define OBD_FAIL_TGT_MOUNT_RACE 0x716
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716"
- start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
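+ # the 0x80000000 bit OR-ed into fail_loc is the one-shot flag
+ # (OBD_FAIL_ONCE), so the mount race is injected only once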
+ do_facet mds1 "$LCTL set_param fail_loc=0x80000716"
+
+ do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts &
local pid=$!
- start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
- do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0"
- local pid2=$!
- wait $pid2
+
+ do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts
local rc2=$?
wait $pid
local rc=$?
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
echo "1st MDT start succeed"
echo "2nd MDT start failed with $rc2"
# OST concurrent start
+ do_rpc_nodes $oss_list "lsmod | grep -q libcfs" ||
+ error "OST concurrent start: libcfs module not loaded"
+
+ local ost1dev=$(ostdevname 1)
+ local ost1mnt=$(facet_mntpt ost1)
+ local ost1fstype=$(facet_fstype ost1)
+ local ost1opts=$OST_MOUNT_OPTS
+
+ if [ $ost1fstype == ldiskfs ] &&
+ ! do_facet ost1 test -b $ost1dev; then
+ ost1opts=$(csa_add "$ost1opts" -o loop)
+ fi
+ if [[ $ost1fstype == zfs ]]; then
+ import_zpool ost1 || return ${PIPESTATUS[0]}
+ fi
+
#define OBD_FAIL_TGT_MOUNT_RACE 0x716
- do_facet ost1 "$LCTL set_param fail_loc=0x716"
- start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
+ do_facet ost1 "$LCTL set_param fail_loc=0x80000716"
+
+ do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts &
pid=$!
- start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
- do_facet ost1 "$LCTL set_param fail_loc=0x0"
- pid2=$!
- wait $pid2
+
+ do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts
rc2=$?
wait $pid
rc=$?
+ do_facet ost1 "$LCTL set_param fail_loc=0x0"
if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then
echo "1st OST start succeed"
echo "2nd OST start failed with $rc2"
do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
cleanup || error "stopping $FSNAME failed with invalid sys param"
- load_modules
setup
check_mount || error "client was not mounted with invalid sys param"
cleanup || error "stopping $FSNAME failed with invalid sys param"
[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
ID1=${ID1:-501}
- USER1=$(cat /etc/passwd | grep :$ID1:$ID1: | cut -d: -f1)
+ USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
[ -z "$USER1" ] && skip_env "missing user with uid=$ID1 gid=$ID1" &&
return
setup
chmod ugo+x $DIR || error "chmod 0 failed"
- set_conf_param_and_check mds \
+ set_conf_param_and_check mds1 \
"$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \
"$FSNAME.mdt.root_squash" \
"0:0"
"$LCTL get_param -n llite.${FSNAME}*.root_squash" \
"0:0" ||
error "check llite root_squash failed!"
- set_conf_param_and_check mds \
+ set_conf_param_and_check mds1 \
"$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
"$FSNAME.mdt.nosquash_nids" \
"NONE"
# set root squash UID:GID to RUNAS_ID
# root should be able to access only files owned by RUNAS_ID
#
- set_conf_param_and_check mds \
+ set_conf_param_and_check mds1 \
"$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \
"$FSNAME.mdt.root_squash" \
"$RUNAS_ID:$RUNAS_ID"
local NIDLIST=$($LCTL list_nids all | tr '\n' ' ')
NIDLIST="2@gni $NIDLIST 192.168.0.[2,10]@tcp"
NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
- set_conf_param_and_check mds \
+ set_conf_param_and_check mds1 \
"$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
"$FSNAME-MDTall.mdt.nosquash_nids" \
"$NIDLIST"
local client_ip=$(host_nids_address $HOSTNAME $NETTYPE)
local host=${client_ip//*./}
local net=${client_ip/%$host/}
- local nosquash_nids=$(h2$NETTYPE $net[$host,$host,$host])
+ local nosquash_nids=$(h2nettype $net[$host,$host,$host])
add $fs2mgs $(mkfs_opts mgs $fs2mgsdev) --fsname=$fsname \
--param mdt.root_squash=$RUNAS_ID:$RUNAS_ID \
df -h $MOUNT &
log "sleep 60 sec"
sleep 60
- #define OBD_FAIL_PTLRPC_LONG_UNLINK 0x50f
- do_facet client "$LCTL set_param fail_loc=0x50f"
+ #define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
+ do_facet client "$LCTL set_param fail_loc=0x8000050f"
log "sleep 10 sec"
sleep 10
manual_umount_client --force || error "manual_umount_client failed"
reformat_and_config
}
-test_48() { # bug 17636
- reformat
+test_48() { # bz-17636 LU-7473
+ local count
+
setup_noconfig
check_mount || error "check_mount failed"
$GETSTRIPE $MOUNT/widestripe ||
error "$GETSTRIPE $MOUNT/widestripe failed"
- trap cleanup_48 EXIT ERR
+ # In the future, we may introduce more EAs, such as selinux, enlarged
+ # LOV EA, and so on. These EAs will use some of the EA space that is
+ # shared with ACL entries. So here we only check a reasonable number of
+ # ACL entries, instead of the max number that is calculated from the
+ # max_ea_size.
+ if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.8.57) ];
+ then
+ count=28 # hard coded of RPC protocol
+ elif [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ count=4000 # max_num 4091 max_ea_size = 32768
+ elif ! large_xattr_enabled; then
+ count=450 # max_num 497 max_ea_size = 4012
+ else
+ count=4500 # max_num 8187 max_ea_size = 1048492
+ # do not create too many (>5000) to save test time
+ fi
- # fill acl buffer for avoid expand lsm to them
- getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
- setfacl -m $acl $MOUNT/widestripe
+ echo "It is expected to hold at least $count ACL entries"
+ trap cleanup_48 EXIT ERR
+ for ((i = 0; i < $count; i++)) do
+ setfacl -m u:$((i + 100)):rw $MOUNT/widestripe ||
+ error "Fail to setfacl for $MOUNT/widestripe at $i"
done
+ cancel_lru_locks mdc
stat $MOUNT/widestripe || error "stat $MOUNT/widestripe failed"
+ local r_count=$(getfacl $MOUNT/widestripe | grep "user:" | wc -l)
+ count=$((count + 1)) # for the entry "user::rw-"
+
+ [ $count -eq $r_count ] ||
+ error "Expected ACL entries $count, but got $r_count"
cleanup_48
}
run_test 50f "normal statfs one server in down"
test_50g() {
- [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return
setup
start_ost2 || error "Unable to start OST2"
wait_osc_import_state mds ost2 FULL
# LU-642
test_50h() {
# prepare MDT/OST, make OSC inactive for OST1
- [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" ||
test_50i() {
# prepare MDT/OST, make OSC inactive for OST1
- [ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
+ [ "$MDSCOUNT" -lt "2" ] && skip_env "needs >= 2 MDTs" && return
- [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
load_modules
+ [ $(facet_fstype mds2) == zfs ] && import_zpool mds2
do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
error "tunefs MDT2 failed"
start_mds || error "Unable to start MDT"
"$TEST" "${FSNAME}-MDT0001.mdc.active" 0 ||
error "Unable to deactivate MDT2"
+ wait_osp_active mds ${FSNAME}-MDT0001 1 0
+
$LFS mkdir -i1 $DIR/$tdir/2 &&
error "mkdir $DIR/$tdir/2 succeeds after deactive MDT"
+ $LFS mkdir -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir ||
+ error "mkdir $DIR/$tdir/striped_dir fails after deactive MDT2"
+
+ local stripe_count=$($LFS getdirstripe -c $DIR/$tdir/striped_dir)
+ [ $stripe_count -eq $((MDSCOUNT - 1)) ] ||
+ error "wrong $stripe_count != $((MDSCOUNT -1)) for striped_dir"
+
# cleanup
umount_client $MOUNT || error "Unable to umount client"
stop_mds
test_52() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
echo
# backup files
- echo backup files to $TMP/files
+ echo backup files to $TMP/$tdir
local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
- copy_files_xattrs $(hostname) $TMP/files $TMP/file_xattrs $files ||
+ copy_files_xattrs $(hostname) $TMP/$tdir $TMP/file_xattrs $files ||
error "Unable to copy files"
umount_client $MOUNT || error "Unable to umount client"
error "Some entry under /lost+found should be repaired"
# compare files
- diff_files_xattrs $(hostname) $TMP/files $TMP/file_xattrs $files ||
+ diff_files_xattrs $(hostname) $TMP/$tdir $TMP/file_xattrs $files ||
error "Unable to diff files"
- rm -rf $TMP/files $TMP/file_xattrs ||
+ rm -rf $TMP/$tdir $TMP/file_xattrs ||
error "Unable to delete temporary files"
do_node $ost1node "rm -rf $ost1tmp" ||
error "Unable to delete temporary files"
local newvalue="${opts}=$(expr $basethr \* $ncpts)"
setmodopts -a $modname "$newvalue" oldvalue
- load_modules
setup
check_mount || return 41
return $?
cleanup
- load_modules
setup
}
test_54a() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
test_54b() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
test_55() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
for i in 1023 2048
do
+ if ! combined_mgs_mds; then
+ stop_mgs || error "stopping MGS service failed"
+ format_mgs || error "formatting MGT failed"
+ fi
add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \
$mdsvdev || exit 10
add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
sync
echo checking size of lov_objid for ost index $i
- LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
+ LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" |
+ grep ^User | awk -F 'Size: ' '{print $2}')
if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
else
}
run_test 55 "check lov_objid size"
-test_56() {
+test_56a() {
local server_version=$(lustre_version_code $SINGLEMDS)
local mds_journal_size_orig=$MDSJOURNALSIZE
local n
MDSJOURNALSIZE=16
- for num in $(seq 1 $MDSCOUNT); do
- reformat_mdt $num
- done
+ formatall
add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=10000 --reformat \
$(ostdevname 1) $(ostvdevname 1)
add ost2 $(mkfs_opts ost2 $(ostdevname 2)) --index=1000 --reformat \
MDSJOURNALSIZE=$mds_journal_size_orig
reformat
}
-run_test 56 "check big OST indexes and out-of-index-order start"
+run_test 56a "check big OST indexes and out-of-index-order start"
+
+cleanup_56b() {
+ trap 0
+
+ umount_client $MOUNT -f || error "unmount client failed"
+ stop mds1
+ stop mds2
+ stop mds3
+ stopall
+ reformat
+}
+
+test_56b() {
+ [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+ trap cleanup_56b EXIT RETURN ERR
+ stopall
+
+ if ! combined_mgs_mds ; then
+ format_mgs
+ start_mgs
+ fi
+
+ add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) --index=0 --reformat \
+ $(mdsdevname 1) $(mdsvdevname 1)
+ add mds2 $(mkfs_opts mds2 $(mdsdevname 2)) --index=1 --reformat \
+ $(mdsdevname 2) $(mdsvdevname 2)
+ add mds3 $(mkfs_opts mds3 $(mdsdevname 3)) --index=1000 --reformat \
+ $(mdsdevname 3) $(mdsvdevname 3)
+ format_ost 1
+ format_ost 2
+
+ start_mdt 1 || error "MDT 1 (idx 0) start failed"
+ start_mdt 2 || error "MDT 2 (idx 1) start failed"
+ start_mdt 3 || error "MDT 3 (idx 1000) start failed"
+ start_ost || error "Unable to start first ost"
+ start_ost2 || error "Unable to start second ost"
+
+ do_nodes $(comma_list $(mdts_nodes)) \
+ "$LCTL set_param mdt.*.enable_remote_dir=1 \
+ mdt.*.enable_remote_dir_gid=-1"
+
+ mount_client $MOUNT || error "Unable to mount client"
+
+ $LFS mkdir -c3 $MOUNT/$tdir || error "failed to make testdir"
+
+ echo "This is test file 1!" > $MOUNT/$tdir/$tfile.1 ||
+ error "failed to make test file 1"
+ echo "This is test file 2!" > $MOUNT/$tdir/$tfile.2 ||
+ error "failed to make test file 2"
+ echo "This is test file 1000!" > $MOUNT/$tdir/$tfile.1000 ||
+ error "failed to make test file 1000"
+
+ rm -rf $MOUNT/$tdir || error "failed to remove testdir"
+
+ $LFS mkdir -i1000 $MOUNT/$tdir.1000 ||
+ error "create remote dir at idx 1000 failed"
+
+ output=$($LFS df)
+ echo "=== START lfs df OUTPUT ==="
+ echo -e "$output"
+ echo "==== END lfs df OUTPUT ===="
+
+ mdtcnt=$(echo -e "$output" | grep $FSNAME-MDT | wc -l)
+ ostcnt=$(echo -e "$output" | grep $FSNAME-OST | wc -l)
+
+ echo "lfs df returned mdt count $mdtcnt and ost count $ostcnt"
+ [ $mdtcnt -eq 3 ] || error "lfs df returned wrong mdt count"
+ [ $ostcnt -eq 2 ] || error "lfs df returned wrong ost count"
+
+ echo "This is test file 1!" > $MOUNT/$tdir.1000/$tfile.1 ||
+ error "failed to make test file 1"
+ echo "This is test file 2!" > $MOUNT/$tdir.1000/$tfile.2 ||
+ error "failed to make test file 2"
+ echo "This is test file 1000!" > $MOUNT/$tdir.1000/$tfile.1000 ||
+ error "failed to make test file 1000"
+ rm -rf $MOUNT/$tdir.1000 || error "failed to remove remote_dir"
+
+ output=$($LFS mdts)
+ echo "=== START lfs mdts OUTPUT ==="
+ echo -e "$output"
+ echo "==== END lfs mdts OUTPUT ===="
+
+ echo -e "$output" | grep -v "MDTS:" | awk '{print $1}' |
+ sed 's/://g' > $TMP/mdts-actual.txt
+ sort $TMP/mdts-actual.txt -o $TMP/mdts-actual.txt
+
+ echo -e "0\n1\n1000" > $TMP/mdts-expected.txt
+
+ diff $TMP/mdts-expected.txt $TMP/mdts-actual.txt
+ result=$?
+
+ rm $TMP/mdts-expected.txt $TMP/mdts-actual.txt
+
+ [ $result -eq 0 ] || error "target_obd proc file is incorrect!"
+}
+run_test 56b "test target_obd correctness with nonconsecutive MDTs"
test_57a() { # bug 22656
do_rpc_nodes $(facet_active_host ost1) load_modules_local
}
test_58() { # bug 22658
+ combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
setup_noconfig
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
createmany -o $DIR/$tdir/$tfile-%d 100
unmount_fstype $SINGLEMDS
# restart MDS with missing llog files
start_mds || error "unable to start MDS"
- do_facet mds "$LCTL set_param fail_loc=0"
+ do_facet $SINGLEMDS "$LCTL set_param fail_loc=0"
reformat
}
run_test 58 "missing llog files must not prevent MDT from mounting"
local num
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
done
fi
+ combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
setup_noconfig || error "setting up the filesystem failed"
client_up || error "starting client failed"
test_62() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
{ skip "Need MDS version at least 2.2.51"; return 0; }
echo "disable journal for mds"
- do_facet mds $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed"
+ do_facet mds1 $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed"
start_mds && error "MDT start should fail"
echo "disable journal for ost"
do_facet ost1 $TUNE2FS -O ^has_journal $ostdev || error "tune2fs failed"
test_63() {
if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ skip "ldiskfs only test"
return
fi
- local inode_slab=$(do_facet $SINGLEMDS \
- "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
+ do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs
+ local inode_slab=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+ awk '/ldiskfs_inode_cache/ { print $5 / $6 }')
if [ -z "$inode_slab" ]; then
skip "ldiskfs module has not been loaded"
return
fi
- echo "$inode_slab ldisk inodes per page"
- [ "$inode_slab" -ge "3" ] ||
- error "ldisk inode size is too big, $inode_slab objs per page"
- return
+ echo "$inode_slab ldiskfs inodes per page"
+ [ "${inode_slab%.*}" -ge "3" ] && return 0
+
+ # If kmalloc-128 is also 1 per page - this is a debug kernel
+ # and so this is not an error.
+ local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+ awk '/^(kmalloc|size)-128 / { print $5 / $6 }')
+ # 32 128-byte chunks in 4k
+ [ "${kmalloc128%.*}" -lt "32" ] ||
+ error "ldiskfs inode too big, only $inode_slab objs/page, " \
+ "kmalloc128 = $kmalloc128 objs/page"
}
-run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+run_test 63 "Verify each page can at least hold 3 ldiskfs inodes"
test_64() {
start_mds || error "unable to start MDS"
mount_client $MOUNT || error "Unable to mount client"
stop_ost2 || error "Unable to stop second ost"
echo "$LFS df"
- $LFS df --lazy || error "lfs df failed"
+ $LFS df --lazy
umount_client $MOUNT -f || error "unmount $MOUNT failed"
cleanup_nocli || error "cleanup_nocli failed with $?"
#writeconf to remove all ost2 traces for subsequent tests
test_65() { # LU-2237
# Currently, the test is only valid for ldiskfs backend
[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
- skip "non-ldiskfs backend" && return
+ skip "ldiskfs only test" && return
local devname=$(mdsdevname ${SINGLEMDS//mds/})
local brpt=$(facet_mntpt brpt)
umount_client $MOUNT || error "umount client failed"
+ if ! combined_mgs_mds; then
+ start_mgs || error "start mgs failed"
+ fi
+
start_mdt 1 || error "MDT start failed"
start_ost || error "Unable to start OST1"
}
run_test 70d "stop MDT1, mkdir succeed, create remote dir fail"
+test_70e() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.62) ] ||
+ { skip "Need MDS version at least 2.7.62"; return 0; }
+
+ cleanup || error "cleanup failed with $?"
+
+ local mdsdev=$(mdsdevname 1)
+ local ostdev=$(ostdevname 1)
+ local mdsvdev=$(mdsvdevname 1)
+ local ostvdev=$(ostvdevname 1)
+ local opts_mds="$(mkfs_opts mds1 $mdsdev) --reformat $mdsdev $mdsvdev"
+ local opts_ost="$(mkfs_opts ost1 $ostdev) --reformat $ostdev $ostvdev"
+
+ add mds1 $opts_mds || error "add mds1 failed"
+ start_mdt 1 || error "start mdt1 failed"
+ add ost1 $opts_ost || error "add ost1 failed"
+ start_ost || error "start ost failed"
+ mount_client $MOUNT > /dev/null || error "mount client $MOUNT failed"
+
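+ # sync_lock_cancel ("sync on cancel", SoC) is expected to stay "never"
+ # with a single MDT and switch to "blocking" once additional MDTs join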
+ local soc=$(do_facet mds1 "$LCTL get_param -n \
+ mdt.*MDT0000.sync_lock_cancel")
+ [ $soc == "never" ] || error "SoC enabled on single MDS"
+
+ for i in $(seq 2 $MDSCOUNT); do
+ mdsdev=$(mdsdevname $i)
+ mdsvdev=$(mdsvdevname $i)
+ opts_mds="$(mkfs_opts mds$i $mdsdev) --reformat $mdsdev \
+ $mdsvdev"
+ add mds$i $opts_mds || error "add mds$i failed"
+ start_mdt $i || error "start mdt$i fail"
+ done
+
+ wait_dne_interconnect
+
+ for i in $(seq $MDSCOUNT); do
+ soc=$(do_facet mds$i "$LCTL get_param -n \
+ mdt.*MDT000$((i - 1)).sync_lock_cancel")
+ [ $soc == "blocking" ] || error "SoC not enabled on DNE"
+ done
+
+ for i in $(seq 2 $MDSCOUNT); do
+ stop_mdt $i || error "stop mdt$i fail"
+ done
+ soc=$(do_facet mds1 "$LCTL get_param -n \
+ mdt.*MDT0000.sync_lock_cancel")
+ [ $soc == "never" ] || error "SoC enabled on single MDS"
+ umount_client $MOUNT -f > /dev/null
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 70e "Sync-on-Cancel will be enabled by default on DNE"
+
test_71a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
if combined_mgs_mds; then
local ostdev=$(ostdevname 1)
local cmd="$E2FSCK -fnvd $mdsdev"
local fn=3
+ local add_options
[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
skip "ldiskfs only test" && return
+ if combined_mgs_mds; then
+ add_options='--reformat'
+ else
+ add_options='--reformat --replace'
+ fi
+
#tune MDT with "-O extents"
for num in $(seq $MDSCOUNT); do
add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
- --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+ $add_options $(mdsdevname $num) $(mdsvdevname $num) ||
error "add mds $num failed"
do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
error "$TUNE2FS failed on mds${num}"
done
- add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
+ add ost1 $(mkfs_opts ost1 $ostdev) $add_options $ostdev ||
error "add $ostdev failed"
- start_mgsmds || error "start mds failed"
+ start_mds || error "start mds failed"
start_ost || error "start ost failed"
mount_client $MOUNT || error "mount client failed"
run_test 72 "test fast symlink with extents flag enabled"
test_73() { #LU-3006
- load_modules
[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
error "1st tunefs failed"
add mds1 $opts_mds || error "add mds1 failed for new params"
add ost1 $opts_ost || error "add ost1 failed for new params"
+ if ! combined_mgs_mds; then
+ stop_mgs || error "stop mgs failed"
+ fi
+ reformat
return 0
}
run_test 75 "The order of --index should be irrelevant"
test_76a() {
[[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] ||
{ skip "Need MDS version at least 2.4.52" && return 0; }
+
+ if ! combined_mgs_mds; then
+ start_mgs || error "start mgs failed"
+ fi
setup
local MDMB_PARAM="osc.*.max_dirty_mb"
echo "Change MGS params"
}
run_test 76b "verify params log setup correctly"
+test_76c() {
+ [[ $(lustre_version_code mgs) -ge $(version_code 2.8.54) ]] ||
+ { skip "Need MDS version at least 2.4.52" && return 0; }
+ setupall
+ local MASK_PARAM="mdd.*.changelog_mask"
+ echo "Change changelog_mask"
+ do_facet mgs $LCTL set_param -P $MASK_PARAM=-CLOSE ||
+ error "Can't change changlog_mask"
+ wait_update $(facet_host mds) "$LCTL get_param -n $MASK_PARAM |
+ grep 'CLOSE'" ""
+
+ echo "Check the value is stored after mds remount"
+ stop_mds || error "Failed to stop MDS"
+ start_mds || error "Failed to start MDS"
+ local CHANGELOG_MASK=$(do_facet mgs $LCTL get_param -n $MASK_PARAM)
+ echo $CHANGELOG_MASK | grep CLOSE > /dev/null &&
+ error "changelog_mask is not changed"
+
+ stopall
+}
+run_test 76c "verify changelog_mask is applied with set_param -P"
+
+test_76d() { #LU-9399
+ setupall
+
+ local xattr_cache="llite.*.xattr_cache"
+ local cmd="$LCTL get_param -n $xattr_cache | head -1"
+ local new=$((($(eval $cmd) + 1) % 2))
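+ # xattr_cache is a boolean 0/1 parameter; (current + 1) % 2 flips it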
+
+ echo "lctl set_param -P llite.*.xattr_cache=$new"
+ do_facet mgs $LCTL set_param -P $xattr_cache=$new ||
+ error "Can't change xattr_cache"
+ wait_update $HOSTNAME "$cmd" "$new"
+
+ echo "Check $xattr_cache on client $MOUNT"
+ umount_client $MOUNT || error "umount $MOUNT failed"
+ mount_client $MOUNT || error "mount $MOUNT failed"
+ [ $(eval $cmd) -eq $new ] ||
+ error "$xattr_cache != $new on client $MOUNT"
+
+ echo "Check $xattr_cache on the new client $MOUNT2"
+ mount_client $MOUNT2 || error "mount $MOUNT2 failed"
+ [ $(eval $cmd) -eq $new ] ||
+ error "$xattr_cache != $new on client $MOUNT2"
+ umount_client $MOUNT2 || error "umount $MOUNT2 failed"
+
+ stopall
+}
+run_test 76d "verify llite.*.xattr_cache can be set by 'set_param -P' correctly"
+
test_77() { # LU-3445
local server_version=$(lustre_version_code $SINGLEMDS)
-
- [[ $server_version -ge $(version_code 2.2.60) ]] &&
- [[ $server_version -le $(version_code 2.4.0) ]] &&
- skip "Need MDS version < 2.2.60 or > 2.4.0" && return
+ [[ $server_version -ge $(version_code 2.8.55) ]] ||
+ { skip "Need MDS version 2.8.55+ "; return; }
if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then
is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
local fs2ostvdev=$(ostvdevname 1_2)
local fsname=test1234
local mgsnid
- local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
+ local failnid="$(h2nettype 1.2.3.4),$(h2nettype 4.3.2.1)"
+
+ combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \
--reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed"
error "start fs2mds failed"
mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,)
- [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid"
+ mgsnid="0.0.0.0@tcp,$mgsnid,$mgsnid:$mgsnid"
- add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \
+ add fs2ost --mgsnode=$mgsnid $(mkfs_opts ost1 $fs2ostdev) \
--failnode=$failnid --fsname=$fsname \
--reformat $fs2ostdev $fs2ostvdev ||
error "add fs2ost failed"
test_78() {
[[ $(facet_fstype $SINGLEMDS) != ldiskfs ||
$(facet_fstype ost1) != ldiskfs ]] &&
- skip "only applicable to ldiskfs-based MDTs and OSTs" && return
+ skip "ldiskfs only test" && return
# reformat the Lustre filesystem with a smaller size
+ local saved_MDSCOUNT=$MDSCOUNT
local saved_MDSSIZE=$MDSSIZE
+ local saved_OSTCOUNT=$OSTCOUNT
local saved_OSTSIZE=$OSTSIZE
+ MDSCOUNT=1
+ OSTCOUNT=1
MDSSIZE=$((MDSSIZE - 20000))
OSTSIZE=$((OSTSIZE - 20000))
reformat || error "(1) reformat Lustre filesystem failed"
local i
local file
local num_files=100
+
mkdir $MOUNT/$tdir || error "(3) mkdir $MOUNT/$tdir failed"
+ $LFS df; $LFS df -i
for i in $(seq $num_files); do
file=$MOUNT/$tdir/$tfile-$i
- dd if=/dev/urandom of=$file count=1 bs=1M ||
+ dd if=/dev/urandom of=$file count=1 bs=1M || {
+ $LCTL get_param osc.*.cur*grant*
+ $LFS df; $LFS df -i;
+ # stop creating files if there is no more space
+ if [ ! -e $file ]; then
+ num_files=$((i - 1))
+ break
+ fi
+
+ $LFS getstripe -v $file
+ local ost_idx=$($LFS getstripe -i $file)
+ do_facet ost$((ost_idx + 1)) \
+ $LCTL get_param obdfilter.*.*grant*
error "(4) create $file failed"
+ }
done
# unmount the Lustre filesystem
# unmount and reformat the Lustre filesystem
cleanup || error "(12) cleanup Lustre filesystem failed"
combined_mgs_mds || stop_mgs || error "(13) stop mgs failed"
+
+ MDSCOUNT=$saved_MDSCOUNT
+ OSTCOUNT=$saved_OSTCOUNT
reformat || error "(14) reformat Lustre filesystem failed"
}
run_test 78 "run resize2fs on MDT and OST filesystems"
test_81() { # LU-4665
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
{ skip "Need MDS version at least 2.6.54" && return; }
- [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; }
+ [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; }
stopall
}
run_test 81 "sparse OST indexing"
-# Wait OSTs to be active on both client and MDT side.
-wait_osts_up() {
- local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
- awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
- wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
- error "wait_update OSTs up on client failed"
-
- cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
- awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
- wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
- error "wait_update OSTs up on MDT failed"
-}
-
# Here we exercise the stripe placement functionality on a file system that
# has formatted the OST with a random index. With the file system the following
# functionality is tested:
test_82a() { # LU-4665
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
{ skip "Need MDS version at least 2.6.54" && return; }
- [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; }
+ [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; }
stopall
local i
local index
local ost_indices
+ local LOV_V1_INSANE_STRIPE_COUNT=65532
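+ # RANDOM is 0..32767, so RANDOM * 2 gives an even index up to 65534;
+ # the modulo keeps it below LOV_V1_INSANE_STRIPE_COUNT, the largest
+ # stripe count/index value the tools accept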
for i in $(seq $OSTCOUNT); do
- index=$((RANDOM * 2))
+ index=$(((RANDOM * 2) % LOV_V1_INSANE_STRIPE_COUNT))
ost_indices+=" $index"
done
ost_indices=$(comma_list $ost_indices)
test_82b() { # LU-4665
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
{ skip "Need MDS version at least 2.6.54" && return; }
- [[ $OSTCOUNT -ge 4 ]] || { skip_env "Need at least 4 OSTs" && return; }
+ [[ $OSTCOUNT -ge 4 ]] || { skip_env "needs >= 4 OSTs" && return; }
stopall
local i
local index
local ost_indices
+ local LOV_V1_INSANE_STRIPE_COUNT=65532
for i in $(seq $OSTCOUNT); do
- index=$((RANDOM * 2))
+ index=$(((RANDOM * 2) % LOV_V1_INSANE_STRIPE_COUNT))
ost_indices+=" $index"
done
ost_indices=$(comma_list $ost_indices)
wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME|
sort -u | tr '\n' ' ' " "$ost_targets_uuid" ||
error "wait_update $ost_pool failed"
- pool_list $ost_pool || error "list OST pool $ost_pool failed"
+ wait_update_facet $SINGLEMDS "$LCTL pool_list $ost_pool | wc -l" 4 ||
+ error "wait_update pool_list $ost_pool failed"
# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
# then the OSTs must be the members of the pool.
test_83() {
[[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] ||
{ skip "Need OST version at least 2.6.91" && return 0; }
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ if [ $(facet_fstype ost1) != ldiskfs ]; then
+ skip "ldiskfs only test"
return
fi
# Mount the OST as an ldiskfs filesystem.
log "mount the OST $dev as a $fstype filesystem"
add ost1 $(mkfs_opts ost1 $dev) $FSTYPE_OPT \
- --reformat $dev $dev > /dev/null ||
+ --reformat $dev > /dev/null ||
error "format ost1 error"
if ! test -b $dev; then
run_test 83 "ENOSPACE on OST doesn't cause message VFS: \
Busy inodes after unmount ..."
-recovery_time_min() {
- local CONNECTION_SWITCH_MIN=5
- local CONNECTION_SWITCH_INC=5
- local CONNECTION_SWITCH_MAX
- local RECONNECT_DELAY_MAX
- local INITIAL_CONNECT_TIMEOUT
- local max
- local TO_20
-
- #CONNECTION_SWITCH_MAX=min(50, max($CONNECTION_SWITCH_MIN,$TIMEOUT)
- (($CONNECTION_SWITCH_MIN>$TIMEOUT)) && \
- max=$CONNECTION_SWITCH_MIN || max=$TIMEOUT
- (($max<50)) && CONNECTION_SWITCH_MAX=$max || CONNECTION_SWITCH_MAX=50
-
- #INITIAL_CONNECT_TIMEOUT = max(CONNECTION_SWITCH_MIN, \
- #obd_timeout/20)
- TO_20=$(($TIMEOUT/20))
- (($CONNECTION_SWITCH_MIN>$TO_20)) && \
- INITIAL_CONNECT_TIMEOUT=$CONNECTION_SWITCH_MIN || \
- INITIAL_CONNECT_TIMEOUT=$TO_20
-
- RECONNECT_DELAY_MAX=$(($CONNECTION_SWITCH_MAX+$CONNECTION_SWITCH_INC+ \
- $INITIAL_CONNECT_TIMEOUT))
- echo $((2*$RECONNECT_DELAY_MAX))
-}
-
test_84() {
local facet=$SINGLEMDS
local num=$(echo $facet | tr -d "mds")
local correct_clients
local wrap_up=5
- load_modules
echo "start mds service on $(facet_active_host $facet)"
start_mds \
"-o recovery_time_hard=$time_min,recovery_time_soft=$time_min" $@ ||
}
run_test 85 "osd_ost init: fail ea_fid_set"
+cleanup_86() {
+ trap 0
+
+ # ost1 has already registered to the MGS before the reformat.
+ # So after reformatting it with option "-G", it could not be
+ # mounted to the MGS. Cleanup the system for subsequent tests.
+ reformat_and_config
+}
+
test_86() {
+ local server_version=$(lustre_version_code $SINGLEMDS)
[ "$(facet_fstype ost1)" = "zfs" ] &&
skip "LU-6442: no such mkfs params for ZFS OSTs" && return
+ [[ $server_version -ge $(version_code 2.7.56) ]] ||
+ { skip "Need server version newer than 2.7.55"; return 0; }
local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \
--reformat $(ostdevname 1) $(ostvdevname 1)"
echo "params: $opts"
+ trap cleanup_86 EXIT ERR
+
+ stopall
add ost1 $opts || error "add ost1 failed with new params"
local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
[[ $FOUNDSIZE == $NEWSIZE ]] ||
error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE"
- return 0
+
+ cleanup_86
}
run_test 86 "Replacing mkfs.lustre -G option"
test_87() { #LU-6544
- [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.7.56) ]] ||
- { skip "Need MDS version at least 2.7.56" && return; }
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] ||
+ { skip "Need MDS version at least 2.9.51" && return; }
[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
- { skip "Only applicable to ldiskfs-based MDTs" && return; }
- [[ $OSTCOUNT -gt 69 ]] &&
+ { skip "ldiskfs only test" && return; }
+ [[ $OSTCOUNT -gt 59 ]] &&
{ skip "Ignore wide striping situation" && return; }
local mdsdev=$(mdsdevname 1)
local file=$DIR/$tfile
local mntpt=$(facet_mntpt $SINGLEMDS)
local used_xattr_blk=0
- local inode_size=${1:-512}
+ local inode_size=${1:-1024}
local left_size=0
local xtest="trusted.test"
local value
local orig
local i
+ local stripe_cnt=$(($OSTCOUNT + 2))
- #Please see LU-6544 for MDT inode size calculation
- if [ $OSTCOUNT -gt 26 ]; then
+ # Please see ldiskfs_make_lustre() for the MDT inode size calculation
+ if [ $stripe_cnt -gt 16 ]; then
inode_size=2048
- elif [ $OSTCOUNT -gt 5 ]; then
- inode_size=1024
fi
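+ # left_size is a rough budget of the in-inode xattr space that remains
+ # for the trusted.test EA below: the fixed inode/EA overheads and
+ # 24 bytes per stripe of the LOV EA are subtracted from inode_size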
left_size=$(expr $inode_size - \
156 - \
32 - \
- 32 - $OSTCOUNT \* 24 - 16 - 3 - \
+ 32 - 40 \* 3 - 32 \* 3 - $stripe_cnt \* 24 - 16 - 3 - \
24 - 16 - 3 - \
24 - 18 - $(expr length $tfile) - 16 - 4)
if [ $left_size -le 0 ]; then
unload_modules
reformat
- add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \
+ add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$stripe_cnt \
--reformat $mdsdev $mdsvdev || error "add mds1 failed"
start_mdt 1 > /dev/null || error "start mdt1 failed"
for i in $(seq $OSTCOUNT); do
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed"
- $GETSTRIPE $file || error "$GETSTRIPE $file failed"
- i=$($GETSTRIPE -c $file)
+ $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ error "Create file with 3 components failed"
+ $TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed"
+ i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed"
if [ $i -ne $OSTCOUNT ]; then
left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
echo -n "Since only $i out $OSTCOUNT OSTs are used, "
}
run_test 88 "check the default mount options can be overridden"
+test_89() { # LU-7131
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.54) ]] ||
+ { skip "Need MDT version at least 2.9.54" && return 0; }
+
+ local key=failover.node
+ local val1=192.0.2.254@tcp0 # Reserved IPs, see RFC 5735
+ local val2=192.0.2.255@tcp0
+ local mdsdev=$(mdsdevname 1)
+ local params
+
+ stopall
+
+ [ $(facet_fstype mds1) == zfs ] && import_zpool mds1
+ # Check that parameters are added correctly
+ echo "tunefs --param $key=$val1"
+ do_facet mds "$TUNEFS --param $key=$val1 $mdsdev >/dev/null" ||
+ error "tunefs --param $key=$val1 failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
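+ # strip everything up to "Parameters:" and from "exiting" on, leaving
+ # only the parameter list printed by tunefs --dryrun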
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=$val1) = "1" ] ||
+ error "on-disk parameter not added correctly via tunefs"
+
+ # Check that parameters replace existing instances when added
+ echo "tunefs --param $key=$val2"
+ do_facet mds "$TUNEFS --param $key=$val2 $mdsdev >/dev/null" ||
+ error "tunefs --param $key=$val2 failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=) = "1" ] ||
+ error "on-disk parameter not replaced via tunefs"
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=$val2) = "1" ] ||
+ error "on-disk parameter not replaced correctly via tunefs"
+
+ # Check that a parameter is erased properly
+ echo "tunefs --erase-param $key"
+ do_facet mds "$TUNEFS --erase-param $key $mdsdev >/dev/null" ||
+ error "tunefs --erase-param $key failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=) = "0" ] ||
+ error "on-disk parameter not erased correctly via tunefs"
+
+ # Check that all the parameters are erased
+ echo "tunefs --erase-params"
+ do_facet mds "$TUNEFS --erase-params $mdsdev >/dev/null" ||
+ error "tunefs --erase-params failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ -z $params ] ||
+ error "all on-disk parameters not erased correctly via tunefs"
+
+ # Check the order of options --erase-params and --param
+ echo "tunefs --param $key=$val1 --erase-params"
+ do_facet mds \
+ "$TUNEFS --param $key=$val1 --erase-params $mdsdev >/dev/null"||
+ error "tunefs --param $key=$val1 --erase-params failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ $(echo $params | tr ' ' '\n') == "$key=$val1" ] ||
+ error "on-disk param not added correctly with --erase-params"
+
+ reformat
+}
+run_test 89 "check tunefs --param and --erase-param{s} options"
+
# $1 test directory
# $2 (optional) value of max_mod_rpcs_in_flight to set
check_max_mod_rpcs_in_flight() {
local nid
local found
- load_modules
-
[[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
{ skip "Need OST version at least 2.7.63" && return 0; }
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
}
run_test 91 "evict-by-nid support"
+generate_ldev_conf() {
+ # generate an ldev.conf file
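+ # each generated line has the form (illustrative):
+ # <host> <failover host or -> <label> [<fstype>:]<device>
+ # e.g. oss1  -  lustre-OST0001  zfs:lustre-ost1/ost1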
+ local ldevconfpath=$1
+ local fstype=
+ local fsldevformat=""
+ touch $ldevconfpath
+
+ fstype=$(facet_fstype mgs)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t-\t%s-MGS0000\t%s%s\n" \
+ $mgs_HOST \
+ $FSNAME \
+ $fsldevformat \
+ $(mgsdevname) > $ldevconfpath
+
+ local mdsfo_host=$mdsfailover_HOST;
+ if [ -z "$mdsfo_host" ]; then
+ mdsfo_host="-"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ fstype=$(facet_fstype mds$num)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t%s\t%s-MDT%04d\t%s%s\n" \
+ $mds_HOST \
+ $mdsfo_host \
+ $FSNAME \
+ $num \
+ $fsldevformat \
+ $(mdsdevname $num) >> $ldevconfpath
+ done
+
+ local ostfo_host=$ostfailover_HOST;
+ if [ -z "$ostfo_host" ]; then
+ ostfo_host="-"
+ fi
+
+ for num in $(seq $OSTCOUNT); do
+ fstype=$(facet_fstype ost$num)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t%s\t%s-OST%04d\t%s%s\n" \
+ $ost_HOST \
+ $ostfo_host \
+ $FSNAME \
+ $num \
+ $fsldevformat \
+ $(ostdevname $num) >> $ldevconfpath
+ done
+
+ echo "----- $ldevconfpath -----"
+ cat $ldevconfpath
+ echo "--- END $ldevconfpath ---"
+
+}
+
+generate_nids() {
+ # generate a nids file (mapping between hostname to nid)
+ # looks like we only have the MGS nid available to us
+ # so just echo that to a file
+ local nidspath=$1
+ echo -e "${mgs_HOST}\t${MGSNID}" > $nidspath
+
+ echo "----- $nidspath -----"
+ cat $nidspath
+ echo "--- END $nidspath ---"
+}
+
+compare_ldev_output() {
+ ldev_output=$1
+ expected_output=$2
+
+ sort $expected_output -o $expected_output
+ sort $ldev_output -o $ldev_output
+
+ echo "-- START OF LDEV OUTPUT --"
+ cat $ldev_output
+ echo "--- END OF LDEV OUTPUT ---"
+
+ echo "-- START OF EXPECTED OUTPUT --"
+ cat $expected_output
+ echo "--- END OF EXPECTED OUTPUT ---"
+
+ diff $expected_output $ldev_output
+ return $?
+}
+
+test_92() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ echo "Host is $(hostname)"
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # echo the mgs nid and compare it to environment variable MGSNID
+ # also, ldev.conf and nids are server-side files, so use the OSS
+ # hostname
+ local output
+ local rc
+
+ output=$($LDEV -c $LDEVCONFPATH -H $ost_HOST -n $NIDSPATH echo %m)
+ rc=$?
+
+ echo "-- START OF LDEV OUTPUT --"
+ echo -e "$output"
+ echo "--- END OF LDEV OUTPUT ---"
+
+ # ldev failed, error ($? after the echoes above would only reflect
+ # echo, so check the return code saved right after the substitution)
+ if [ $rc -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed to execute!"
+ fi
+
+ # need to process multiple lines because of combined MGS and MDS
+ echo -e $output | awk '{ print $2 }' | while read -r line ; do
+ if [ "$line" != "$MGSNID" ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed mgs nid '$line', expected '$MGSNID'"
+ fi
+ done
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 92 "ldev returns MGS NID correctly in command substitution"
+
+test_93() {
+ [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+ reformat
+ #start mgs or mgs/mdt0
+ if ! combined_mgs_mds ; then
+ start_mgs
+ start_mdt 1
+ else
+ start_mdt 1
+ fi
+
+ start_ost || error "OST0 start fail"
+
+ #define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
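+ # with this fail_loc, fail_val=10 presumably adds a ~10s delay per
+ # target write so the background MDT registrations below overlap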
+ do_facet mgs "$LCTL set_param fail_val = 10 fail_loc=0x8000090e"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num &
+ done
+
+ mount_client $MOUNT || error "mount client fails"
+ wait_osc_import_state mds ost FULL
+ wait_osc_import_state client ost FULL
+ check_mount || error "check_mount failed"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 93 "register mulitple MDT at the same time"
+
+test_94() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME > $LDEV_OUTPUT
+
+ # ldev failed, error
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ local EXPECTED_OUTPUT=$TMP/ldev-expected.txt
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct hostlist!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 94 "ldev outputs correct labels for file system name query"
+
+test_95() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # SUCCESS CASES
+ # file sys filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F failed!"
+ fi
+
+ # local filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -l &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -l failed!"
+ fi
+
+ # foreign filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f failed!"
+ fi
+
+ # all filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a failed!"
+ fi
+
+ # FAILURE CASES
+ # all & file sys
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -F $FSNAME &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -F incorrectly succeeded"
+ fi
+
+ # all & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -f incorrectly succeeded"
+ fi
+
+ # all & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -l incorrectly succeeded"
+ fi
+
+ # foreign & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f and -l incorrectly succeeded"
+ fi
+
+ # file sys & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -l incorrectly succeeded"
+ fi
+
+ # file sys & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -f incorrectly succeeded"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 95 "ldev should only allow one label filter"
+
+test_96() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \
+ echo %H-%b | \
+ awk '{print $2}' > $LDEV_OUTPUT
+
+ # ldev failed, error
+ if [ ${PIPESTATUS[0]} -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT
+
+ if [ "$mgs_HOST" == "$mds_HOST" ]; then
+ for num in $(seq $MDSCOUNT); do
+ echo "$mds_HOST-$(facet_fstype mds$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ if [ "$mgs_HOST" == "$ost_HOST" ]; then
+ for num in $(seq $OSTCOUNT); do
+ echo "$ost_HOST-$(facet_fstype ost$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+
+test_97() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo -e "\nMDT role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute for mdt role!"
+ fi
+
+	# start from a clean expected-output file
+	rm -f $EXPECTED_OUTPUT
+	for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mdt role!"
+ fi
+
+ echo -e "\nOST role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for ost role!"
+ fi
+
+ rm $EXPECTED_OUTPUT
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for ost role!"
+ fi
+
+ echo -e "\nMGS role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for mgs role!"
+ fi
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mgs role!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct ouput when querying based on role"
+
+test_98()
+{
+	local mountopt
+
+ setup
+ check_mount || error "mount failed"
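+	# build an oversized option string: 401 copies of "user_xattr"
+	# (~4.4KB) should trip the "too long" mount_opts length check
+	# rather than overflow the buffer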
+ mountopt="user_xattr"
+ for ((x = 1; x <= 400; x++)); do
+ mountopt="$mountopt,user_xattr"
+ done
+ remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ error "Buffer overflow check failed"
+ cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+test_99()
+{
+ [[ $(facet_fstype ost1) != ldiskfs ]] &&
+ { skip "ldiskfs only test" && return; }
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+ { skip "Need OST version at least 2.8.57" && return 0; }
+
+ local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ --reformat $(ostdevname 1) $(ostvdevname 1)"
+	do_facet ost1 $DEBUGFS -c -R stats $(ostdevname 1) | grep "meta_bg" &&
+		skip "meta_bg already set" && return
+
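+	# opts holds the *name* of the options variable; ${!opts}
+	# dereferences it so --mkfsoptions can be spliced in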
+ local opts=ost_opts
+ if [[ ${!opts} != *mkfsoptions* ]]; then
+ eval opts=\"${!opts} \
+ --mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+ else
+ local val=${!opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+ eval opts='${val}'
+ fi
+
+ echo "params: $opts"
+
+ add ost1 $opts || error "add ost1 failed with new params"
+
+	do_facet ost1 $DEBUGFS -c -R stats $(ostdevname 1) | grep "meta_bg" ||
+		error "meta_bg is not set"
+
+ reformat
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+ reformat
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ # Desired output
+ # MGS:
+ # 0@lo
+ # lustre-MDT0000:
+ # 0@lo
+ # lustre-OST0000:
+ # 0@lo
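+	# each awk exits 0 only if the expected label appears somewhere
+	# in the lshowmount output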
+	do_facet mgs 'lshowmount -v' | awk 'BEGIN {rc=1} /MGS:/ {rc=0}
+		END {exit rc}' || error "lshowmount has no MGS output"
+
+	do_facet mds1 'lshowmount -v' | awk 'BEGIN {rc=1}
+		/-MDT0000:/ {rc=0} END {exit rc}' ||
+		error "lshowmount has no MDT0 output"
+
+	do_facet ost1 'lshowmount -v' | awk 'BEGIN {rc=1}
+		/-OST0000:/ {rc=0} END {exit rc}' ||
+		error "lshowmount has no OST0 output"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+ local createmany_oid
+ local dev=$FSNAME-OST0000-osc-MDT0000
+ setup
+
+ createmany -o $DIR1/$tfile-%d 50000 &
+ createmany_oid=$!
+ # MDT->OST reconnection causes MDT<->OST last_id synchornisation
+ # via osp_precreate_cleanup_orphans.
+ for ((i = 0; i < 100; i++)); do
+ for ((k = 0; k < 10; k++)); do
+ do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+ "$LCTL --device $dev activate"
+ done
+
+ ls -asl $MOUNT | grep '???' &&
+ (kill -9 $createmany_oid &>/dev/null; \
+ error "File hasn't object on OST")
+
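+		# kill -s 0 only probes whether createmany is still alive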
+ kill -s 0 $createmany_oid || break
+ done
+ wait $createmany_oid
+ cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+ cleanup || error "cleanup failed with $?"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+ if [ $mds1fstype == ldiskfs ] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
+ # unload all and only load libcfs to allow fail_loc setting
+ do_facet mds1 lustre_rmmod || error "unable to unload modules"
+ do_facet mds1 modprobe libcfs || error "libcfs not loaded"
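+	# the pipe is escaped so grep runs on the facet, not locally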
+ do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+ #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a
+ do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+ error "mdt start must fail"
+ do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+ error "mdt start must not fail"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
+test_renamefs() {
+ local newname=$1
+
+ echo "rename $FSNAME to $newname"
+
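+	# tunefs.lustre --rename rewrites the fsname stored on each
+	# target; ZFS targets also need their pool reimported under
+	# the new name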
+	if ! combined_mgs_mds; then
+		local devname=$(mgsdevname)
+
+		do_facet mgs \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $devname" ||
+			error "(7) Fail to rename MGS"
+		if [ "$(facet_fstype mgs)" = "zfs" ]; then
+			reimport_zpool mgs $newname-mgs
+		fi
+	fi
+
+	for num in $(seq $MDSCOUNT); do
+		local devname=$(mdsdevname $num)
+
+		do_facet mds${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $devname" ||
+			error "(8) Fail to rename MDT $num"
+		if [ "$(facet_fstype mds${num})" = "zfs" ]; then
+			reimport_zpool mds${num} $newname-mdt${num}
+		fi
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		local devname=$(ostdevname $num)
+
+		do_facet ost${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $devname" ||
+			error "(9) Fail to rename OST $num"
+		if [ "$(facet_fstype ost${num})" = "zfs" ]; then
+			reimport_zpool ost${num} $newname-ost${num}
+		fi
+	done
+}
+
+test_103_set_pool() {
+ local pname=$1
+ local ost_x=$2
+
+ do_facet mgs $LCTL pool_add $FSNAME.$pname ${FSNAME}-$ost_x ||
+ error "Fail to add $ost_x to $FSNAME.$pname"
+	wait_update $HOSTNAME \
+		"$LCTL get_param -n lov.$FSNAME-clilov-*.pools.$pname |
+		grep $ost_x" "$FSNAME-${ost_x}_UUID" ||
+		error "$ost_x is NOT in pool $FSNAME.$pname"
+}
+
+test_103_check_pool() {
+ local save_fsname=$1
+ local errno=$2
+
+ stat $DIR/$tdir/test-framework.sh ||
+ error "($errno) Fail to stat"
+ do_facet mgs $LCTL pool_list $FSNAME.pool1 ||
+ error "($errno) Fail to list $FSNAME.pool1"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname ||
+ error "($errno) Fail to list $FSNAME.$save_fsname"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname |
+ grep ${FSNAME}-OST0000 ||
+ error "($errno) List $FSNAME.$save_fsname is invalid"
+
+ local pname=$($LFS getstripe --pool $DIR/$tdir/d0)
+ [ "$pname" = "$save_fsname" ] ||
+ error "($errno) Unexpected pool name $pname"
+}
+
+test_103() {
+ check_mount_and_prep
+ rm -rf $DIR/$tdir
+ mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir"
+ cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
+ error "(2) Fail to copy test-framework.sh"
+
+ do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
+ error "(3) Fail to create $FSNAME.pool1"
+	# create a second pool whose name matches the fsname itself
+ do_facet mgs $LCTL pool_new $FSNAME.$FSNAME ||
+ error "(4) Fail to create $FSNAME.$FSNAME"
+
+ test_103_set_pool $FSNAME OST0000
+
+ $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+ error "(6) Fail to setstripe on $DIR/$tdir/d0"
+
+ KEEP_ZPOOL=true
+ stopall
+
+ test_renamefs mylustre
+
+ local save_fsname=$FSNAME
+ FSNAME="mylustre"
+ setupall
+
+ test_103_check_pool $save_fsname 7
+
+ if [ $OSTCOUNT -ge 2 ]; then
+ test_103_set_pool $save_fsname OST0001
+ fi
+
+ $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+ error "(16) Fail to setstripe on $DIR/$tdir/f0"
+
+ stopall
+
+ test_renamefs tfs
+
+ FSNAME="tfs"
+ setupall
+
+ test_103_check_pool $save_fsname 17
+
+ stopall
+
+ test_renamefs $save_fsname
+
+ FSNAME=$save_fsname
+ setupall
+ KEEP_ZPOOL=false
+}
+run_test 103 "rename filesystem name"
+
+test_104() { # LU-6952
+ local mds_mountopts=$MDS_MOUNT_OPTS
+ local ost_mountopts=$OST_MOUNT_OPTS
+ local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
+ local lctl_ver=$(do_facet $SINGLEMDS $LCTL --version |
+ awk '{ print $2 }')
+
+	[[ $(version_code $lctl_ver) -lt $(version_code 2.9.55) ]] &&
+		{ skip "Need utils version at least 2.9.55" && return 0; }
+
+ # specify "acl" in mount options used by mkfs.lustre
+ if [ -z "$MDS_MOUNT_FS_OPTS" ]; then
+ MDS_MOUNT_FS_OPTS="acl,user_xattr"
+	else
+		MDS_MOUNT_FS_OPTS="${MDS_MOUNT_FS_OPTS},acl,user_xattr"
+ fi
+
+ echo "mountfsopt: $MDS_MOUNT_FS_OPTS"
+
+	# reformat/remount the MDT to apply the MDS_MOUNT_FS_OPTS options
+ formatall
+ if [ -z "$MDS_MOUNT_OPTS" ]; then
+ MDS_MOUNT_OPTS="-o noacl"
+ else
+ MDS_MOUNT_OPTS="${MDS_MOUNT_OPTS},noacl"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ start mds$num $(mdsdevname $num) $MDS_MOUNT_OPTS ||
+ error "Failed to start MDS"
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ start ost$num $(ostdevname $num) $OST_MOUNT_OPTS ||
+ error "Failed to start OST"
+ done
+
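+	# "noacl" at mount time should override the "acl" default that
+	# mkfs.lustre recorded, so setting a default ACL must fail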
+ mount_client $MOUNT
+ setfacl -m "d:$RUNAS_ID:rwx" $MOUNT &&
+ error "ACL is applied when FS is mounted with noacl."
+
+ MDS_MOUNT_OPTS=$mds_mountopts
+ OST_MOUNT_OPTS=$ost_mountopts
+ MDS_MOUNT_FS_OPTS=$mds_mountfsopts
+
+ formatall
+ setupall
+}
+run_test 104 "Make sure user defined options are reflected in mount"
+
+error_and_umount() {
+ umount $TMP/$tdir
+ rmdir $TMP/$tdir
+	error "$@"
+}
+
+test_105() {
+ cleanup -f
+ reformat
+ setup
+ mkdir -p $TMP/$tdir
+ mount --bind $DIR $TMP/$tdir || error "mount bind mnt pt failed"
+ rm -f $TMP/$tdir/$tfile
+ rm -f $TMP/$tdir/${tfile}1
+
+ # Files should not be created in ro bind mount point
+ # remounting from rw to ro
+ mount -o remount,ro $TMP/$tdir ||
+ error_and_umount "readonly remount of bind mnt pt failed"
+ touch $TMP/$tdir/$tfile &&
+ error_and_umount "touch succeeds on ro bind mnt pt"
+ [ -e $TMP/$tdir/$tfile ] &&
+ error_and_umount "file created on ro bind mnt pt"
+
+ # Files should be created in rw bind mount point
+ # remounting from ro to rw
+ mount -o remount,rw $TMP/$tdir ||
+ error_and_umount "read-write remount of bind mnt pt failed"
+ touch $TMP/$tdir/${tfile}1 ||
+ error_and_umount "touch fails on rw bind mnt pt"
+ [ -e $TMP/$tdir/${tfile}1 ] ||
+ error_and_umount "file not created on rw bind mnt pt"
+ umount $TMP/$tdir || error "umount of bind mnt pt failed"
+ rmdir $TMP/$tdir
+ cleanup || error "cleanup failed with $?"
+}
+run_test 105 "check file creation for ro and rw bind mnt pt"
+
+test_106() {
+ local repeat=5
+
+ reformat
+ setupall
+ mkdir -p $DIR/$tdir || error "create $tdir failed"
+	$SETSTRIPE -c 1 -i 0 $DIR/$tdir || error "setstripe $tdir failed"
+#define OBD_FAIL_CAT_RECORDS 0x1312
+ do_facet mds1 $LCTL set_param fail_loc=0x1312 fail_val=$repeat
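+	# fail_val=$repeat presumably caps the catalog record count so
+	# the llog catalog wraps after $repeat plain llogs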
+
+ for ((i = 1; i <= $repeat; i++)); do
+
+ #one full plain llog
+ createmany -o $DIR/$tdir/f- 64768
+
+ createmany -u $DIR/$tdir/f- 64768
+ done
+ wait_delete_completed $((TIMEOUT * 7))
+# if the osp code is buggy, cleanup fails here with
+# "ASSERTION( thread->t_flags != SVC_RUNNING ) failed" in osp_sync_thread()
+ do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0
+
+ cleanupall
+}
+run_test 106 "check osp llog processing when catalog is wrapped"
+
+test_107() {
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] ||
+		{ skip "Need MDS version at least 2.10.50"; return; }
+
+ start_mgsmds || error "start_mgsmds failed"
+ start_ost || error "unable to start OST"
+
+ # add unknown configuration parameter.
+ local PARAM="$FSNAME-OST0000.ost.unknown_param=50"
+ do_facet mgs "$LCTL conf_param $PARAM"
+ cleanup_nocli || error "cleanup_nocli failed with $?"
+ load_modules
+
+ # unknown param should be ignored while mounting.
+ start_ost || error "unable to start OST after unknown param set"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 107 "Unknown config param should not fail target mounting"
+
if ! combined_mgs_mds ; then
stop mgs
fi