ONLY=${ONLY:-"$*"}
# bug number for skipped test: LU-8972
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
}
run_test 5f "mds down, cleanup after failed mount (bug 2712)"
+test_5g() {
+ modprobe lustre
+ [ $(lustre_version_code client) -lt $(version_code 2.9.53) ] &&
+ { skip "automount of debugfs missing before 2.9.53" && return 0; }
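+	# a fixed client is expected to remount debugfs automatically, so
+	# lctl must still reach the devices list after the explicit umount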
+ umount /sys/kernel/debug
+	$LCTL get_param -n devices | egrep -v "error" ||
+		error "lctl can't access debugfs data"
+ grep " debugfs " /etc/mtab || error "debugfs failed to remount"
+}
+run_test 5g "handle missing debugfs"
+
test_6() {
setup
manual_umount_client
fi
# check MDTs too
- for num in $(seq $MDSCOUNT); do
- local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001)
- local MPROC="osc.$mdtosc.active"
- local MAX=30
- local WAIT=0
- while [ 1 ]; do
- sleep 5
- RESULT=$(do_facet mds${num} "$LCTL get_param -n $MPROC")
- [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC"
- if [ $RESULT -eq $DEAC ]; then
- echo -n "MDT deactivated also after"
- echo "$WAIT sec (got $RESULT)"
- break
- fi
- WAIT=$((WAIT + 5))
- if [ $WAIT -eq $MAX ]; then
- error "MDT active: wanted $DEAC got $RESULT"
- fi
- echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated"
- done
- done
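+	# wait until every MDT reports the OSP for OST0001 as deactivated
+	# (0); assumed helper signature:
+	# wait_osp_active <ost|mds> <tgt_name> <tgt_index> <expected>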
+ wait_osp_active ost ${FSNAME}-OST0001 1 0
+
# test new client starts deactivated
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
local IMGTYPE=$(facet_fstype $SINGLEMDS)
- tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
+ tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\' | grep -v "2_9")
if [ -z "$tarballs" ]; then
skip "No applicable tarballs found"
local qval
local cmd
+	# LU-2435: if the underlying ZFS doesn't support userobj_accounting,
+	# Lustre estimates the object count usage, which fails the quota
+	# verification in test_32b. The object quota usage will only be
+	# accurate once zfs-0.7.0 is released.
+ [ $fstype == "zfs" ] && {
+ local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+
+ [ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
+ echo "Skip quota verify for zfs: $zfs_version"
+ return 0
+ }
+ }
+
$LFS quota -u $T32_QID -v $mnt
qval=$($LFS quota -v -u $T32_QID $mnt |
local img_blimit
local img_ilimit
local fsname=t32fs
- local nid=$($r $LCTL list_nids | head -1)
+ local nid
local mopts
local uuid
local nrpcs_orig
trap 'trap - RETURN; t32_test_cleanup' RETURN
load_modules
+ nid=$($r $LCTL list_nids | head -1)
+
mkdir -p $tmp/mnt/lustre || error "mkdir $tmp/mnt/lustre failed"
$r mkdir -p $tmp/mnt/{mdt,mdt1,ost}
$r tar xjvf $tarball -S -C $tmp || {
$ZPOOL import -f -d $tmp $poolname"
done
+ # upgrade zpool to latest supported features, including
+ # dnode quota accounting in 0.7.0
+ $r "$ZPOOL upgrade -a"
+
mdt_dev=t32fs-mdt1/mdt1
ost_dev=t32fs-ost1/ost1
! $mdt2_is_available || mdt2_dev=t32fs-mdt2/mdt2
mopts="loop,$mopts"
fi
fi
- $r $MOUNT_CMD -o $mopts $ost_dev $tmp/mnt/ost || {
+
+ $r $MOUNT_CMD -onomgs -o$mopts $ost_dev $tmp/mnt/ost || {
error_noexit "Mounting the OST"
return 1
}
if [[ $fstype == zfs ]]; then
local poolname=t32fs-mdt1
$r "modprobe zfs;
- $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f -d $tmp $poolname"
+
+ # upgrade zpool to latest supported features,
+ # including dnode quota accounting in 0.7.0
+ $r "$ZPOOL upgrade $poolname"
fi
	# mount a second time to make sure we didn't leave the upgrade flag on
reformat_and_config
}
-test_48() { # bug 17636
- reformat
+test_48() { # bz-17636 LU-7473
+ local count
+
setup_noconfig
check_mount || error "check_mount failed"
$GETSTRIPE $MOUNT/widestripe ||
error "$GETSTRIPE $MOUNT/widestripe failed"
- trap cleanup_48 EXIT ERR
+	# In the future we may introduce more EAs, such as selinux or an
+	# enlarged LOV EA. These EAs will consume some of the EA space that
+	# is shared with ACL entries, so we only check for a reasonable
+	# number of ACL entries instead of the maximum calculated from
+	# max_ea_size.
+ if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.8.57) ];
+ then
		count=28	# hard-coded limit of the RPC protocol
+ elif [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ count=4000 # max_num 4091 max_ea_size = 32768
+ elif ! large_xattr_enabled; then
+ count=450 # max_num 497 max_ea_size = 4012
+ else
+ count=4500 # max_num 8187 max_ea_size = 1048492
+		   # do not create too many (>5000) to keep the test time short
+ fi
- # fill acl buffer for avoid expand lsm to them
- getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
- setfacl -m $acl $MOUNT/widestripe
+	echo "expecting the file to hold at least $count ACL entries"
+ trap cleanup_48 EXIT ERR
+ for ((i = 0; i < $count; i++)) do
+ setfacl -m u:$((i + 100)):rw $MOUNT/widestripe ||
+ error "Fail to setfacl for $MOUNT/widestripe at $i"
done
+ cancel_lru_locks mdc
stat $MOUNT/widestripe || error "stat $MOUNT/widestripe failed"
+ local r_count=$(getfacl $MOUNT/widestripe | grep "user:" | wc -l)
+ count=$((count + 1)) # for the entry "user::rw-"
+
+ [ $count -eq $r_count ] ||
+ error "Expected ACL entries $count, but got $r_count"
cleanup_48
}
# prepare MDT/OST, make OSC inactive for OST1
[ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
+ load_modules
[ $(facet_fstype mds2) == zfs ] && import_zpool mds2
do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
error "tunefs MDT2 failed"
"$TEST" "${FSNAME}-MDT0001.mdc.active" 0 ||
error "Unable to deactivate MDT2"
+ wait_osp_active mds ${FSNAME}-MDT0001 1 0
+
$LFS mkdir -i1 $DIR/$tdir/2 &&
		error "mkdir $DIR/$tdir/2 succeeded after deactivating MDT2"
+ $LFS mkdir -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir ||
+		error "mkdir $DIR/$tdir/striped_dir failed after deactivating MDT2"
+
+ local stripe_count=$($LFS getdirstripe -c $DIR/$tdir/striped_dir)
+ [ $stripe_count -eq $((MDSCOUNT - 1)) ] ||
+		error "wrong stripe count $stripe_count != $((MDSCOUNT - 1)) for striped_dir"
+
# cleanup
umount_client $MOUNT || error "Unable to umount client"
stop_mds
sync
echo checking size of lov_objid for ost index $i
- LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
+ LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" |
+ grep ^User | awk -F 'Size: ' '{print $2}')
if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
else
return
fi
- local inode_slab=$(do_facet $SINGLEMDS \
- "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
+ do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs
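+	# /proc/slabinfo fields: name <active_objs> <num_objs> <objsize>
+	# <objperslab> <pagesperslab>, so $5 / $6 gives objects per page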
+ local inode_slab=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+ awk '/ldiskfs_inode_cache/ { print $5 / $6 }')
if [ -z "$inode_slab" ]; then
skip "ldiskfs module has not been loaded"
return
fi
- echo "$inode_slab ldisk inodes per page"
- if [ "$inode_slab" -ge "3" ] ; then
- # If kmalloc-128 is also 1 per page - this is a debug kernel
- # and so this is not an error.
- local kmalloc128=$(do_facet $SINGLEMDS \
- "awk '/^(kmalloc|size)-128 / { print \\\$5 }' /proc/slabinfo")
- # 32 128-byte chunks in 4k
- [ "$kmalloc128" -eq "32" ] ||
- error "ldisk inode size is too big, $inode_slab objs per page"
- fi
+ echo "$inode_slab ldiskfs inodes per page"
+ [ "${inode_slab%.*}" -ge "3" ] && return 0
- return
+ # If kmalloc-128 is also 1 per page - this is a debug kernel
+ # and so this is not an error.
+ local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+ awk '/^(kmalloc|size)-128 / { print $5 / $6 }')
+ # 32 128-byte chunks in 4k
+ [ "${kmalloc128%.*}" -lt "32" ] ||
+ error "ldiskfs inode too big, only $inode_slab objs/page, " \
+ "kmalloc128 = $kmalloc128 objs/page"
}
-run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+run_test 63 "Verify each page can at least hold 3 ldiskfs inodes"
test_64() {
start_mds || error "unable to start MDS"
umount_client $MOUNT || error "umount client failed"
+ if ! combined_mgs_mds; then
+ start_mgs || error "start mgs failed"
+ fi
+
start_mdt 1 || error "MDT start failed"
start_ost || error "Unable to start OST1"
local ostdev=$(ostdevname 1)
local cmd="$E2FSCK -fnvd $mdsdev"
local fn=3
+ local add_options
[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
skip "ldiskfs only test" && return
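+	# with a separate MGS the old targets stay registered, so reformat
+	# with --replace to reuse the existing target index rather than
+	# registering a new one with the MGS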
+ if combined_mgs_mds; then
+ add_options='--reformat'
+ else
+ add_options='--reformat --replace'
+ fi
+
#tune MDT with "-O extents"
for num in $(seq $MDSCOUNT); do
add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
- --reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+ $add_options $(mdsdevname $num) $(mdsvdevname $num) ||
error "add mds $num failed"
do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
error "$TUNE2FS failed on mds${num}"
done
- add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
+ add ost1 $(mkfs_opts ost1 $ostdev) $add_options $ostdev ||
error "add $ostdev failed"
- start_mgsmds || error "start mds failed"
+ start_mds || error "start mds failed"
start_ost || error "start ost failed"
mount_client $MOUNT || error "mount client failed"
add mds1 $opts_mds || error "add mds1 failed for new params"
add ost1 $opts_ost || error "add ost1 failed for new params"
+ if ! combined_mgs_mds; then
+ stop_mgs || error "stop mgs failed"
+ fi
+ reformat
return 0
}
run_test 75 "The order of --index should be irrelevant"
test_76a() {
[[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] ||
{ skip "Need MDS version at least 2.4.52" && return 0; }
+
+ if ! combined_mgs_mds; then
+ start_mgs || error "start mgs failed"
+ fi
setup
local MDMB_PARAM="osc.*.max_dirty_mb"
echo "Change MGS params"
local mgsnid
local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
+ combined_mgs_mds || stop_mgs || error "stopping MGS service failed"
+
add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \
--reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed"
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT ||
error "start fs2mds failed"
mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,)
- mgsnid="$mgsnid,$mgsnid:$mgsnid"
+ mgsnid="0.0.0.0@tcp,$mgsnid,$mgsnid:$mgsnid"
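+	# 0.0.0.0@tcp is unreachable; listing it first should exercise
+	# fallback to the remaining MGS NIDs during mount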
- add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \
+ add fs2ost --mgsnode=$mgsnid $(mkfs_opts ost1 $fs2ostdev) \
--failnode=$failnid --fsname=$fsname \
--reformat $fs2ostdev $fs2ostvdev ||
error "add fs2ost failed"
wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME|
sort -u | tr '\n' ' ' " "$ost_targets_uuid" ||
error "wait_update $ost_pool failed"
- [[ -z $(list_pool $ost_pool) ]] &&
- error "list OST pool $ost_pool failed"
+ wait_update_facet $SINGLEMDS "$LCTL pool_list $ost_pool | wc -l" 4 ||
+ error "wait_update pool_list $ost_pool failed"
# If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
# then the OSTs must be the members of the pool.
test_83() {
[[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] ||
{ skip "Need OST version at least 2.6.91" && return 0; }
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- skip "Only applicable to ldiskfs-based MDTs"
+ if [ $(facet_fstype ost1) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based OSTs"
return
fi
# Mount the OST as an ldiskfs filesystem.
log "mount the OST $dev as a $fstype filesystem"
add ost1 $(mkfs_opts ost1 $dev) $FSTYPE_OPT \
- --reformat $dev $dev > /dev/null ||
+ --reformat $dev > /dev/null ||
error "format ost1 error"
if ! test -b $dev; then
run_test 86 "Replacing mkfs.lustre -G option"
test_87() { #LU-6544
- [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.7.56) ]] ||
- { skip "Need MDS version at least 2.7.56" && return; }
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] ||
+		{ skip "Need MDS version at least 2.9.51" && return; }
[[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
{ skip "Only applicable to ldiskfs-based MDTs" && return; }
- [[ $OSTCOUNT -gt 69 ]] &&
+ [[ $OSTCOUNT -gt 59 ]] &&
{ skip "Ignore wide striping situation" && return; }
local mdsdev=$(mdsdevname 1)
local file=$DIR/$tfile
local mntpt=$(facet_mntpt $SINGLEMDS)
local used_xattr_blk=0
- local inode_size=${1:-512}
+ local inode_size=${1:-1024}
local left_size=0
local xtest="trusted.test"
local value
local orig
local i
+ local stripe_cnt=$(($OSTCOUNT + 2))
- #Please see LU-6544 for MDT inode size calculation
- if [ $OSTCOUNT -gt 26 ]; then
+ #Please see ldiskfs_make_lustre() for MDT inode size calculation
+ if [ $stripe_cnt -gt 16 ]; then
inode_size=2048
- elif [ $OSTCOUNT -gt 5 ]; then
- inode_size=1024
fi
left_size=$(expr $inode_size - \
156 - \
32 - \
- 32 - $OSTCOUNT \* 24 - 16 - 3 - \
+ 32 - 40 \* 3 - 32 \* 3 - $stripe_cnt \* 24 - 16 - 3 - \
24 - 16 - 3 - \
24 - 18 - $(expr length $tfile) - 16 - 4)
if [ $left_size -le 0 ]; then
unload_modules
reformat
- add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \
+ add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$stripe_cnt \
--reformat $mdsdev $mdsvdev || error "add mds1 failed"
start_mdt 1 > /dev/null || error "start mdt1 failed"
for i in $(seq $OSTCOUNT); do
check_mount || error "check client $MOUNT failed"
#set xattr
- $SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed"
- $GETSTRIPE $file || error "$GETSTRIPE $file failed"
- i=$($GETSTRIPE -c $file)
+ $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file ||
+ error "Create file with 3 components failed"
+ i=$($GETSTRIPE -I 3 -c $file)
if [ $i -ne $OSTCOUNT ]; then
left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
		echo -n "Since only $i out of $OSTCOUNT OSTs are used, "
}
run_test 88 "check the default mount options can be overridden"
+test_89() { # LU-7131
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.54) ]] ||
+ { skip "Need MDT version at least 2.9.54" && return 0; }
+
+ local key=failover.node
+ local val1=192.0.2.254@tcp0 # Reserved IPs, see RFC 5735
+ local val2=192.0.2.255@tcp0
+ local mdsdev=$(mdsdevname 1)
+ local params
+
+ stopall
+
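+	# tunefs reads the on-disk config, so a zfs backend must have its
+	# zpool imported first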
+ [ $(facet_fstype mds1) == zfs ] && import_zpool mds1
+ # Check that parameters are added correctly
+ echo "tunefs --param $key=$val1"
+ do_facet mds "$TUNEFS --param $key=$val1 $mdsdev >/dev/null" ||
+ error "tunefs --param $key=$val1 failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=$val1) = "1" ] ||
+ error "on-disk parameter not added correctly via tunefs"
+
+ # Check that parameters replace existing instances when added
+ echo "tunefs --param $key=$val2"
+ do_facet mds "$TUNEFS --param $key=$val2 $mdsdev >/dev/null" ||
+ error "tunefs --param $key=$val2 failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=) = "1" ] ||
+ error "on-disk parameter not replaced via tunefs"
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=$val2) = "1" ] ||
+ error "on-disk parameter not replaced correctly via tunefs"
+
+ # Check that a parameter is erased properly
+ echo "tunefs --erase-param $key"
+ do_facet mds "$TUNEFS --erase-param $key $mdsdev >/dev/null" ||
+ error "tunefs --erase-param $key failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+ [ $(echo $params | tr ' ' '\n' | grep -c $key=) = "0" ] ||
+ error "on-disk parameter not erased correctly via tunefs"
+
+ # Check that all the parameters are erased
+ echo "tunefs --erase-params"
+ do_facet mds "$TUNEFS --erase-params $mdsdev >/dev/null" ||
+ error "tunefs --erase-params failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+	[ -z "$params" ] ||
+ error "all on-disk parameters not erased correctly via tunefs"
+
+ # Check the order of options --erase-params and --param
+ echo "tunefs --param $key=$val1 --erase-params"
+ do_facet mds \
+ "$TUNEFS --param $key=$val1 --erase-params $mdsdev >/dev/null"||
+ error "tunefs --param $key=$val1 --erase-params failed"
+ params=$(do_facet mds $TUNEFS --dryrun $mdsdev) ||
+ error "tunefs --dryrun failed"
+ params=${params##*Parameters:}
+ params=${params%%exiting*}
+	[ "$(echo $params | tr ' ' '\n')" == "$key=$val1" ] ||
+ error "on-disk param not added correctly with --erase-params"
+
+ reformat
+}
+run_test 89 "check tunefs --param and --erase-param{s} options"
+
# $1 test directory
# $2 (optional) value of max_mod_rpcs_in_flight to set
check_max_mod_rpcs_in_flight() {
}
run_test 101 "Race MDT->OST reconnection with create"
+test_102() {
+ cleanup || error "cleanup failed with $?"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+ if [ $mds1fstype == ldiskfs ] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
+ # unload all and only load libcfs to allow fail_loc setting
+ do_facet mds1 lustre_rmmod || error "unable to unload modules"
+ do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+ do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+ #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a
+ do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+ error "mdt start must fail"
+ do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+ error "mdt start must not fail"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
+test_renamefs() {
+ local newname=$1
+
+ echo "rename $FSNAME to $newname"
+
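+	# tunefs.lustre --fsname=<new> --rename=<old> rewrites the fsname
+	# stored in each target's on-disk configuration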
+	if ! combined_mgs_mds; then
+		local dev=$(mgsdevname)
+
+		do_facet mgs \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+			error "(7) Fail to rename MGS"
+		if [ "$(facet_fstype mgs)" = "zfs" ]; then
+ reimport_zpool mgs $newname-mgs
+ fi
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+		local dev=$(mdsdevname $num)
+
+		do_facet mds${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+			error "(8) Fail to rename MDT $num"
+		if [ "$(facet_fstype mds${num})" = "zfs" ]; then
+ reimport_zpool mds${num} $newname-mdt${num}
+ fi
+ done
+
+ for num in $(seq $OSTCOUNT); do
+		local dev=$(ostdevname $num)
+
+		do_facet ost${num} \
+			"$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+			error "(9) Fail to rename OST $num"
+		if [ "$(facet_fstype ost${num})" = "zfs" ]; then
+ reimport_zpool ost${num} $newname-ost${num}
+ fi
+ done
+}
+
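+# test_103_set_pool <pool_name> <ost_target>: add the OST to the pool
+# and wait until clients observe the membership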
+test_103_set_pool() {
+ local pname=$1
+ local ost_x=$2
+
+ do_facet mgs $LCTL pool_add $FSNAME.$pname ${FSNAME}-$ost_x ||
+ error "Fail to add $ost_x to $FSNAME.$pname"
+ wait_update $HOSTNAME \
+ "lctl get_param -n lov.$FSNAME-clilov-*.pools.$pname |
+ grep $ost_x" "$FSNAME-${ost_x}_UUID" ||
+ error "$ost_x is NOT in pool $FSNAME.$pname"
+}
+
+test_103_check_pool() {
+ local save_fsname=$1
+ local errno=$2
+
+ stat $DIR/$tdir/test-framework.sh ||
+ error "($errno) Fail to stat"
+ do_facet mgs $LCTL pool_list $FSNAME.pool1 ||
+ error "($errno) Fail to list $FSNAME.pool1"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname ||
+ error "($errno) Fail to list $FSNAME.$save_fsname"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname |
+ grep ${FSNAME}-OST0000 ||
+ error "($errno) List $FSNAME.$save_fsname is invalid"
+
+ local pname=$($LFS getstripe --pool $DIR/$tdir/d0)
+ [ "$pname" = "$save_fsname" ] ||
+ error "($errno) Unexpected pool name $pname"
+}
+
+test_103() {
+ check_mount_and_prep
+ rm -rf $DIR/$tdir
+ mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir"
+ cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
+ error "(2) Fail to copy test-framework.sh"
+
+ do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
+ error "(3) Fail to create $FSNAME.pool1"
+	# create a pool whose name matches the fsname
+ do_facet mgs $LCTL pool_new $FSNAME.$FSNAME ||
+ error "(4) Fail to create $FSNAME.$FSNAME"
+
+ test_103_set_pool $FSNAME OST0000
+
+ $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+ error "(6) Fail to setstripe on $DIR/$tdir/d0"
+
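+	# KEEP_ZPOOL prevents stopall from exporting the zpools, so tunefs
+	# can still reach them for the rename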
+ KEEP_ZPOOL=true
+ stopall
+
+ test_renamefs mylustre
+
+ local save_fsname=$FSNAME
+ FSNAME="mylustre"
+ setupall
+
+ test_103_check_pool $save_fsname 7
+
+ if [ $OSTCOUNT -ge 2 ]; then
+ test_103_set_pool $save_fsname OST0001
+ fi
+
+ $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+ error "(16) Fail to setstripe on $DIR/$tdir/f0"
+
+ stopall
+
+ test_renamefs tfs
+
+ FSNAME="tfs"
+ setupall
+
+ test_103_check_pool $save_fsname 17
+
+ stopall
+
+ test_renamefs $save_fsname
+
+ FSNAME=$save_fsname
+ setupall
+ KEEP_ZPOOL=false
+}
+run_test 103 "rename filesystem name"
+
+test_104() { # LU-6952
+ local mds_mountopts=$MDS_MOUNT_OPTS
+ local ost_mountopts=$OST_MOUNT_OPTS
+ local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
+ local lctl_ver=$(do_facet $SINGLEMDS $LCTL --version |
+ awk '{ print $2 }')
+
+ [[ $(version_code $lctl_ver) -lt $(version_code 2.9.55) ]] &&
+ { skip "this test needs utils above 2.9.55" && return 0; }
+
+ # specify "acl" in mount options used by mkfs.lustre
+ if [ -z "$MDS_MOUNT_FS_OPTS" ]; then
+ MDS_MOUNT_FS_OPTS="acl,user_xattr"
+	else
+		MDS_MOUNT_FS_OPTS="${MDS_MOUNT_FS_OPTS},acl,user_xattr"
+ fi
+
+ echo "mountfsopt: $MDS_MOUNT_FS_OPTS"
+
+	# reformat/remount the MDT to apply the MDS_MOUNT_FS_OPTS options
+ formatall
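+	# mounting with "noacl" should override the "acl" stored in the
+	# target's default mount options at format time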
+ if [ -z "$MDS_MOUNT_OPTS" ]; then
+ MDS_MOUNT_OPTS="-o noacl"
+ else
+ MDS_MOUNT_OPTS="${MDS_MOUNT_OPTS},noacl"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ start mds$num $(mdsdevname $num) $MDS_MOUNT_OPTS ||
+ error "Failed to start MDS"
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ start ost$num $(ostdevname $num) $OST_MOUNT_OPTS ||
+ error "Failed to start OST"
+ done
+
+	mount_client $MOUNT || error "mount_client $MOUNT failed"
+ setfacl -m "d:$RUNAS_ID:rwx" $MOUNT &&
+ error "ACL is applied when FS is mounted with noacl."
+
+ MDS_MOUNT_OPTS=$mds_mountopts
+ OST_MOUNT_OPTS=$ost_mountopts
+ MDS_MOUNT_FS_OPTS=$mds_mountfsopts
+
+ formatall
+ setupall
+}
+run_test 104 "Make sure user defined options are reflected in mount"
+
if ! combined_mgs_mds ; then
stop mgs
fi