+check_uuid_on_ost() {
+ local nid=$1
+ do_facet ost1 "$LCTL get_param obdfilter.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+check_uuid_on_mdt() {
+ local nid=$1
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+# Verify evict-by-nid support: a client export can be looked up by NID
+# on both MDT and OST, the MDT export survives a forced client umount,
+# and an explicit evict_client by NID removes it from both servers
+# without it reconnecting afterwards.
+test_91() {
+ local uuid
+ local nid
+ local found
+
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
+ { skip "Need OST version at least 2.7.63" && return 0; }
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
+ { skip "Need MDT version at least 2.7.63" && return 0; }
+
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ # with a remote MDS use the client's first NID, escaping dots so it
+ # is safe inside an lctl parameter name; locally the NID is 0@lo
+ if remote_mds; then
+ nid=$($LCTL list_nids | head -1 | sed "s/\./\\\./g")
+ else
+ nid="0@lo"
+ fi
+ uuid=$(get_client_uuid $MOUNT)
+
+ echo "list nids on mdt:"
+ do_facet $SINGLEMDS "$LCTL list_param mdt.${FSNAME}*.exports.*"
+ echo "uuid from $nid:"
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+
+ # while the client is mounted, its uuid must show on both servers
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on OST"
+
+ # umount the client so it won't reconnect
+ manual_umount_client --force || error "failed to umount $?"
+ # shouldn't disappear on MDS after forced umount
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid"
+
+ echo "evict $nid"
+ do_facet $SINGLEMDS \
+ "$LCTL set_param -n mdt.${mds1_svc}.evict_client nid:$nid"
+
+ # eviction must remove the export from both MDT and OST
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ # check it didn't reconnect (being umounted)
+ sleep $((TIMEOUT+1))
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ cleanup
+}
+run_test 91 "evict-by-nid support"
+
+generate_ldev_conf() {
+ # generate an ldev.conf file
+ # One line per target (MGS, each MDT, each OST) in ldev format:
+ # "host<TAB>failover-host<TAB>label<TAB>[fstype:]device".
+ # A failover host of "-" means none is configured.
+ local ldevconfpath=$1
+ local fstype=
+ local fsldevformat=""
+ touch $ldevconfpath
+
+ # zfs devices carry a "zfs:" prefix in ldev.conf; ldiskfs uses none
+ fstype=$(facet_fstype mgs)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t-\t%s-MGS0000\t%s%s\n" \
+ $mgs_HOST \
+ $FSNAME \
+ $fsldevformat \
+ $(mgsdevname) > $ldevconfpath
+
+ local mdsfo_host=$mdsfailover_HOST;
+ if [ -z "$mdsfo_host" ]; then
+ mdsfo_host="-"
+ fi
+
+ # NOTE(review): labels below use $num directly (MDT0001, ...),
+ # matching the expected output built in tests 94/97 — confirm
+ # against real target indices, which normally start at 0
+ for num in $(seq $MDSCOUNT); do
+ fstype=$(facet_fstype mds$num)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t%s\t%s-MDT%04d\t%s%s\n" \
+ $mds_HOST \
+ $mdsfo_host \
+ $FSNAME \
+ $num \
+ $fsldevformat \
+ $(mdsdevname $num) >> $ldevconfpath
+ done
+
+ local ostfo_host=$ostfailover_HOST;
+ if [ -z "$ostfo_host" ]; then
+ ostfo_host="-"
+ fi
+
+ for num in $(seq $OSTCOUNT); do
+ fstype=$(facet_fstype ost$num)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t%s\t%s-OST%04d\t%s%s\n" \
+ $ost_HOST \
+ $ostfo_host \
+ $FSNAME \
+ $num \
+ $fsldevformat \
+ $(ostdevname $num) >> $ldevconfpath
+ done
+
+ # dump the generated file for the test log
+ echo "----- $ldevconfpath -----"
+ cat $ldevconfpath
+ echo "--- END $ldevconfpath ---"
+
+}
+
+generate_nids() {
+ # Write a nids file mapping hostname to NID. Only the MGS NID is
+ # available in the test environment, so that single mapping is all
+ # we emit, then the file is dumped for the test log.
+ local nidspath="$1"
+
+ printf '%s\t%s\n' "${mgs_HOST}" "${MGSNID}" > "$nidspath"
+
+ echo "----- $nidspath -----"
+ cat "$nidspath"
+ echo "--- END $nidspath ---"
+}
+
+compare_ldev_output() {
+ # Compare actual ldev output with the expected output, ignoring
+ # line order: both files are sorted in place, dumped to the log,
+ # then diffed. Returns diff's status (0 when they match).
+ # Fix: declare the parameters local so they no longer leak into
+ # the global scope, and quote the path expansions.
+ local ldev_output="$1"
+ local expected_output="$2"
+
+ sort "$expected_output" -o "$expected_output"
+ sort "$ldev_output" -o "$ldev_output"
+
+ echo "-- START OF LDEV OUTPUT --"
+ cat "$ldev_output"
+ echo "--- END OF LDEV OUTPUT ---"
+
+ echo "-- START OF EXPECTED OUTPUT --"
+ cat "$expected_output"
+ echo "--- END OF EXPECTED OUTPUT ---"
+
+ diff "$expected_output" "$ldev_output"
+ return $?
+}
+
+# Verify that "ldev echo %m" prints the MGS NID for every target line.
+test_92() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ echo "Host is $(hostname)"
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # echo the mgs nid and compare it to environment variable MGSNID
+ # also, ldev.conf and nids is a server side thing, use the OSS
+ # hostname
+ local output rc
+ output=$($LDEV -c $LDEVCONFPATH -H $ost_HOST -n $NIDSPATH echo %m)
+ # capture ldev's status immediately; the echoes below would
+ # otherwise clobber $? before the check
+ rc=$?
+
+ echo "-- START OF LDEV OUTPUT --"
+ echo "$output"
+ echo "--- END OF LDEV OUTPUT ---"
+
+ # ldev failed, error
+ if [ $rc -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed to execute!"
+ fi
+
+ # need to process multiple lines because of combined MGS and MDS
+ # quote "$output" so newlines survive, and feed the loop via
+ # process substitution so error() aborts the test instead of
+ # only exiting a pipeline subshell
+ while read -r line; do
+ if [ "$line" != "$MGSNID" ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed mgs nid '$line', expected '$MGSNID'"
+ fi
+ done < <(echo "$output" | awk '{ print $2 }')
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 92 "ldev returns MGS NID correctly in command substitution"
+
+# Register multiple MDTs with the MGS concurrently while the MGS write
+# path is artificially delayed, to exercise the registration race.
+test_93() {
+ [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+ reformat
+ #start mgs or mgs/mdt0
+ if ! combined_mgs_mds ; then
+ start_mgs
+ start_mdt 1
+ else
+ start_mdt 1
+ fi
+
+ start_ost || error "OST0 start fail"
+
+ #define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
+ # fix: "fail_val = 10" (with spaces) was parsed as three separate
+ # arguments and never set the delay value
+ do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
+ # start the remaining MDTs in parallel so their registrations race
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num &
+ done
+
+ mount_client $MOUNT || error "mount client fails"
+ wait_osc_import_state mds ost FULL
+ wait_osc_import_state client ost FULL
+ check_mount || error "check_mount failed"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 93 "register multiple MDT at the same time"
+
+# Verify that "ldev -F <fsname>" lists exactly the labels of every
+# target (MGS + all MDTs + all OSTs) of the file system.
+test_94() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME > $LDEV_OUTPUT
+
+ # ldev failed, error
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ # labels are numbered from $num to match generate_ldev_conf above
+ local EXPECTED_OUTPUT=$TMP/ldev-expected.txt
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct hostlist!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 94 "ldev outputs correct labels for file system name query"
+
+# Verify ldev's label-filter flags: each filter is accepted on its own
+# (-F fsname, -l local, -f foreign, -a all) but any combination of two
+# filters must be rejected.
+test_95() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # SUCCESS CASES
+ # file sys filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F failed!"
+ fi
+
+ # local filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -l &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -l failed!"
+ fi
+
+ # foreign filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f failed!"
+ fi
+
+ # all filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a failed!"
+ fi
+
+ # FAILURE CASES
+ # every pairwise combination of two filters must be refused
+ # all & file sys
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -F $FSNAME &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -F incorrectly succeeded"
+ fi
+
+ # all & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -f incorrectly succeeded"
+ fi
+
+ # all & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -l incorrectly succeeded"
+ fi
+
+ # foreign & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f and -l incorrectly succeeded"
+ fi
+
+ # file sys & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -l incorrectly succeeded"
+ fi
+
+ # file sys & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -f incorrectly succeeded"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 95 "ldev should only allow one label filter"
+
+# Verify that "ldev echo %H-%b" prints hostname-backendfs for every
+# target hosted on the MGS node.
+test_96() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \
+ echo %H-%b | \
+ awk '{print $2}' > $LDEV_OUTPUT
+ # fix: $? after the pipeline is awk's status, which masked ldev
+ # failures; take ldev's own status from PIPESTATUS instead
+ local rc=${PIPESTATUS[0]}
+
+ # ldev failed, error
+ if [ $rc -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT
+
+ # MDT/OST lines only appear when they share the MGS host
+ if [ "$mgs_HOST" == "$mds_HOST" ]; then
+ for num in $(seq $MDSCOUNT); do
+ echo "$mds_HOST-$(facet_fstype mds$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ if [ "$mgs_HOST" == "$ost_HOST" ]; then
+ for num in $(seq $OSTCOUNT); do
+ echo "$ost_HOST-$(facet_fstype ost$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+
+test_97() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo -e "\nMDT role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute for mdt role!"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mdt role!"
+ fi
+
+ echo -e "\nOST role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for ost role!"
+ fi
+
+ rm $EXPECTED_OUTPUT
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for ost role!"
+ fi
+
+ echo -e "\nMGS role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for mgs role!"
+ fi
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mgs role!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct ouput when querying based on role"
+
+test_98()
+{
+ # Remount the client with a mount-option string far longer than the
+ # kernel's parsing buffer and check the kernel rejects it with a
+ # "too long" error instead of overflowing.
+ # fix: dropped the unused "local temp=$MDS_MOUNT_OPTS" variable.
+ local mountopt
+
+ setup
+ check_mount || error "mount failed"
+ mountopt="user_xattr"
+ # build 401 comma-joined copies of user_xattr
+ for ((x = 1; x <= 400; x++)); do
+ mountopt="$mountopt,user_xattr"
+ done
+ remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ error "Buffer overflow check failed"
+ cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+# Reformat OST1 with "-O ^resize_inode,meta_bg" added to the mkfs
+# options and verify via debugfs that the meta_bg feature is set.
+test_99()
+{
+ [[ $(facet_fstype ost1) != ldiskfs ]] &&
+ { skip "Only applicable to ldiskfs-based OSTs" && return; }
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+ { skip "Need OST version at least 2.8.57" && return 0; }
+
+ local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ --reformat $(ostdevname 1) $(ostvdevname 1)"
+ do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" &&
+ skip "meta_bg already set" && return
+
+ # opts holds the NAME of the variable; ${!opts} reads its value
+ local opts=ost_opts
+ if [[ ${!opts} != *mkfsoptions* ]]; then
+ # no --mkfsoptions yet: append one carrying the features
+ # (escaped quotes survive the later remote-shell expansion)
+ eval opts=\"${!opts} \
+ --mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+ else
+ # splice the features into the existing --mkfsoptions value
+ local val=${!opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+ eval opts='${val}'
+ fi
+
+ echo "params: $opts"
+
+ add ost1 $opts || error "add ost1 failed with new params"
+
+ do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" ||
+ error "meta_bg is not set"
+
+ return 0
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+ reformat
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ # Desired output
+ # MGS:
+ # 0@lo
+ # lustre-MDT0000:
+ # 0@lo
+ # lustre-OST0000:
+ # 0@lo
+ do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
+ END {exit rc}' || error "lshowmount have no output MGS"
+
+ do_facet mds1 'lshowmount -v' | awk 'BEGIN {NR == 2; rc=1} /-MDT0000:/
+ {rc=0} END {exit rc}' || error "lshowmount have no output MDT0"
+
+ do_facet ost1 'lshowmount -v' | awk 'BEGIN {NR == 4; rc=1} /-OST0000:/
+ {rc=0} END {exit rc}' || error "lshowmount have no output OST0"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+# Race repeated deactivate/activate cycles of the MDT's OSC device
+# against a large createmany workload; files must never end up without
+# an OST object (shown as '???' by ls).
+test_101() {
+ local createmany_oid
+ local dev=$FSNAME-OST0000-osc-MDT0000
+ setup
+
+ # create 50000 files in the background while we bounce the import
+ createmany -o $DIR1/$tfile-%d 50000 &
+ createmany_oid=$!
+ # MDT->OST reconnection causes MDT<->OST last_id synchornisation
+ # via osp_precreate_cleanup_orphans.
+ for ((i = 0; i < 100; i++)); do
+ for ((k = 0; k < 10; k++)); do
+ # NOTE(review): do_facet receives two arguments here;
+ # presumably it joins them into one remote command so
+ # deactivate and activate run back to back — confirm
+ do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+ "$LCTL --device $dev activate"
+ done
+
+ # '???' in ls output means a file lost its OST object
+ ls -asl $MOUNT | grep '???' &&
+ (kill -9 $createmany_oid &>/dev/null; \
+ error "File hasn't object on OST")
+
+ # stop cycling once the createmany workload has finished
+ kill -s 0 $createmany_oid || break
+ done
+ wait $createmany_oid
+ cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+# Force a failure during obdclass module load at mount time and verify
+# the module cleans itself up (does not stay loaded); a subsequent
+# normal mount must then succeed.
+test_102() {
+ cleanup || error "cleanup failed with $?"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+ # file-backed ldiskfs targets need a loop mount
+ if [ $mds1fstype == ldiskfs ] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
+ # unload all and only load libcfs to allow fail_loc setting
+ do_facet mds1 lustre_rmmod || error "unable to unload modules"
+ do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+ do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+ #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a
+ do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+ # with the fail_loc armed the mount must fail and obdclass must
+ # have been rolled back (not left loaded)
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+ error "mdt start must fail"
+ do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+ # with the fail_loc cleared the same mount must succeed
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+ error "mdt start must not fail"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
+# Rename the file system from $FSNAME to $newname on every target with
+# tunefs.lustre --rename, re-importing renamed zpools for zfs targets.
+test_renamefs() {
+ local newname=$1
+
+ echo "rename $FSNAME to $newname"
+
+ # fix: "[ ! combined_mgs_mds ]" tested the literal (non-empty)
+ # string and was always false, so a separate MGS device was never
+ # renamed; invoke the helper function itself instead
+ if ! combined_mgs_mds; then
+ local facet=$(mgsdevname)
+
+ do_facet mgs \
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+ error "(7) Fail to rename MGS"
+ if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ reimport_zpool mgs $newname-mgs
+ fi
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ local facet=$(mdsdevname $num)
+
+ do_facet mds${num} \
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+ error "(8) Fail to rename MDT $num"
+ if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ reimport_zpool mds${num} $newname-mdt${num}
+ fi
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ local facet=$(ostdevname $num)
+
+ do_facet ost${num} \
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+ error "(9) Fail to rename OST $num"
+ if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ reimport_zpool ost${num} $newname-ost${num}
+ fi
+ done
+}
+
+# Add the given OST target to pool $FSNAME.<pool> and wait until the
+# client-side LOV reports the target as a member of the pool.
+test_103_set_pool() {
+ local pool=$1
+ local target=$2
+
+ do_facet mgs $LCTL pool_add $FSNAME.$pool ${FSNAME}-$target ||
+ error "Fail to add $target to $FSNAME.$pool"
+ wait_update $HOSTNAME \
+ "lctl get_param -n lov.$FSNAME-clilov-*.pools.$pool |
+ grep $target" "$FSNAME-${target}_UUID" ||
+ error "$target is NOT in pool $FSNAME.$pool"
+}
+
+# After a file system rename, verify the old pool configuration
+# survived: the copied file is still accessible, both pools are still
+# listed (under the new $FSNAME), the pool named after the old fsname
+# still contains OST0000, and the striped directory still reports it.
+test_103_check_pool() {
+ local save_fsname=$1
+ local errno=$2
+
+ stat $DIR/$tdir/test-framework.sh ||
+ error "($errno) Fail to stat"
+ do_facet mgs $LCTL pool_list $FSNAME.pool1 ||
+ error "($errno) Fail to list $FSNAME.pool1"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname ||
+ error "($errno) Fail to list $FSNAME.$save_fsname"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname |
+ grep ${FSNAME}-OST0000 ||
+ error "($errno) List $FSNAME.$save_fsname is invalid"
+
+ # the directory's default pool must still be the old-fsname pool
+ local pname=$($LFS getstripe --pool $DIR/$tdir/d0)
+ [ "$pname" = "$save_fsname" ] ||
+ error "($errno) Unexpected pool name $pname"
+}
+
+# Rename the file system twice (to "mylustre", then "tfs") and back,
+# checking after each rename that pools — including a pool named after
+# the original fsname — and pool-striped files survive the rename.
+test_103() {
+ check_mount_and_prep
+ rm -rf $DIR/$tdir
+ mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir"
+ cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
+ error "(2) Fail to copy test-framework.sh"
+
+ do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
+ error "(3) Fail to create $FSNAME.pool1"
+ # name the pool name as the fsname
+ do_facet mgs $LCTL pool_new $FSNAME.$FSNAME ||
+ error "(4) Fail to create $FSNAME.$FSNAME"
+
+ test_103_set_pool $FSNAME OST0000
+
+ $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+ error "(6) Fail to setstripe on $DIR/$tdir/d0"
+
+ # keep zpools imported across stopall/setupall while renaming
+ KEEP_ZPOOL=true
+ stopall
+
+ test_renamefs mylustre
+
+ local save_fsname=$FSNAME
+ FSNAME="mylustre"
+ setupall
+
+ test_103_check_pool $save_fsname 7
+
+ if [ $OSTCOUNT -ge 2 ]; then
+ test_103_set_pool $save_fsname OST0001
+ fi
+
+ $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+ error "(16) Fail to setstripe on $DIR/$tdir/f0"
+
+ stopall
+
+ # second rename to ensure repeated renames keep working
+ test_renamefs tfs
+
+ FSNAME="tfs"
+ setupall
+
+ test_103_check_pool $save_fsname 17
+
+ stopall
+
+ # restore the original fsname for the following tests
+ test_renamefs $save_fsname
+
+ FSNAME=$save_fsname
+ setupall
+ KEEP_ZPOOL=false
+}
+run_test 103 "rename filesystem name"
+
+test_104() { # LU-6952
+ # Verify that admin-supplied mount options (noacl) override options
+ # baked into the filesystem by mkfs.lustre (acl): with noacl active,
+ # setfacl on the client must fail.
+ local mds_mountopts=$MDS_MOUNT_OPTS
+ local ost_mountopts=$OST_MOUNT_OPTS
+ local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
+ local lctl_ver=$(do_facet $SINGLEMDS $LCTL --version |
+ awk '{ print $2 }')
+
+ [[ $(version_code $lctl_ver) -lt $(version_code 2.9.55) ]] &&
+ { skip "this test needs utils above 2.9.55" && return 0; }
+
+ # specify "acl" in mount options used by mkfs.lustre
+ if [ -z "$MDS_MOUNT_FS_OPTS" ]; then
+ MDS_MOUNT_FS_OPTS="acl,user_xattr"
+ else
+ MDS_MOUNT_FS_OPTS="${MDS_MOUNT_FS_OPTS},acl,user_xattr"
+ fi
+
+ echo "mountfsopt: $MDS_MOUNT_FS_OPTS"
+
+ #reformat/remount the MDT to apply the MDT_MOUNT_FS_OPT options
+ formatall
+ if [ -z "$MDS_MOUNT_OPTS" ]; then
+ MDS_MOUNT_OPTS="-o noacl"
+ else
+ MDS_MOUNT_OPTS="${MDS_MOUNT_OPTS},noacl"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ start mds$num $(mdsdevname $num) $MDS_MOUNT_OPTS ||
+ error "Failed to start MDS"
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ start ost$num $(ostdevname $num) $OST_MOUNT_OPTS ||
+ error "Failed to start OST"
+ done
+
+ # fix: check the client mount like every other step does
+ mount_client $MOUNT || error "mount_client failed"
+ setfacl -m "d:$RUNAS_ID:rwx" $MOUNT &&
+ error "ACL is applied when FS is mounted with noacl."
+
+ # restore saved settings and reformat so later tests start clean
+ MDS_MOUNT_OPTS=$mds_mountopts
+ OST_MOUNT_OPTS=$ost_mountopts
+ MDS_MOUNT_FS_OPTS=$mds_mountfsopts
+
+ formatall
+ setupall
+}
+run_test 104 "Make sure user defined options are reflected in mount"
+