+test_93() {
+ [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+ reformat
+ #start mgs or mgs/mdt0
+ if ! combined_mgs_mds ; then
+ start_mgs
+ start_mdt 1
+ else
+ start_mdt 1
+ fi
+
+ start_ost || error "OST0 start fail"
+
+ # Delay target registration on the MGS so the concurrent MDT starts
+ # below genuinely race with each other. Note: "fail_val = 10" (with
+ # spaces) would be parsed as three separate set_param arguments and
+ # fail_val would never be set.
+ #define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
+ do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num &
+ done
+
+ mount_client $MOUNT || error "mount client fails"
+ wait_osc_import_state mds ost FULL
+ wait_osc_import_state client ost FULL
+ check_mount || error "check_mount failed"
+
+ # reap the background start_mdt jobs before tearing everything down
+ wait
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 93 "register multiple MDT at the same time"
+test_94() {
+ # the ldev utility must be available for this test
+ [ -z "$LDEV" ] && error "ldev is missing!"
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+
+ # query every label belonging to $FSNAME; bail out if ldev itself fails
+ if ! $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME > $LDEV_OUTPUT; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # build the label list we expect ldev to have printed
+ local EXPECTED_OUTPUT=$TMP/ldev-expected.txt
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ local idx
+ for idx in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $idx >> $EXPECTED_OUTPUT
+ done
+
+ for idx in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $idx >> $EXPECTED_OUTPUT
+ done
+
+ # actual vs. expected label list must match
+ if ! compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct hostlist!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 94 "ldev outputs correct labels for file system name query"
+test_95() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # SUCCESS CASES
+ # file sys filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F failed!"
+ fi
+
+ # local filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -l &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -l failed!"
+ fi
+
+ # foreign filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f failed!"
+ fi
+
+ # all filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a failed!"
+ fi
+
+ # FAILURE CASES
+ # all & file sys
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -F $FSNAME &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -F incorrectly succeeded"
+ fi
+
+ # all & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -f incorrectly succeeded"
+ fi
+
+ # all & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -l incorrectly succeeded"
+ fi
+
+ # foreign & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f and -l incorrectly succeeded"
+ fi
+
+ # file sys & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -l incorrectly succeeded"
+ fi
+
+ # file sys & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -f incorrectly succeeded"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 95 "ldev should only allow one label filter"
+
+test_96() {
+ # the ldev utility must be available for this test
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ local ldev_rc
+ # run "echo %H-%b" through ldev command substitution for the MGS host;
+ # %H expands to the hostname and %b to the backend fs type
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \
+ echo %H-%b | \
+ awk '{print $2}' > $LDEV_OUTPUT
+ # plain $? would only reflect awk's status and mask an ldev failure;
+ # capture ldev's own exit code from the first pipeline stage
+ ldev_rc=${PIPESTATUS[0]}
+
+ # ldev failed, error
+ if [ $ldev_rc -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT
+
+ # MDTs/OSTs co-located with the MGS also appear in the -H output
+ if [ "$mgs_HOST" == "$mds_HOST" ]; then
+ for num in $(seq $MDSCOUNT); do
+ echo "$mds_HOST-$(facet_fstype mds$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ if [ "$mgs_HOST" == "$ost_HOST" ]; then
+ for num in $(seq $OSTCOUNT); do
+ echo "$ost_HOST-$(facet_fstype ost$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+test_97() {
+ # the ldev utility must be available for this test
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo -e "\nMDT role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute for mdt role!"
+ fi
+
+ # remove any stale expected-output file before the first append,
+ # otherwise leftovers from a previous run corrupt the comparison
+ rm -f $EXPECTED_OUTPUT
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mdt role!"
+ fi
+
+ echo -e "\nOST role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for ost role!"
+ fi
+
+ # start the OST expectations from a fresh file
+ rm $EXPECTED_OUTPUT
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for ost role!"
+ fi
+
+ echo -e "\nMGS role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for mgs role!"
+ fi
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mgs role!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct output when querying based on role"
+test_98()
+{
+ local mountopt
+ local x
+
+ setup
+ check_mount || error "mount failed"
+ # build a mount-option string of 401 repeated options, long enough to
+ # overrun a fixed-size parsing buffer if length is not checked
+ mountopt="user_xattr"
+ for ((x = 1; x <= 400; x++)); do
+ mountopt="$mountopt,user_xattr"
+ done
+ # the remount must be rejected with a "too long" diagnostic rather
+ # than overflowing
+ remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ error "Buffer overflow check failed"
+ cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+test_99()
+{
+ [[ $(facet_fstype ost1) != ldiskfs ]] &&
+ { skip "Only applicable to ldiskfs-based OSTs" && return; }
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+ { skip "Need OST version at least 2.8.57" && return 0; }
+
+ # base mkfs command line used to reformat ost1
+ local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ --reformat $(ostdevname 1) $(ostvdevname 1)"
+ # nothing to test if the on-disk filesystem already has meta_bg set
+ do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" &&
+ skip "meta_bg already set" && return
+
+ # indirect reference (${!opts}) so the option string can be edited
+ # in place below
+ local opts=ost_opts
+ if [[ ${!opts} != *mkfsoptions* ]]; then
+ # no --mkfsoptions present: append one enabling meta_bg
+ eval opts=\"${!opts} \
+ --mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+ else
+ # splice "-O ^resize_inode,meta_bg" into the existing
+ # --mkfsoptions value, preserving the escaped quoting
+ local val=${!opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+ eval opts='${val}'
+ fi
+
+ echo "params: $opts"
+
+ # reformat ost1 with the amended options
+ add ost1 $opts || error "add ost1 failed with new params"
+
+ # verify the meta_bg feature actually made it onto the device
+ do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" ||
+ error "meta_bg is not set"
+
+ return 0
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+ reformat
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ # Desired output
+ # MGS:
+ # 0@lo
+ # lustre-MDT0000:
+ # 0@lo
+ # lustre-OST0000:
+ # 0@lo
+ # Each awk program exits 0 iff its label appears anywhere in the
+ # lshowmount output. NOTE(review): the "NR == 0"/"NR == 2"/"NR == 4"
+ # expressions in the BEGIN blocks are bare comparisons with no effect;
+ # the checks reduce to "/pattern/ {rc=0}" -- confirm and drop them.
+ do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
+ END {exit rc}' || error "lshowmount have no output MGS"
+
+ do_facet mds1 'lshowmount -v' | awk 'BEGIN {NR == 2; rc=1} /-MDT0000:/
+ {rc=0} END {exit rc}' || error "lshowmount have no output MDT0"
+
+ do_facet ost1 'lshowmount -v' | awk 'BEGIN {NR == 4; rc=1} /-OST0000:/
+ {rc=0} END {exit rc}' || error "lshowmount have no output OST0"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+ local createmany_oid
+ local dev=$FSNAME-OST0000-osc-MDT0000
+ setup
+
+ # create many files in the background while the MDT->OST connection
+ # is repeatedly bounced underneath it
+ createmany -o $DIR1/$tfile-%d 50000 &
+ createmany_oid=$!
+ # MDT->OST reconnection causes MDT<->OST last_id synchronisation
+ # via osp_precreate_cleanup_orphans.
+ for ((i = 0; i < 100; i++)); do
+ for ((k = 0; k < 10; k++)); do
+ # NOTE(review): the two quoted strings are passed to
+ # do_facet as separate arguments; verify do_facet joins
+ # them so deactivate and activate both run
+ do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+ "$LCTL --device $dev activate"
+ done
+
+ # a "???" entry in ls output means a file lost its OST object
+ ls -asl $MOUNT | grep '???' &&
+ (kill -9 $createmany_oid &>/dev/null; \
+ error "File hasn't object on OST")
+
+ # stop cycling once the createmany job has exited
+ kill -s 0 $createmany_oid || break
+ done
+ wait $createmany_oid
+ cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+ # exercise the obdclass module-load error path: arm a fault so the
+ # module fails to load, verify mount fails cleanly and nothing is
+ # left loaded, then clear the fault and verify mount succeeds
+ cleanup || error "cleanup failed with $?"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+ # ldiskfs backed by a regular file needs a loop mount
+ if [[ $mds1fstype == ldiskfs ]] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
+ # unload all and only load libcfs to allow fail_loc setting
+ do_facet mds1 lustre_rmmod || error "unable to unload modules"
+ do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+ do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+ #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a
+ do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+ # with the fault armed the mount must fail and obdclass must not
+ # remain loaded behind it
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+ error "mdt start must fail"
+ do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+ # fault cleared: the identical mount must now succeed
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+ error "mdt start must not fail"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+
+test_renamefs() {
+ local newname=$1
+
+ echo "rename $FSNAME to $newname"
+
+ # Must call the helper directly: "[ ! combined_mgs_mds ]" only tests
+ # that the literal string is non-empty (always true, so the negation
+ # is always false) and the separate-MGS branch would never run.
+ if ! combined_mgs_mds; then
+ # NOTE(review): $facet holds a device path from mgsdevname,
+ # yet is also passed to facet_fstype below -- confirm
+ # facet_fstype accepts a device argument
+ local facet=$(mgsdevname)
+
+ do_facet mgs \
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+ error "(7) Fail to rename MGS"
+ if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ reimport_zpool mgs $newname-mgs
+ fi
+ fi
+
+ # rename every MDT, reimporting zpools under the new name for zfs
+ for num in $(seq $MDSCOUNT); do
+ local facet=$(mdsdevname $num)
+
+ do_facet mds${num} \
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+ error "(8) Fail to rename MDT $num"
+ if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ reimport_zpool mds${num} $newname-mdt${num}
+ fi
+ done
+
+ # likewise for every OST
+ for num in $(seq $OSTCOUNT); do
+ local facet=$(ostdevname $num)
+
+ do_facet ost${num} \
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
+ error "(9) Fail to rename OST $num"
+ if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ reimport_zpool ost${num} $newname-ost${num}
+ fi
+ done
+}
+
+test_103_set_pool() {
+ local pool=$1
+ local ost=$2
+
+ # register the OST as a member of the named pool on the MGS
+ do_facet mgs $LCTL pool_add $FSNAME.$pool ${FSNAME}-$ost ||
+ error "Fail to add $ost to $FSNAME.$pool"
+ # block until the client-side lov actually lists the new member
+ wait_update $HOSTNAME \
+ "lctl get_param -n lov.$FSNAME-clilov-*.pools.$pool |
+ grep $ost" "$FSNAME-${ost}_UUID" ||
+ error "$ost is NOT in pool $FSNAME.$pool"
+}
+
+test_103_check_pool() {
+ local save_fsname=$1
+ local errno=$2
+
+ stat $DIR/$tdir/test-framework.sh ||
+ error "($errno) Fail to stat"
+ do_facet mgs $LCTL pool_list $FSNAME.pool1 ||
+ error "($errno) Fail to list $FSNAME.pool1"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname ||
+ error "($errno) Fail to list $FSNAME.$save_fsname"
+ do_facet mgs $LCTL pool_list $FSNAME.$save_fsname |
+ grep ${FSNAME}-OST0000 ||
+ error "($errno) List $FSNAME.$save_fsname is invalid"
+
+ local pname=$($LFS getstripe --pool $DIR/$tdir/d0)
+ [ "$pname" = "$save_fsname" ] ||
+ error "($errno) Unexpected pool name $pname"
+}
+
+test_103() {
+ check_mount_and_prep
+ rm -rf $DIR/$tdir
+ mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir"
+ cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
+ error "(2) Fail to copy test-framework.sh"
+
+ do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
+ error "(3) Fail to create $FSNAME.pool1"
+ # name the pool name as the fsname
+ do_facet mgs $LCTL pool_new $FSNAME.$FSNAME ||
+ error "(4) Fail to create $FSNAME.$FSNAME"
+
+ test_103_set_pool $FSNAME OST0000
+
+ # stripe a directory over the fsname-named pool before renaming
+ $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+ error "(6) Fail to setstripe on $DIR/$tdir/d0"
+
+ # keep zpools imported across stopall so test_renamefs can retune them
+ KEEP_ZPOOL=true
+ stopall
+
+ test_renamefs mylustre
+
+ # remember the original fsname; pools keep that name after the rename
+ local save_fsname=$FSNAME
+ FSNAME="mylustre"
+ setupall
+
+ # first rename: data and pools must survive under the new fsname
+ test_103_check_pool $save_fsname 7
+
+ if [ $OSTCOUNT -ge 2 ]; then
+ test_103_set_pool $save_fsname OST0001
+ fi
+
+ $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+ error "(16) Fail to setstripe on $DIR/$tdir/f0"
+
+ stopall
+
+ # second rename to an unrelated name
+ test_renamefs tfs
+
+ FSNAME="tfs"
+ setupall
+
+ test_103_check_pool $save_fsname 17
+
+ stopall
+
+ # rename back to the original fsname and restore normal behavior
+ test_renamefs $save_fsname
+
+ FSNAME=$save_fsname
+ setupall
+ KEEP_ZPOOL=false
+}
+run_test 103 "rename filesystem name"
+