+cleanup_86() {
+ trap 0
+
+ # ost1 has already registered to the MGS before the reformat.
+ # So after reformatting it with option "-G", it could not be
+ # mounted to the MGS. Cleanup the system for subsequent tests.
+ reformat_and_config
+}
+
+test_86() {
+ local server_version=$(lustre_version_code $SINGLEMDS)
+ [ "$(facet_fstype ost1)" = "zfs" ] &&
+ skip "LU-6442: no such mkfs params for ZFS OSTs" && return
+ [[ $server_version -ge $(version_code 2.7.56) ]] ||
+ { skip "Need server version newer than 2.7.55"; return 0; }
+
+ local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \
+ --reformat $(ostdevname 1) $(ostvdevname 1)"
+
+ local NEWSIZE=1024
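+	# record the current flex_bg group size for reference; FOUNDSIZE is
+	# compared against NEWSIZE after the reformat below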
+ local OLDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
+ awk '/Flex block group size: / { print $NF; exit; }')
+
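+	# $opts holds the *name* of the options variable; ${!opts} below
+	# expands it indirectly. If no --mkfsoptions is present yet, append
+	# one, otherwise splice "-O flex_bg -G $NEWSIZE" into the existing
+	# --mkfsoptions value via pattern substitution.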
+ local opts=OST_OPTS
+ if [[ ${!opts} != *mkfsoptions* ]]; then
+ eval opts=\"${!opts} \
+ --mkfsoptions='\\\"-O flex_bg -G $NEWSIZE\\\"'\"
+ else
+		local val=${!opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O flex_bg -G $NEWSIZE }
+ eval opts='${val}'
+ fi
+
+ echo "params: $opts"
+
+ trap cleanup_86 EXIT ERR
+
+ stopall
+ add ost1 $opts || error "add ost1 failed with new params"
+
+ local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
+ awk '/Flex block group size: / { print $NF; exit; }')
+
+ [[ $FOUNDSIZE == $NEWSIZE ]] ||
+ error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE"
+
+ cleanup_86
+}
+run_test 86 "Replacing mkfs.lustre -G option"
+
+test_87() { #LU-6544
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ { skip "Need MDS version at least 2.7.56" && return; }
+ [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+ { skip "Only applicable to ldiskfs-based MDTs" && return; }
+ [[ $OSTCOUNT -gt 69 ]] &&
+ { skip "Ignore wide striping situation" && return; }
+
+ local mdsdev=$(mdsdevname 1)
+ local mdsvdev=$(mdsvdevname 1)
+ local file=$DIR/$tfile
+ local mntpt=$(facet_mntpt $SINGLEMDS)
+ local used_xattr_blk=0
+ local inode_size=${1:-512}
+ local left_size=0
+ local xtest="trusted.test"
+ local value
+ local orig
+ local i
+
+	# Please see LU-6544 for the MDT inode size calculation
+ if [ $OSTCOUNT -gt 26 ]; then
+ inode_size=2048
+ elif [ $OSTCOUNT -gt 5 ]; then
+ inode_size=1024
+ fi
+ left_size=$(expr $inode_size - \
+ 156 - \
+ 32 - \
+ 32 - $OSTCOUNT \* 24 - 16 - 3 - \
+ 24 - 16 - 3 - \
+ 24 - 18 - $(expr length $tfile) - 16 - 4)
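+	# e.g. with OSTCOUNT=4, inode_size stays 512 and a 7-character
+	# $tfile gives: 512 - 156 - 32 - 32 - 4*24 - 16 - 3 - 24 - 16 - 3 -
+	# 24 - 18 - 7 - 16 - 4 = 65 bytes left for the xattr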
+	if [ $left_size -le 0 ]; then
+		echo "No space ($left_size) is expected to be left in inode."
+		echo "Try a 1-byte xattr instead to verify this."
+		left_size=1
+	else
+		echo "Estimate: at most $left_size-byte space left in inode."
+	fi
+
+ unload_modules
+ reformat
+
+ add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \
+ --reformat $mdsdev $mdsvdev || error "add mds1 failed"
+ start_mdt 1 > /dev/null || error "start mdt1 failed"
+ for i in $(seq $OSTCOUNT); do
+ start ost$i $(ostdevname $i) $OST_MOUNT_OPTS > /dev/null ||
+ error "start ost$i failed"
+ done
+ mount_client $MOUNT > /dev/null || error "mount client $MOUNT failed"
+ check_mount || error "check client $MOUNT failed"
+
+ #set xattr
+ $SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed"
+ $GETSTRIPE $file || error "$GETSTRIPE $file failed"
+ i=$($GETSTRIPE -c $file)
+ if [ $i -ne $OSTCOUNT ]; then
+ left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
+ echo -n "Since only $i out $OSTCOUNT OSTs are used, "
+ echo -n "the expected left space is changed to "
+ echo "$left_size bytes at most."
+ fi
+ value=$(generate_string $left_size)
+ setfattr -n $xtest -v $value $file
+ orig=$(get_xattr_value $xtest $file)
+ [[ "$orig" != "$value" ]] && error "$xtest changed"
+
+ #Verify if inode has some expected space left
+ umount $MOUNT > /dev/null || error "umount $MOUNT failed"
+ stop_mdt 1 > /dev/null || error "stop mdt1 failed"
+ mount_ldiskfs $SINGLEMDS || error "mount -t ldiskfs $SINGLEMDS failed"
+
+ do_facet $SINGLEMDS ls -sal $mntpt/ROOT/$tfile
+ used_xattr_blk=$(do_facet $SINGLEMDS ls -s $mntpt/ROOT/$tfile |
+ awk '{ print $1 }')
+ [[ $used_xattr_blk -eq 0 ]] &&
+ error "Please check MDS inode size calculation: \
+ more than $left_size-byte space left in inode."
+ echo "Verified: at most $left_size-byte space left in inode."
+
+ stopall
+}
+run_test 87 "check if MDT inode can hold EAs with N stripes properly"
+
+test_88() {
+ [ "$(facet_fstype mds1)" == "zfs" ] &&
+ skip "LU-6662: no implementation for ZFS" && return
+
+ load_modules
+
+ add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) \
+ --reformat $(mdsdevname 1) || error "add mds1 failed"
+
+ do_facet mds1 "$TUNEFS $(mdsdevname 1) |
+ grep -e \".*opts:.*errors=remount-ro.*\"" ||
+ error "default mount options is missing"
+
+ add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) \
+ --mountfsoptions="user_xattr,errors=panic" \
+ --reformat $(mdsdevname 1) || error "add mds1 failed"
+
+ do_facet mds1 "$TUNEFS $(mdsdevname 1) |
+ grep -e \".*opts:.*errors=panic.*\"" ||
+ error "user can't override default mount options"
+}
+run_test 88 "check the default mount options can be overridden"
+
+# $1 test directory
+# $2 (optional) value of max_mod_rpcs_in_flight to set
+check_max_mod_rpcs_in_flight() {
+ local dir="$1"
+ local mmr="$2"
+ local idx
+ local facet
+ local tmp
+ local i
+
+ idx=$(printf "%04x" $($LFS getdirstripe -i $dir))
+ facet="mds$((0x$idx + 1))"
+
+ if [ -z "$mmr" ]; then
+		# get value of max_mod_rpcs_in_flight
+ mmr=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) ||
+ error "Unable to get max_mod_rpcs_in_flight"
+ echo "max_mod_rcps_in_flight is $mmr"
+ else
+ # set value of max_mod_rpcs_in_flight
+ $LCTL set_param \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight=$mmr ||
+ error "Unable to set max_mod_rpcs_in_flight to $mmr"
+ echo "max_mod_rpcs_in_flight set to $mmr"
+ fi
+
+ # create mmr+1 files
+ echo "creating $((mmr + 1)) files ..."
+ umask 0022
+ for i in $(seq $((mmr + 1))); do
+ touch $dir/file-$i
+ done
+
+ ### part 1 ###
+
+ # consumes mmr-1 modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+ # drop requests on MDT so that RPC slots are consumed
+ # during all the request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $((mmr - 1)) chmod in parallel ..."
+ for i in $(seq $((mmr - 1))); do
+ chmod 0600 $dir/file-$i &
+ done
+ sleep 1
+
+ # send one additional modify RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional chmod in parallel ..."
+ chmod 0600 $dir/file-$mmr &
+ sleep 1
+
+	# check that this additional modify RPC gets a modify RPC slot
+	# and that its operation succeeds
+ checkstat -vp 0600 $dir/file-$mmr ||
+ error "Unable to send $mmr modify RPCs in parallel"
+ wait
+
+ ### part 2 ###
+
+ # consumes mmr modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+ # drop requests on MDT so that RPC slots are consumed
+ # during all the request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $mmr chmod in parallel ..."
+ for i in $(seq $mmr); do
+ chmod 0666 $dir/file-$i &
+ done
+ sleep 1
+
+ # send one additional modify RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional chmod in parallel ..."
+ chmod 0666 $dir/file-$((mmr + 1)) &
+ sleep 1
+
+	# check that this additional modify RPC is blocked waiting for a
+	# modify RPC slot: files were created 0644 (umask 0022), so the
+	# blocked chmod 0666 must not have been processed yet
+	checkstat -vp 0644 $dir/file-$((mmr + 1)) ||
+		error "Unexpectedly sent $((mmr + 1)) modify RPCs in parallel"
+ wait
+}
+
+test_90a() {
+ reformat
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ # check default value
+ $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ check_max_mod_rpcs_in_flight $DIR/$tdir
+
+ cleanup
+}
+run_test 90a "check max_mod_rpcs_in_flight is enforced"
+
+test_90b() {
+ local idx
+ local facet
+ local tmp
+ local mmrpc
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ ### test 1.
+ # update max_mod_rpcs_in_flight
+ $LFS mkdir -c1 $DIR/${tdir}1 || error "mkdir $DIR/${tdir}1 failed"
+ check_max_mod_rpcs_in_flight $DIR/${tdir}1 1
+
+ ### test 2.
+	# check client is able to send multiple modify RPCs in parallel
+ tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
+ grep -c "multi_mod_rpcs")
+ if [ "$tmp" -ne $MDSCOUNT ]; then
+ echo "Client not able to send multiple modify RPCs in parallel"
+ cleanup
+ return
+ fi
+
+ # update max_mod_rpcs_in_flight
+ $LFS mkdir -c1 $DIR/${tdir}2 || error "mkdir $DIR/${tdir}2 failed"
+ check_max_mod_rpcs_in_flight $DIR/${tdir}2 5
+
+ ### test 3.
+ $LFS mkdir -c1 $DIR/${tdir}3 || error "mkdir $DIR/${tdir}3 failed"
+ idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/${tdir}3))
+ facet="mds$((0x$idx + 1))"
+
+ # save MDT max_mod_rpcs_per_client
+ mmrpc=$(do_facet $facet \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+
+ # update max_mod_rpcs_in_flight
+ umount_client $MOUNT
+ do_facet $facet \
+ "echo 16 > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
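+	# max_mod_rpcs_in_flight must stay strictly below max_rpcs_in_flight
+	# (see test_90c), so raise the latter to 17 before testing 16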
+ $LCTL set_param mdc.$FSNAME-MDT$idx-mdc-*.max_rpcs_in_flight=17
+ check_max_mod_rpcs_in_flight $DIR/${tdir}3 16
+
+ # restore MDT max_mod_rpcs_per_client initial value
+ do_facet $facet \
+ "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+
+ rm -rf $DIR/${tdir}?
+ cleanup
+}
+run_test 90b "check max_mod_rpcs_in_flight is enforced after update"
+
+test_90c() {
+ local tmp
+ local mrif
+ local mmrpc
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+	# check client is able to send multiple modify RPCs in parallel
+ tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
+ grep -c "multi_mod_rpcs")
+ if [ "$tmp" -ne $MDSCOUNT ]; then
+ skip "Client not able to send multiple modify RPCs in parallel"
+ cleanup
+ return
+ fi
+
+ # get max_rpcs_in_flight value
+ mrif=$($LCTL get_param -n mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
+ echo "max_rpcs_in_flight is $mrif"
+
+ # get MDT max_mod_rpcs_per_client
+ mmrpc=$(do_facet mds1 \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+ echo "max_mod_rpcs_per_client is $mmrpc"
+
+ # testcase 1
+ # attempt to set max_mod_rpcs_in_flight to max_rpcs_in_flight value
+ # prerequisite: set max_mod_rpcs_per_client to max_rpcs_in_flight value
+ umount_client $MOUNT
+ do_facet mds1 \
+ "echo $mrif > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif &&
+ error "set max_mod_rpcs_in_flight to $mrif should fail"
+
+ umount_client $MOUNT
+ do_facet mds1 \
+ "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+
+ # testcase 2
+ # attempt to set max_mod_rpcs_in_flight to max_mod_rpcs_per_client+1
+ # prerequisite: set max_rpcs_in_flight to max_mod_rpcs_per_client+2
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc + 2))
+
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc + 1)) &&
+ error "set max_mod_rpcs_in_flight to $((mmrpc + 1)) should fail"
+
+ cleanup
+}
+run_test 90c "check max_mod_rpcs_in_flight update limits"
+
+test_90d() {
+ local idx
+ local facet
+ local mmr
+ local i
+	local pid
+	local tmp
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
+ facet="mds$((0x$idx + 1))"
+
+ # check client version supports multislots
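+	# "lctl get_param -N" lists matching parameter names only; an empty
+	# result means the client does not expose max_mod_rpcs_in_flight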
+ tmp=$($LCTL get_param -N \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
+ if [ -z "$tmp" ]; then
+ skip "Client does not support multiple modify RPCs in flight"
+ cleanup
+ return
+ fi
+
+	# get current value of max_mod_rpcs_in_flight
+	mmr=$($LCTL get_param -n \
+		mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
+	echo "max_mod_rpcs_in_flight is $mmr"
+
+ # create mmr files
+ echo "creating $mmr files ..."
+ umask 0022
+ for i in $(seq $mmr); do
+ touch $DIR/$tdir/file-$i
+ done
+
+ # prepare for close RPC
+ multiop_bg_pause $DIR/$tdir/file-close O_c
+ pid=$!
+
+ # consumes mmr modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+ # drop requests on MDT so that RPC slots are consumed
+ # during all the request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $mmr chmod in parallel ..."
+ for i in $(seq $mmr); do
+ chmod 0600 $DIR/$tdir/file-$i &
+ done
+
+ # send one additional close RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional close in parallel ..."
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ sleep 1
+
+	# check that this additional close RPC gets a modify RPC slot
+	# and that the multiop process completes
+ [ -d /proc/$pid ] &&
+ error "Unable to send the additional close RPC in parallel"
+ wait
+ rm -rf $DIR/$tdir
+ cleanup
+}
+run_test 90d "check one close RPC is allowed above max_mod_rpcs_in_flight"
+
+check_uuid_on_ost() {
+ local nid=$1
+ do_facet ost1 "$LCTL get_param obdfilter.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+check_uuid_on_mdt() {
+ local nid=$1
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+test_91() {
+ local uuid
+ local nid
+ local found
+
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
+ { skip "Need OST version at least 2.7.63" && return 0; }
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
+ { skip "Need MDT version at least 2.7.63" && return 0; }
+
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ if remote_mds; then
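+		# escape the dots in the NID so it can be used as a grep
+		# pattern against the exports lists below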
+ nid=$($LCTL list_nids | head -1 | sed "s/\./\\\./g")
+ else
+ nid="0@lo"
+ fi
+ uuid=$(get_client_uuid $MOUNT)
+
+ echo "list nids on mdt:"
+ do_facet $SINGLEMDS "$LCTL list_param mdt.${FSNAME}*.exports.*"
+ echo "uuid from $nid:"
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on OST"
+
+ # umount the client so it won't reconnect
+	manual_umount_client --force || error "failed to umount client: $?"
+ # shouldn't disappear on MDS after forced umount
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid"
+
+ echo "evict $nid"
+ do_facet $SINGLEMDS \
+ "$LCTL set_param -n mdt.${mds1_svc}.evict_client nid:$nid"
+
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ # check it didn't reconnect (being umounted)
+ sleep $((TIMEOUT+1))
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ cleanup
+}
+run_test 91 "evict-by-nid support"
+
+generate_ldev_conf() {
+ # generate an ldev.conf file
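+	# each line is "host failover-host label device-path", tab-separated,
+	# with zfs-backed devices prefixed by "zfs:" (see the printf calls
+	# below); "-" means no failover host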
+ local ldevconfpath=$1
+ local fstype=
+ local fsldevformat=""
+ touch $ldevconfpath
+
+ fstype=$(facet_fstype mgs)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t-\t%s-MGS0000\t%s%s\n" \
+ $mgs_HOST \
+ $FSNAME \
+ $fsldevformat \
+ $(mgsdevname) > $ldevconfpath
+
+	local mdsfo_host=$mdsfailover_HOST
+ if [ -z "$mdsfo_host" ]; then
+ mdsfo_host="-"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ fstype=$(facet_fstype mds$num)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t%s\t%s-MDT%04d\t%s%s\n" \
+ $mds_HOST \
+ $mdsfo_host \
+ $FSNAME \
+ $num \
+ $fsldevformat \
+ $(mdsdevname $num) >> $ldevconfpath
+ done
+
+	local ostfo_host=$ostfailover_HOST
+ if [ -z "$ostfo_host" ]; then
+ ostfo_host="-"
+ fi
+
+ for num in $(seq $OSTCOUNT); do
+ fstype=$(facet_fstype ost$num)
+ if [ "$fstype" == "zfs" ]; then
+ fsldevformat="$fstype:"
+ else
+ fsldevformat=""
+ fi
+
+ printf "%s\t%s\t%s-OST%04d\t%s%s\n" \
+ $ost_HOST \
+ $ostfo_host \
+ $FSNAME \
+ $num \
+ $fsldevformat \
+ $(ostdevname $num) >> $ldevconfpath
+ done
+
+ echo "----- $ldevconfpath -----"
+ cat $ldevconfpath
+ echo "--- END $ldevconfpath ---"
+}
+
+generate_nids() {
+ # generate a nids file (mapping between hostname to nid)
+ # looks like we only have the MGS nid available to us
+ # so just echo that to a file
+ local nidspath=$1
+ echo -e "${mgs_HOST}\t${MGSNID}" > $nidspath
+
+ echo "----- $nidspath -----"
+ cat $nidspath
+ echo "--- END $nidspath ---"
+}
+
+compare_ldev_output() {
+ ldev_output=$1
+ expected_output=$2
+
+ sort $expected_output -o $expected_output
+ sort $ldev_output -o $ldev_output
+
+ echo "-- START OF LDEV OUTPUT --"
+ cat $ldev_output
+ echo "--- END OF LDEV OUTPUT ---"
+
+ echo "-- START OF EXPECTED OUTPUT --"
+ cat $expected_output
+ echo "--- END OF EXPECTED OUTPUT ---"
+
+ diff $expected_output $ldev_output
+ return $?
+}
+
+test_92() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ echo "Host is $(hostname)"
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # echo the mgs nid and compare it to environment variable MGSNID
+ # also, ldev.conf and nids is a server side thing, use the OSS
+ # hostname
+	local output
+	local rc
+	output=$($LDEV -c $LDEVCONFPATH -H $ost_HOST -n $NIDSPATH echo %m)
+	rc=$?
+
+	echo "-- START OF LDEV OUTPUT --"
+	echo -e "$output"
+	echo "--- END OF LDEV OUTPUT ---"
+
+	# ldev failed, error (use the rc saved before the echo calls above,
+	# since $? here would only reflect the last echo)
+	if [ $rc -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH
+		error "ldev failed to execute!"
+	fi
+
+ # need to process multiple lines because of combined MGS and MDS
+	echo -e "$output" | awk '{ print $2 }' | while read -r line ; do
+ if [ "$line" != "$MGSNID" ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev failed mgs nid '$line', expected '$MGSNID'"
+ fi
+ done
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 92 "ldev returns MGS NID correctly in command substitution"
+
+test_93() {
+ [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return
+
+ reformat
+ #start mgs or mgs/mdt0
+	if ! combined_mgs_mds ; then
+		start_mgs
+	fi
+	start_mdt 1
+
+	start_ost || error "OST0 start failed"
+
+ #define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
+	do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
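+	# 0x80000000 is the one-shot CFS_FAIL_ONCE flag; fail_val presumably
+	# sets the delay (in seconds) injected while the MGS writes each
+	# target's registration records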
+ for num in $(seq 2 $MDSCOUNT); do
+ start_mdt $num &
+ done
+
+	mount_client $MOUNT || error "mount client failed"
+ wait_osc_import_state mds ost FULL
+ wait_osc_import_state client ost FULL
+ check_mount || error "check_mount failed"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 93 "register multiple MDTs at the same time"
+
+test_94() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME > $LDEV_OUTPUT
+
+ # ldev failed, error
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ local EXPECTED_OUTPUT=$TMP/ldev-expected.txt
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct hostlist!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 94 "ldev outputs correct labels for file system name query"
+
+test_95() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ # SUCCESS CASES
+ # file sys filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F failed!"
+ fi
+
+ # local filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -l &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -l failed!"
+ fi
+
+ # foreign filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f failed!"
+ fi
+
+ # all filter
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a &>/dev/null
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a failed!"
+ fi
+
+ # FAILURE CASES
+ # all & file sys
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -F $FSNAME &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -F incorrectly succeeded"
+ fi
+
+ # all & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -f incorrectly succeeded"
+ fi
+
+ # all & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -a -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -a and -l incorrectly succeeded"
+ fi
+
+ # foreign & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -f -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -f and -l incorrectly succeeded"
+ fi
+
+ # file sys & local
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -l &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -l incorrectly succeeded"
+ fi
+
+ # file sys & foreign
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -f &>/dev/null
+ if [ $? -eq 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH
+ error "ldev label filtering w/ -F and -f incorrectly succeeded"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH
+}
+run_test 95 "ldev should only allow one label filter"
+
+test_96() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \
+ echo %H-%b | \
+ awk '{print $2}' > $LDEV_OUTPUT
+
+	# ldev failed, error (check ldev's own status via PIPESTATUS, since
+	# $? would only reflect the trailing awk)
+	if [ ${PIPESTATUS[0]} -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute!"
+ fi
+
+ # expected output
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT
+
+ if [ "$mgs_HOST" == "$mds_HOST" ]; then
+ for num in $(seq $MDSCOUNT); do
+ echo "$mds_HOST-$(facet_fstype mds$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ if [ "$mgs_HOST" == "$ost_HOST" ]; then
+ for num in $(seq $OSTCOUNT); do
+ echo "$ost_HOST-$(facet_fstype ost$num)" \
+ >> $EXPECTED_OUTPUT
+ done
+ fi
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+
+test_97() {
+ if [ -z "$LDEV" ]; then
+ error "ldev is missing!"
+ fi
+
+ local LDEVCONFPATH=$TMP/ldev.conf
+ local NIDSPATH=$TMP/nids
+
+ generate_ldev_conf $LDEVCONFPATH
+ generate_nids $NIDSPATH
+
+ local LDEV_OUTPUT=$TMP/ldev-output.txt
+ local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt
+
+ echo -e "\nMDT role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
+ error "ldev failed to execute for mdt role!"
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mdt role!"
+ fi
+
+ echo -e "\nOST role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for ost role!"
+ fi
+
+ rm $EXPECTED_OUTPUT
+ for num in $(seq $OSTCOUNT); do
+ printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
+ done
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for ost role!"
+ fi
+
+ echo -e "\nMGS role"
+ $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
+ error "ldev failed to execute for mgs role!"
+ fi
+
+ printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+ compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+ if [ $? -ne 0 ]; then
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+ error "ldev failed to produce the correct output for mgs role!"
+ fi
+
+ rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct output when querying based on role"
+
+test_98()
+{
+	local mountopt
+
+ setup
+ check_mount || error "mount failed"
+ mountopt="user_xattr"
+ for ((x = 1; x <= 400; x++)); do
+ mountopt="$mountopt,user_xattr"
+ done
+ remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ error "Buffer overflow check failed"
+ cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+test_99()
+{
+ [[ $(facet_fstype ost1) != ldiskfs ]] &&
+ { skip "Only applicable to ldiskfs-based OSTs" && return; }
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+ { skip "Need OST version at least 2.8.57" && return 0; }
+
+ local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ --reformat $(ostdevname 1) $(ostvdevname 1)"
+	do_facet ost1 $DEBUGFS -c -R stats $(ostdevname 1) | grep "meta_bg" &&
+		skip "meta_bg already set" && return
+
+ local opts=ost_opts
+ if [[ ${!opts} != *mkfsoptions* ]]; then
+ eval opts=\"${!opts} \
+ --mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+ else
+ local val=${!opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+ eval opts='${val}'
+ fi
+
+ echo "params: $opts"
+
+ add ost1 $opts || error "add ost1 failed with new params"
+
+	do_facet ost1 $DEBUGFS -c -R stats $(ostdevname 1) | grep "meta_bg" ||
+		error "meta_bg is not set"
+
+ return 0
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+ reformat
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ # Desired output
+ # MGS:
+ # 0@lo
+ # lustre-MDT0000:
+ # 0@lo
+ # lustre-OST0000:
+ # 0@lo
+	do_facet mgs 'lshowmount -v' | awk 'BEGIN {rc=1} /MGS:/ {rc=0}
+		END {exit rc}' || error "lshowmount has no output for MGS"
+
+	do_facet mds1 'lshowmount -v' | awk 'BEGIN {rc=1} /-MDT0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output for MDT0"
+
+	do_facet ost1 'lshowmount -v' | awk 'BEGIN {rc=1} /-OST0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output for OST0"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+ local createmany_oid
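+	# dev is the MDT-side OSC (OSP) device towards OST0000; it is
+	# repeatedly deactivated/reactivated below to force reconnections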
+ local dev=$FSNAME-OST0000-osc-MDT0000
+ setup
+
+ createmany -o $DIR1/$tfile-%d 50000 &
+ createmany_oid=$!
+ # MDT->OST reconnection causes MDT<->OST last_id synchornisation
+ # via osp_precreate_cleanup_orphans.
+ for ((i = 0; i < 100; i++)); do
+ for ((k = 0; k < 10; k++)); do
+ do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+ "$LCTL --device $dev activate"
+ done
+
+		ls -asl $MOUNT | grep '???' &&
+			(kill -9 $createmany_oid &>/dev/null; \
+			error "File has no object on OST")
+
+ kill -s 0 $createmany_oid || break
+ done
+ wait $createmany_oid
+ cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+ cleanup || error "cleanup failed with $?"
+
+ local mds1dev=$(mdsdevname 1)
+ local mds1mnt=$(facet_mntpt mds1)
+ local mds1fstype=$(facet_fstype mds1)
+ local mds1opts=$MDS_MOUNT_OPTS
+
+	if [[ $mds1fstype == ldiskfs ]] &&
+ ! do_facet mds1 test -b $mds1dev; then
+ mds1opts=$(csa_add "$mds1opts" -o loop)
+ fi
+ if [[ $mds1fstype == zfs ]]; then
+ import_zpool mds1 || return ${PIPESTATUS[0]}
+ fi
+
+ # unload all and only load libcfs to allow fail_loc setting
+ do_facet mds1 lustre_rmmod || error "unable to unload modules"
+ do_facet mds1 modprobe libcfs || error "libcfs not loaded"
+ do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded"
+
+ #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a
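+	# 0x80000000 is the one-shot CFS_FAIL_ONCE flag, so only the first
+	# obdclass module-load attempt is made to fail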
+ do_facet mds1 "$LCTL set_param fail_loc=0x8000060a"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts &&
+ error "mdt start must fail"
+ do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load"
+
+ do_facet mds1 "$LCTL set_param fail_loc=0x0"
+
+ do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts ||
+ error "mdt start must not fail"
+
+ cleanup || error "cleanup failed with $?"
+}
+run_test 102 "obdclass module cleanup upon error"
+