test_88() {
	# LU-6662: the default-mount-options handling checked here is
	# only implemented for ldiskfs, so skip on a ZFS backend.
	[ "$(facet_fstype mds1)" == "zfs" ] &&
		skip "LU-6662: no implementation for ZFS" && return

	load_modules

	# format mds1 without any explicit mount options
	add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) \
		--reformat $(mdsdevname 1) || error "add mds1 failed"

	# the default options reported by tunefs must include
	# errors=remount-ro
	do_facet mds1 "$TUNEFS $(mdsdevname 1) |
		grep -e \".*opts:.*errors=remount-ro.*\"" ||
		error "default mount options is missing"

	# reformat, this time overriding the defaults explicitly
	add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) \
		--mountfsoptions="user_xattr,errors=panic" \
		--reformat $(mdsdevname 1) || error "add mds1 failed"

	# the user-supplied errors=panic must have replaced the default
	do_facet mds1 "$TUNEFS $(mdsdevname 1) |
		grep -e \".*opts:.*errors=panic.*\"" ||
		error "user can't override default mount options"
}
run_test 88 "check the default mount options can be overridden"
+
# Verify that max_mod_rpcs_in_flight is enforced for the MDT backing the
# given directory: exactly $mmr modify RPCs may run in parallel, and one
# additional modify RPC must block until a slot frees up.
# $1 test directory
# $2 (optional) value of max_mod_rpcs_in_flight to set
check_max_mod_rpcs_in_flight() {
	local dir="$1"
	local mmr="$2"
	local idx
	local facet
	local tmp
	local i

	# resolve which MDT index (and thus which MDS facet) holds $dir
	idx=$(printf "%04x" $($LFS getdirstripe -i $dir))
	facet="mds$((0x$idx + 1))"

	if [ -z "$mmr" ]; then
		# get value of max_mod_rcps_in_flight
		mmr=$($LCTL get_param -n \
			mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) ||
			error "Unable to get max_mod_rpcs_in_flight"
		echo "max_mod_rcps_in_flight is $mmr"
	else
		# set value of max_mod_rpcs_in_flight
		$LCTL set_param \
		    mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight=$mmr ||
			error "Unable to set max_mod_rpcs_in_flight to $mmr"
		echo "max_mod_rpcs_in_flight set to $mmr"
	fi

	# create mmr+1 files (mode 0644 thanks to umask 0022 — part 2
	# relies on that initial mode)
	echo "creating $((mmr + 1)) files ..."
	umask 0022
	for i in $(seq $((mmr + 1))); do
		touch $dir/file-$i
	done

	### part 1 ###
	# with mmr-1 slots occupied, one extra modify RPC must still get
	# the last available slot and complete

	# consumes mmr-1 modify RPC slots
	#define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
	# drop requests on MDT so that RPC slots are consumed
	# during all the request resend interval
	do_facet $facet "$LCTL set_param fail_loc=0x159"
	echo "launch $((mmr - 1)) chmod in parallel ..."
	for i in $(seq $((mmr - 1))); do
		chmod 0600 $dir/file-$i &
	done
	sleep 1

	# send one additional modify RPC
	do_facet $facet "$LCTL set_param fail_loc=0"
	echo "launch 1 additional chmod in parallel ..."
	chmod 0600 $dir/file-$mmr &
	sleep 1

	# check this additional modify RPC get a modify RPC slot
	# and succeed its operation
	checkstat -vp 0600 $dir/file-$mmr ||
		error "Unable to send $mmr modify RPCs in parallel"
	wait

	### part 2 ###
	# with all mmr slots occupied, an extra modify RPC must block:
	# its chmod must not have been applied yet

	# consumes mmr modify RPC slots
	#define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
	# drop requests on MDT so that RPC slots are consumed
	# during all the request resend interval
	do_facet $facet "$LCTL set_param fail_loc=0x159"
	echo "launch $mmr chmod in parallel ..."
	for i in $(seq $mmr); do
		chmod 0666 $dir/file-$i &
	done
	sleep 1

	# send one additional modify RPC
	do_facet $facet "$LCTL set_param fail_loc=0"
	echo "launch 1 additional chmod in parallel ..."
	chmod 0666 $dir/file-$((mmr + 1)) &
	sleep 1

	# check this additional modify RPC blocked getting a modify RPC slot
	# (the file must still have its creation mode 0644)
	checkstat -vp 0644 $dir/file-$((mmr + 1)) ||
		error "Unexpectedly send $(($mmr + 1)) modify RPCs in parallel"
	wait
}
+
test_90a() {
	# start from a freshly formatted filesystem
	reformat
	if ! combined_mgs_mds ; then
		start_mgs
	fi
	setup

	# the check only makes sense if the MDC negotiated the
	# multiple-modify-RPCs connect flag with the server
	[[ $($LCTL get_param mdc.*.import |
		grep "connect_flags:.*multi_mod_rpc") ]] ||
		{ skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }

	# check default value
	$LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
	check_max_mod_rpcs_in_flight $DIR/$tdir

	cleanup
}
run_test 90a "check max_mod_rpcs_in_flight is enforced"
+
test_90b() {
	local idx
	local facet
	local tmp
	local mmrpc

	setup

	# requires the multiple-modify-RPCs connect flag
	[[ $($LCTL get_param mdc.*.import |
		grep "connect_flags:.*multi_mod_rpc") ]] ||
		{ skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }

	### test 1.
	# update max_mod_rpcs_in_flight
	$LFS mkdir -c1 $DIR/${tdir}1 || error "mkdir $DIR/${tdir}1 failed"
	check_max_mod_rpcs_in_flight $DIR/${tdir}1 1

	### test 2.
	# check client is able to send multiple modify RPCs in paralell
	# (every MDC import must report the feature)
	tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
		grep -c "multi_mod_rpcs")
	if [ "$tmp" -ne $MDSCOUNT ]; then
		echo "Client not able to send multiple modify RPCs in parallel"
		cleanup
		return
	fi

	# update max_mod_rpcs_in_flight
	$LFS mkdir -c1 $DIR/${tdir}2 || error "mkdir $DIR/${tdir}2 failed"
	check_max_mod_rpcs_in_flight $DIR/${tdir}2 5

	### test 3.
	# raise the server-side cap, then verify a larger client value
	$LFS mkdir -c1 $DIR/${tdir}3 || error "mkdir $DIR/${tdir}3 failed"
	idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/${tdir}3))
	facet="mds$((0x$idx + 1))"

	# save MDT max_mod_rpcs_per_client
	mmrpc=$(do_facet $facet \
		    cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)

	# update max_mod_rpcs_in_flight
	# (client must be unmounted while the module parameter changes,
	# so that reconnection renegotiates the new limit)
	umount_client $MOUNT
	do_facet $facet \
		"echo 16 > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
	mount_client $MOUNT
	$LCTL set_param mdc.$FSNAME-MDT$idx-mdc-*.max_rpcs_in_flight=17
	check_max_mod_rpcs_in_flight $DIR/${tdir}3 16

	# restore MDT max_mod_rpcs_per_client initial value
	do_facet $facet \
		"echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"

	rm -rf $DIR/${tdir}?
	cleanup
}
run_test 90b "check max_mod_rpcs_in_flight is enforced after update"
+
test_90c() {
	local tmp
	local mrif
	local mmrpc

	setup

	# requires the multiple-modify-RPCs connect flag
	[[ $($LCTL get_param mdc.*.import |
		grep "connect_flags:.*multi_mod_rpc") ]] ||
		{ skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }

	# check client is able to send multiple modify RPCs in paralell
	tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
		grep -c "multi_mod_rpcs")
	if [ "$tmp" -ne $MDSCOUNT ]; then
		skip "Client not able to send multiple modify RPCs in parallel"
		cleanup
		return
	fi

	# get max_rpcs_in_flight value
	mrif=$($LCTL get_param -n mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
	echo "max_rpcs_in_flight is $mrif"

	# get MDT max_mod_rpcs_per_client
	mmrpc=$(do_facet mds1 \
		    cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
	echo "max_mod_rpcs_per_client is $mmrpc"

	# testcase 1
	# attempt to set max_mod_rpcs_in_flight to max_rpcs_in_flight value
	# prerequisite: set max_mod_rpcs_per_client to max_rpcs_in_flight value
	# (must fail: max_mod_rpcs_in_flight has to stay strictly below
	# max_rpcs_in_flight)
	umount_client $MOUNT
	do_facet mds1 \
		"echo $mrif > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
	mount_client $MOUNT

	$LCTL set_param \
	    mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif &&
	    error "set max_mod_rpcs_in_flight to $mrif should fail"

	# restore the server-side limit before the next case
	umount_client $MOUNT
	do_facet mds1 \
		"echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
	mount_client $MOUNT

	# testcase 2
	# attempt to set max_mod_rpcs_in_flight to max_mod_rpcs_per_client+1
	# prerequisite: set max_rpcs_in_flight to max_mod_rpcs_per_client+2
	# (must fail: the client value may not exceed the server cap)
	$LCTL set_param \
	    mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc + 2))

	$LCTL set_param \
	    mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc + 1)) &&
	    error "set max_mod_rpcs_in_flight to $((mmrpc + 1)) should fail"

	cleanup
}
run_test 90c "check max_mod_rpcs_in_flight update limits"
+
test_90d() {
	local idx
	local facet
	local mmr
	local i
	local pid
	# declare tmp local like the sibling tests (90b/90c) do;
	# previously it leaked into the global scope
	local tmp

	setup

	# requires the multiple-modify-RPCs connect flag
	[[ $($LCTL get_param mdc.*.import |
		grep "connect_flags:.*multi_mod_rpc") ]] ||
		{ skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }

	$LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
	idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
	facet="mds$((0x$idx + 1))"

	# check client version supports multislots
	tmp=$($LCTL get_param -N \
		mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
	if [ -z "$tmp" ]; then
		skip "Client does not support multiple modify RPCs in flight"
		cleanup
		return
	fi

	# get current value of max_mod_rcps_in_flight
	mmr=$($LCTL get_param -n \
		mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
	echo "max_mod_rcps_in_flight is $mmr"

	# create mmr files
	echo "creating $mmr files ..."
	umask 0022
	for i in $(seq $mmr); do
		touch $DIR/$tdir/file-$i
	done

	# prepare for close RPC
	multiop_bg_pause $DIR/$tdir/file-close O_c
	pid=$!

	# consumes mmr modify RPC slots
	#define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
	# drop requests on MDT so that RPC slots are consumed
	# during all the request resend interval
	do_facet $facet "$LCTL set_param fail_loc=0x159"
	echo "launch $mmr chmod in parallel ..."
	for i in $(seq $mmr); do
		chmod 0600 $DIR/$tdir/file-$i &
	done

	# send one additional close RPC
	do_facet $facet "$LCTL set_param fail_loc=0"
	echo "launch 1 additional close in parallel ..."
	kill -USR1 $pid
	cancel_lru_locks mdc
	sleep 1

	# check this additional close RPC get a modify RPC slot
	# and multiop process completed
	[ -d /proc/$pid ] &&
		error "Unable to send the additional close RPC in parallel"
	wait
	rm -rf $DIR/$tdir
	cleanup
}
run_test 90d "check one close RPC is allowed above max_mod_rpcs_in_flight"
+
# Print the export uuid entry for the given client NID on ost1.
check_uuid_on_ost() {
	local nid="$1"
	local param="obdfilter.${FSNAME}*.exports.'$nid'.uuid"

	do_facet ost1 "$LCTL get_param $param"
}
+
# Print the export uuid entry for the given client NID on $SINGLEMDS.
check_uuid_on_mdt() {
	local nid="$1"
	local param="mdt.${FSNAME}*.exports.'$nid'.uuid"

	do_facet $SINGLEMDS "$LCTL get_param $param"
}
+
test_91() {
	local uuid
	local nid
	local found

	# evict-by-nid landed in 2.7.63 on both server types
	[[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
		{ skip "Need OST version at least 2.7.63" && return 0; }
	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
		{ skip "Need MDT version at least 2.7.63" && return 0; }

	start_mds || error "MDS start failed"
	start_ost || error "unable to start OST"
	mount_client $MOUNT || error "client start failed"
	check_mount || error "check_mount failed"

	if remote_mds; then
		# escape the dots so the NID can be used as a glob-safe
		# component of a lctl parameter path
		nid=$($LCTL list_nids | head -1 | sed  "s/\./\\\./g")
	else
		nid="0@lo"
	fi
	uuid=$(get_client_uuid $MOUNT)

	echo "list nids on mdt:"
	do_facet $SINGLEMDS "$LCTL list_param mdt.${FSNAME}*.exports.*"
	echo "uuid from $nid:"
	do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"

	# the client export must be visible on both servers
	found=$(check_uuid_on_mdt $nid | grep $uuid)
	[ -z "$found" ] && error "can't find $uuid $nid on MDT"
	found=$(check_uuid_on_ost $nid | grep $uuid)
	[ -z "$found" ] && error "can't find $uuid $nid on OST"

	# umount the client so it won't reconnect
	manual_umount_client --force || error "failed to umount $?"
	# shouldn't disappear on MDS after forced umount
	found=$(check_uuid_on_mdt $nid | grep $uuid)
	[ -z "$found" ] && error "can't find $uuid $nid"

	echo "evict $nid"
	do_facet $SINGLEMDS \
		"$LCTL set_param -n mdt.${mds1_svc}.evict_client nid:$nid"

	# eviction must remove the export from both servers
	found=$(check_uuid_on_mdt $nid | grep $uuid)
	[ -n "$found" ] && error "found $uuid $nid on MDT"
	found=$(check_uuid_on_ost $nid | grep $uuid)
	[ -n "$found" ] && error "found $uuid $nid on OST"

	# check it didn't reconnect (being umounted)
	sleep $((TIMEOUT+1))
	found=$(check_uuid_on_mdt $nid | grep $uuid)
	[ -n "$found" ] && error "found $uuid $nid on MDT"
	found=$(check_uuid_on_ost $nid | grep $uuid)
	[ -n "$found" ] && error "found $uuid $nid on OST"

	cleanup
}
run_test 91 "evict-by-nid support"
+
generate_ldev_conf() {
	# generate an ldev.conf file describing MGS, MDTs and OSTs
	# $1: path of the file to generate
	local ldevconfpath=$1
	local fstype=
	local fsldevformat=""
	# declare the loop variable local so it does not leak into the
	# global scope of every caller
	local num
	touch $ldevconfpath

	# ldev.conf prefixes zfs devices with "zfs:"; ldiskfs devices
	# carry no prefix
	fstype=$(facet_fstype mgs)
	if [ "$fstype" == "zfs" ]; then
		fsldevformat="$fstype:"
	else
		fsldevformat=""
	fi

	# MGS line: "<host> <failover> <label> <device>"
	printf "%s\t-\t%s-MGS0000\t%s%s\n" \
		$mgs_HOST \
		$FSNAME \
		$fsldevformat \
		$(mgsdevname) > $ldevconfpath

	local mdsfo_host=$mdsfailover_HOST;
	if [ -z "$mdsfo_host" ]; then
		mdsfo_host="-"
	fi

	for num in $(seq $MDSCOUNT); do
		fstype=$(facet_fstype mds$num)
		if [ "$fstype" == "zfs" ]; then
			fsldevformat="$fstype:"
		else
			fsldevformat=""
		fi

		printf "%s\t%s\t%s-MDT%04d\t%s%s\n" \
			$mds_HOST \
			$mdsfo_host \
			$FSNAME \
			$num \
			$fsldevformat \
			$(mdsdevname $num) >> $ldevconfpath
	done

	local ostfo_host=$ostfailover_HOST;
	if [ -z "$ostfo_host" ]; then
		ostfo_host="-"
	fi

	for num in $(seq $OSTCOUNT); do
		fstype=$(facet_fstype ost$num)
		if [ "$fstype" == "zfs" ]; then
			fsldevformat="$fstype:"
		else
			fsldevformat=""
		fi

		printf "%s\t%s\t%s-OST%04d\t%s%s\n" \
			$ost_HOST \
			$ostfo_host \
			$FSNAME \
			$num \
			$fsldevformat \
			$(ostdevname $num) >> $ldevconfpath
	done

	# dump the result for the test log
	echo "----- $ldevconfpath -----"
	cat $ldevconfpath
	echo "--- END $ldevconfpath ---"
}
+
generate_nids() {
	# generate a nids file (mapping between hostname to nid)
	# looks like we only have the MGS nid available to us
	# so just echo that to a file
	local nidspath=$1

	printf '%s\t%s\n' "${mgs_HOST}" "${MGSNID}" > $nidspath

	echo "----- $nidspath -----"
	cat $nidspath
	echo "--- END $nidspath ---"
}
+
compare_ldev_output() {
	# Compare actual ldev output against the expected output,
	# ignoring line order (both files are sorted in place).
	# $1: file containing the ldev output
	# $2: file containing the expected output
	# Returns diff's status: 0 if they match, non-zero otherwise.
	# Declare the parameters local so we no longer clobber any
	# globals of the same name.
	local ldev_output=$1
	local expected_output=$2

	sort $expected_output -o $expected_output
	sort $ldev_output -o $ldev_output

	echo "-- START OF LDEV OUTPUT --"
	cat $ldev_output
	echo "--- END OF LDEV OUTPUT ---"

	echo "-- START OF EXPECTED OUTPUT --"
	cat $expected_output
	echo "--- END OF EXPECTED OUTPUT ---"

	diff $expected_output $ldev_output
	return $?
}
+
test_92() {
	if [ -z "$LDEV" ]; then
		error "ldev is missing!"
	fi

	local LDEVCONFPATH=$TMP/ldev.conf
	local NIDSPATH=$TMP/nids
	local rc

	echo "Host is $(hostname)"

	generate_ldev_conf $LDEVCONFPATH
	generate_nids $NIDSPATH

	# echo the mgs nid and compare it to environment variable MGSNID
	# also, ldev.conf and nids is a server side thing, use the OSS
	# hostname
	local output
	output=$($LDEV -c $LDEVCONFPATH -H $ost_HOST -n $NIDSPATH echo %m)
	# capture ldev's exit status right away; checking $? after the
	# echos below would test the last echo instead of ldev
	rc=$?

	echo "-- START OF LDEV OUTPUT --"
	echo -e "$output"
	echo "--- END OF LDEV OUTPUT ---"

	# ldev failed, error
	if [ $rc -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH
		error "ldev failed to execute!"
	fi

	# need to process multiple lines because of combined MGS and MDS
	# quote $output so its newlines survive, and feed the loop via
	# process substitution so error() is not lost in a pipeline
	# subshell
	while read -r line; do
		if [ "$line" != "$MGSNID" ]; then
			rm $LDEVCONFPATH $NIDSPATH
			error "ldev failed mgs nid '$line', expected '$MGSNID'"
		fi
	done < <(echo -e "$output" | awk '{ print $2 }')

	rm $LDEVCONFPATH $NIDSPATH
}
run_test 92 "ldev returns MGS NID correctly in command substitution"
+
test_93() {
	local num

	[ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return

	reformat
	#start mgs or mgs/mdt0
	if ! combined_mgs_mds ; then
		start_mgs
		start_mdt 1
	else
		start_mdt 1
	fi

	start_ost || error "OST0 start fail"

	# delay each target registration on the MGS so the remaining MDTs
	# end up registering concurrently
	#define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e
	# NB: "fail_val = 10" (with spaces) never set fail_val at all;
	# lctl set_param requires name=value with no whitespace
	do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e"
	for num in $(seq 2 $MDSCOUNT); do
		start_mdt $num &
	done

	# NOTE(review): the start_mdt jobs above are not waited for
	# explicitly; mount_client appears to be relied on to block until
	# registration completes — confirm no race on slow setups
	mount_client $MOUNT || error "mount client fails"
	wait_osc_import_state mds ost FULL
	wait_osc_import_state client ost FULL
	check_mount || error "check_mount failed"

	cleanup || error "cleanup failed with $?"
}
run_test 93 "register multiple MDT at the same time"
+
test_94() {
	if [ -z "$LDEV" ]; then
		error "ldev is missing!"
	fi

	local LDEVCONFPATH=$TMP/ldev.conf
	local NIDSPATH=$TMP/nids

	generate_ldev_conf $LDEVCONFPATH
	generate_nids $NIDSPATH

	# query ldev for all labels belonging to this filesystem
	local LDEV_OUTPUT=$TMP/ldev-output.txt
	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME > $LDEV_OUTPUT

	# ldev failed, error
	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
		error "ldev failed to execute!"
	fi

	# expected output: one label per target, matching what
	# generate_ldev_conf wrote
	local EXPECTED_OUTPUT=$TMP/ldev-expected.txt

	printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT

	for num in $(seq $MDSCOUNT); do
		printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
	done

	for num in $(seq $OSTCOUNT); do
		printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
	done

	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
		error "ldev failed to produce the correct hostlist!"
	fi

	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
}
run_test 94 "ldev outputs correct labels for file system name query"
+
# Run $LDEV with the given label filter options and check its exit
# status.  $LDEVCONFPATH and $NIDSPATH are set by the caller and are
# visible here through bash dynamic scoping.
# $1: expected outcome, "pass" or "fail"
# $2: description of the options, used in error messages
# $3...: filter options passed through to ldev
check_ldev_filter() {
	local expected=$1
	local desc=$2
	shift 2

	$LDEV -c $LDEVCONFPATH -n $NIDSPATH "$@" &>/dev/null
	local rc=$?	# capture ldev's status before anything else runs

	if [ "$expected" == "pass" ] && [ $rc -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH
		error "ldev label filtering w/ $desc failed!"
	elif [ "$expected" == "fail" ] && [ $rc -eq 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH
		error "ldev label filtering w/ $desc incorrectly succeeded"
	fi
}

test_95() {
	if [ -z "$LDEV" ]; then
		error "ldev is missing!"
	fi

	local LDEVCONFPATH=$TMP/ldev.conf
	local NIDSPATH=$TMP/nids

	generate_ldev_conf $LDEVCONFPATH
	generate_nids $NIDSPATH

	# SUCCESS CASES: each label filter is valid on its own
	check_ldev_filter pass "-F" -F $FSNAME	# file sys filter
	check_ldev_filter pass "-l" -l		# local filter
	check_ldev_filter pass "-f" -f		# foreign filter
	check_ldev_filter pass "-a" -a		# all filter

	# FAILURE CASES: combining two label filters must be rejected
	check_ldev_filter fail "-a and -F" -a -F $FSNAME
	check_ldev_filter fail "-a and -f" -a -f
	check_ldev_filter fail "-a and -l" -a -l
	check_ldev_filter fail "-f and -l" -f -l
	check_ldev_filter fail "-F and -l" -F $FSNAME -l
	check_ldev_filter fail "-F and -f" -F $FSNAME -f

	rm $LDEVCONFPATH $NIDSPATH
}
run_test 95 "ldev should only allow one label filter"
+
test_96() {
	if [ -z "$LDEV" ]; then
		error "ldev is missing!"
	fi

	local LDEVCONFPATH=$TMP/ldev.conf
	local NIDSPATH=$TMP/nids

	generate_ldev_conf $LDEVCONFPATH
	generate_nids $NIDSPATH

	local LDEV_OUTPUT=$TMP/ldev-output.txt
	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \
		echo %H-%b | \
		awk '{print $2}' > $LDEV_OUTPUT
	# ldev failed, error
	# ($? would only reflect awk's status here; PIPESTATUS[0] holds
	# the exit code of ldev itself)
	if [ ${PIPESTATUS[0]} -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
		error "ldev failed to execute!"
	fi

	# expected output: "<host>-<backend fstype>" for every target
	# hosted on the MGS node
	local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt

	echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT

	if [ "$mgs_HOST" == "$mds_HOST" ]; then
		for num in $(seq $MDSCOUNT); do
			echo "$mds_HOST-$(facet_fstype mds$num)" \
				>> $EXPECTED_OUTPUT
		done
	fi

	if [ "$mgs_HOST" == "$ost_HOST" ]; then
		for num in $(seq $OSTCOUNT); do
			echo "$ost_HOST-$(facet_fstype ost$num)" \
				>> $EXPECTED_OUTPUT
		done
	fi

	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
		error "ldev failed to produce the correct output!"
	fi

	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
}
run_test 96 "ldev returns hostname and backend fs correctly in command sub"
+
test_97() {
	if [ -z "$LDEV" ]; then
		error "ldev is missing!"
	fi

	local LDEVCONFPATH=$TMP/ldev.conf
	local NIDSPATH=$TMP/nids

	generate_ldev_conf $LDEVCONFPATH
	generate_nids $NIDSPATH

	local LDEV_OUTPUT=$TMP/ldev-output.txt
	local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt

	# query by role "mdt": expect exactly the MDT labels
	echo -e "\nMDT role"
	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT
		error "ldev failed to execute for mdt role!"
	fi

	for num in $(seq $MDSCOUNT); do
		printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
	done

	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
		error "ldev failed to produce the correct output for mdt role!"
	fi

	# query by role "ost": expect exactly the OST labels
	echo -e "\nOST role"
	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
		error "ldev failed to execute for ost role!"
	fi

	# start a fresh expected file for this role (>> recreates it)
	rm $EXPECTED_OUTPUT
	for num in $(seq $OSTCOUNT); do
		printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT
	done

	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
		error "ldev failed to produce the correct output for ost role!"
	fi

	# query by role "mgs": expect the single MGS label
	echo -e "\nMGS role"
	$LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT
		error "ldev failed to execute for mgs role!"
	fi

	printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT

	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT

	if [ $? -ne 0 ]; then
		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
		error "ldev failed to produce the correct output for mgs role!"
	fi

	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
}
run_test 97 "ldev returns correct ouput when querying based on role"