ONLY=${ONLY:-"$*"}
-# bug number for skipped test:
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
+# bug number for skipped test: LU-7005
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 50i"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
is_sles11() # LU-2181
test_30b() {
setup
+ local orignids=$($LCTL get_param -n \
+ osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+
+ local orignidcount=$(echo "$orignids" | wc -w)
+
# Make a fake nid. Use the OST nid, and add 20 to the least significant
# numerical part of it. Hopefully that's not already a failover address
# for the server.
- OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
- ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
- NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
- NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
+ local OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | \
+ awk '{print $1}')
+ local ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
+ local NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
+ local NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
echo "Using fake nid $NEW"
- TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
+ local TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
set_conf_param_and_check client "$TEST" \
"$FSNAME-OST0000.failover.node" $NEW ||
error "didn't add failover nid $NEW"
- NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
+ local NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids)
echo $NIDS
- # The NIDS value is the failover nid strings and "[" and "]". So
- # we need to subtract the space taken by the delimiters. This has
- # changed from earlier version of Lustre but this test is run only
- # locally so this change will not break interop. See LU-3386
- NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 3))
- echo "should have 2 failover nids: $NIDCOUNT"
- [ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
+ local NIDCOUNT=$(echo "$NIDS" | wc -w)
+ echo "should have $((orignidcount + 1)) entries \
+ in failover nids string, have $NIDCOUNT"
+ [ $NIDCOUNT -eq $((orignidcount + 1)) ] ||
+ error "Failover nid not added"
+
do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" ||
error "conf_param delete failed"
umount_client $MOUNT
NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import |
grep failover_nids)
echo $NIDS
- NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 3))
- echo "only 1 final nid should remain: $NIDCOUNT"
- [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
+ NIDCOUNT=$(echo "$NIDS" | wc -w)
+ echo "only $orignidcount final entries should remain \
+ in failover nids string, have $NIDCOUNT"
+ [ $NIDCOUNT -eq $orignidcount ] || error "Failover nids not removed"
cleanup || error "cleanup failed with rc $?"
}
local fstype=$(facet_fstype $SINGLEMDS)
local mdt_dev=$tmp/mdt
local ost_dev=$tmp/ost
+ local dir
trap 'trap - RETURN; t32_test_cleanup' RETURN
$LFS setdirstripe -D -c2 $tmp/mnt/lustre/remote_dir
+ $r $LCTL set_param -n \
+ mdt.${fsname}*.enable_remote_dir=1 2>/dev/null
+
pushd $tmp/mnt/lustre
tar -cf - . --exclude=./remote_dir |
tar -xvf - -C remote_dir 1>/dev/null || {
echo "list verification skipped"
fi
+ if [ $(lustre_version_code mds1) -ge $(version_code 2.7.50) -a \
+ $dne_upgrade != "no" ]; then
+ $r $LCTL set_param -n \
+ mdt.${fsname}*.enable_remote_dir=1 2>/dev/null
+
+ echo "test migration"
+ pushd $tmp/mnt/lustre
+ # migrate the files/directories to the remote MDT, then
+ # move it back
+ for dir in $(find ! -name .lustre ! -name . -type d); do
+ mdt_index=$($LFS getdirstripe -i $dir)
+ stripe_cnt=$($LFS getdirstripe -c $dir)
+ if [ $mdt_index = 0 -a $stripe_cnt -le 1 ]; then
+ $LFS mv -M 1 $dir || {
+ popd
+ error_noexit "migrate MDT1 failed"
+ return 1
+ }
+ fi
+ done
+
+ for dir in $(find ! -name . ! -name .lustre -type d); do
+ mdt_index=$($LFS getdirstripe -i $dir)
+ stripe_cnt=$($LFS getdirstripe -c $dir)
+ if [ $mdt_index = 1 -a $stripe_cnt -le 1 ]; then
+ $LFS mv -M 0 $dir || {
+ popd
+ error_noexit "migrate MDT0 failed"
+ return 1
+ }
+ fi
+ done
+ popd
+ fi
+
#
# When adding new data verification tests, please check for
# the presence of the required reference files first, like
# Do not support 1_8 and 2_1 direct upgrade to DNE2 anymore */
echo $tarball | grep "1_8" && continue
echo $tarball | grep "2_1" && continue
+ load_modules
dne_upgrade=yes t32_test $tarball writeconf || rc=$?
done
return $rc
}
run_test 50h "LU-642: activate deactivated OST"
+test_50i() {
+ # prepare MDT/OST, make OSC inactive for OST1
+ [ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return
+
+ [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+ load_modules
+ do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" ||
+ error "tunefs MDT2 failed"
+ start_mds || error "Unable to start MDT"
+ start_ost || error "Unable to start OST1"
+ start_ost2 || error "Unable to start OST2"
+ mount_client $MOUNT || error "client start failed"
+
+ mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+
+ $LCTL conf_param ${FSNAME}-MDT0000.mdc.active=0 &&
+ error "deactive MDC0 succeeds"
+ # activate MDC for MDT2
+ local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active"
+ set_conf_param_and_check client \
+ "$TEST" "${FSNAME}-MDT0001.mdc.active" 1 ||
+ error "Unable to activate MDT2"
+
+ $LFS mkdir -i1 $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed"
+ # create some file
+ createmany -o $DIR/$tdir/2/$tfile-%d 1 || error "create files failed"
+
+ rm -rf $DIR/$tdir/2 || error "unlink dir failed"
+
+ # deactivate MDC for MDT2
+ local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active"
+ set_conf_param_and_check client \
+ "$TEST" "${FSNAME}-MDT0001.mdc.active" 0 ||
+ error "Unable to deactivate MDT2"
+
+ $LFS mkdir -i1 $DIR/$tdir/2 &&
+ error "mkdir $DIR/$tdir/2 succeeds after deactive MDT"
+
+ # cleanup
+ umount_client $MOUNT || error "Unable to umount client"
+ stop_mds
+ stop_ost
+ stop_ost 2
+}
+run_test 50i "activate deactivated MDT"
+
test_51() {
local LOCAL_TIMEOUT=20
}
run_test 87 "check if MDT inode can hold EAs with N stripes properly"
+# $1 test directory
+# $2 (optional) value of max_mod_rpcs_in_flight to set
+check_max_mod_rpcs_in_flight() {
+ local dir="$1"
+ local mmr="$2"
+ local idx
+ local facet
+ local tmp
+ local i
+
+ idx=$(printf "%04x" $($LFS getdirstripe -i $dir))
+ facet="mds$((0x$idx + 1))"
+
+ if [ -z "$mmr" ]; then
+ # get value of max_mod_rcps_in_flight
+ mmr=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) ||
+ error "Unable to get max_mod_rpcs_in_flight"
+ echo "max_mod_rcps_in_flight is $mmr"
+ else
+ # set value of max_mod_rpcs_in_flight
+ $LCTL set_param \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight=$mmr ||
+ error "Unable to set max_mod_rpcs_in_flight to $mmr"
+ echo "max_mod_rpcs_in_flight set to $mmr"
+ fi
+
+ # create mmr+1 files
+ echo "creating $((mmr + 1)) files ..."
+ umask 0022
+ for i in $(seq $((mmr + 1))); do
+ touch $dir/file-$i
+ done
+
+ ### part 1 ###
+
+ # consumes mmr-1 modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+ # drop requests on MDT so that RPC slots are consumed
+ # during all the request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $((mmr - 1)) chmod in parallel ..."
+ for i in $(seq $((mmr - 1))); do
+ chmod 0600 $dir/file-$i &
+ done
+
+ # send one additional modify RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional chmod in parallel ..."
+ chmod 0600 $dir/file-$mmr &
+ sleep 1
+
+ # check this additional modify RPC get a modify RPC slot
+ # and succeed its operation
+ checkstat -vp 0600 $dir/file-$mmr ||
+ error "Unable to send $mmr modify RPCs in parallel"
+ wait
+
+ ### part 2 ###
+
+ # consumes mmr modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+ # drop requests on MDT so that RPC slots are consumed
+ # during all the request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $mmr chmod in parallel ..."
+ for i in $(seq $mmr); do
+ chmod 0666 $dir/file-$i &
+ done
+
+ # send one additional modify RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional chmod in parallel ..."
+ chmod 0666 $dir/file-$((mmr + 1)) &
+ sleep 1
+
+ # check this additional modify RPC blocked getting a modify RPC slot
+ checkstat -vp 0644 $dir/file-$((mmr + 1)) ||
+ error "Unexpectedly send $mmr modify RPCs in parallel"
+ wait
+}
+
+test_90a() {
+ reformat
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ # check default value
+ $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ check_max_mod_rpcs_in_flight $DIR/$tdir
+
+ cleanup
+}
+run_test 90a "check max_mod_rpcs_in_flight is enforced"
+
+test_90b() {
+ local idx
+ local facet
+ local tmp
+ local mmrpc
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ ### test 1.
+ # update max_mod_rpcs_in_flight
+ $LFS mkdir -c1 $DIR/${tdir}1 || error "mkdir $DIR/${tdir}1 failed"
+ check_max_mod_rpcs_in_flight $DIR/${tdir}1 1
+
+ ### test 2.
+ # check client is able to send multiple modify RPCs in paralell
+ tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
+ grep -c "multi_mod_rpcs")
+ if [ "$tmp" -ne $MDSCOUNT ]; then
+ echo "Client not able to send multiple modify RPCs in parallel"
+ cleanup
+ return
+ fi
+
+ # update max_mod_rpcs_in_flight
+ $LFS mkdir -c1 $DIR/${tdir}2 || error "mkdir $DIR/${tdir}2 failed"
+ check_max_mod_rpcs_in_flight $DIR/${tdir}2 5
+
+ ### test 3.
+ $LFS mkdir -c1 $DIR/${tdir}3 || error "mkdir $DIR/${tdir}3 failed"
+ idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/${tdir}3))
+ facet="mds$((0x$idx + 1))"
+
+ # save MDT max_mod_rpcs_per_client
+ mmrpc=$(do_facet $facet \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+
+ # update max_mod_rpcs_in_flight
+ umount_client $MOUNT
+ do_facet $facet \
+ "echo 16 > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+ $LCTL set_param mdc.$FSNAME-MDT$idx-mdc-*.max_rpcs_in_flight=17
+ check_max_mod_rpcs_in_flight $DIR/${tdir}3 16
+
+ # restore MDT max_mod_rpcs_per_client initial value
+ do_facet $facet \
+ "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+
+ rm -rf $DIR/${tdir}?
+ cleanup
+}
+run_test 90b "check max_mod_rpcs_in_flight is enforced after update"
+
+test_90c() {
+ local tmp
+ local mrif
+ local mmrpc
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ # check client is able to send multiple modify RPCs in paralell
+ tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
+ grep -c "multi_mod_rpcs")
+ if [ "$tmp" -ne $MDSCOUNT ]; then
+ skip "Client not able to send multiple modify RPCs in parallel"
+ cleanup
+ return
+ fi
+
+ # get max_rpcs_in_flight value
+ mrif=$($LCTL get_param -n mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
+ echo "max_rpcs_in_flight is $mrif"
+
+ # get MDT max_mod_rpcs_per_client
+ mmrpc=$(do_facet mds1 \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+ echo "max_mod_rpcs_per_client is $mmrpc"
+
+ # testcase 1
+ # attempt to set max_mod_rpcs_in_flight to max_rpcs_in_flight value
+ # prerequisite: set max_mod_rpcs_per_client to max_rpcs_in_flight value
+ umount_client $MOUNT
+ do_facet mds1 \
+ "echo $mrif > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif &&
+ error "set max_mod_rpcs_in_flight to $mrif should fail"
+
+ umount_client $MOUNT
+ do_facet mds1 \
+ "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+
+ # testcase 2
+ # attempt to set max_mod_rpcs_in_flight to max_mod_rpcs_per_client+1
+ # prerequisite: set max_rpcs_in_flight to max_mod_rpcs_per_client+2
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc + 2))
+
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc + 1)) &&
+ error "set max_mod_rpcs_in_flight to $((mmrpc + 1)) should fail"
+
+ cleanup
+}
+run_test 90c "check max_mod_rpcs_in_flight update limits"
+
+test_90d() {
+ local idx
+ local facet
+ local mmr
+ local i
+ local pid
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
+ facet="mds$((0x$idx + 1))"
+
+ # check client version supports multislots
+ tmp=$($LCTL get_param -N \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
+ if [ -z "$tmp" ]; then
+ skip "Client does not support multiple modify RPCs in flight"
+ cleanup
+ return
+ fi
+
+ # get current value of max_mod_rcps_in_flight
+ mmr=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
+ echo "max_mod_rcps_in_flight is $mmr"
+
+ # create mmr files
+ echo "creating $mmr files ..."
+ umask 0022
+ for i in $(seq $mmr); do
+ touch $DIR/$tdir/file-$i
+ done
+
+ # prepare for close RPC
+ multiop_bg_pause $DIR/$tdir/file-close O_c
+ pid=$!
+
+ # consumes mmr modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+ # drop requests on MDT so that RPC slots are consumed
+ # during all the request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $mmr chmod in parallel ..."
+ for i in $(seq $mmr); do
+ chmod 0600 $DIR/$tdir/file-$i &
+ done
+
+ # send one additional close RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional close in parallel ..."
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ sleep 1
+
+ # check this additional close RPC get a modify RPC slot
+ # and multiop process completed
+ [ -d /proc/$pid ] &&
+ error "Unable to send the additional close RPC in parallel"
+ wait
+ rm -rf $DIR/$tdir
+ cleanup
+}
+run_test 90d "check one close RPC is allowed above max_mod_rpcs_in_flight"
if ! combined_mgs_mds ; then
stop mgs