X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=4c2aa803a2e2549f2b3bb840d346d967d70eb121;hb=c2d27a0f12688c0d029880919f8b002e557b540c;hp=52843327a05b220518bd6754e146ae0c3ce88e32;hpb=43f96aa9cc3cec66d9b9e0a03e5fc23e094525e7;p=fs%2Flustre-release.git diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh index 5284332..4c2aa80 100644 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -4,8 +4,8 @@ set -e ONLY=${ONLY:-"$*"} -# bug number for skipped test: LU-2828 -ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64" +# bug number for skipped test: LU-7005 +ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 50i" # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! is_sles11() # LU-2181 @@ -78,8 +78,8 @@ if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then fi [ $(facet_fstype $SINGLEMDS) = "zfs" ] && -# bug number for skipped test: LU-2778 LU-4444 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 69" +# bug number for skipped test: LU-4444 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 69" init_logging @@ -321,9 +321,9 @@ test_1() { run_test 1 "start up ost twice (should return errors)" test_2() { - start_mdt 1 || error "MDT0 start fail" + start_mds || error "MDT start failed" echo "start mds second time.." - start_mdt 1 && error "2nd MDT start should fail" + start_mds && error "2nd MDT start should fail" start_ost || error "OST start failed" mount_client $MOUNT || error "mount_client failed to start client" check_mount || error "check_mount failed" @@ -982,7 +982,7 @@ test_24a() { # the MDS must remain up until last MDT stop_mds MDS=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | - awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -n1) + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) [ -z "$MDS" ] && error "No MDT" cleanup_fs2 cleanup_nocli || error "cleanup_nocli failed with rc $?" @@ -1152,7 +1152,7 @@ test_28a() { # LU-4221 # Check 3. 
# prepare a non-symlink parameter in the OSD - name="lma_self_repair" + name="auto_scrub" param="$device.osd.$name" cmd="$LCTL get_param -n osd-*.$device.$name" @@ -1528,7 +1528,7 @@ t32_wait_til_devices_gone() { done echo "waiting for dev on $node: dev $devices loop $loops given up" do_rpc_nodes $node "losetup -a" - do_rpc_nodes $node "$LCTL devices_list" + do_rpc_nodes $node "$LCTL device_list" return 1 } @@ -1631,7 +1631,7 @@ t32_test() { local img_bspace local img_ispace local fsname=t32fs - local nid=$($r $LCTL list_nids | head -n1) + local nid=$($r $LCTL list_nids | head -1) local mopts local uuid local nrpcs_orig @@ -1643,6 +1643,7 @@ t32_test() { trap 'trap - RETURN; t32_test_cleanup' RETURN + load_modules mkdir -p $tmp/mnt/lustre || error "mkdir $tmp/mnt/lustre failed" $r mkdir -p $tmp/mnt/{mdt,ost} $r tar xjvf $tarball -S -C $tmp || { @@ -1658,7 +1659,7 @@ t32_test() { echo " Commit: $img_commit" echo " Kernel: $img_kernel" echo " Arch: $img_arch" - echo "OST version: $(get_lustre_version ost1)" + echo "OST version: $(lustre_build_version ost1)" # The conversion can be made only when both of the following # conditions are satisfied: @@ -1705,8 +1706,7 @@ t32_test() { "(Need MGS version at least 2.3.59)"; return 0; } local osthost=$(facet_active_host ost1) - local ostnid=$(do_node $osthost $LCTL list_nids | - head -n1) + local ostnid=$(do_node $osthost $LCTL list_nids | head -1) mopts=nosvc if [ $fstype == "ldiskfs" ]; then @@ -1894,11 +1894,13 @@ t32_test() { } if [ "$dne_upgrade" != "no" ]; then - $LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || { + $LFS mkdir -i 1 -c2 $tmp/mnt/lustre/remote_dir || { error_noexit "set remote dir failed" return 1 } + $LFS setdirstripe -D -c2 $tmp/mnt/lustre/remote_dir + pushd $tmp/mnt/lustre tar -cf - . --exclude=./remote_dir | tar -xvf - -C remote_dir 1>/dev/null || { @@ -2091,6 +2093,9 @@ test_32c() { [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return t32_check for tarball in $tarballs; do + # Do not support 1_8 and 2_1 direct upgrade to DNE2 anymore */ + echo $tarball | grep "1_8" && continue + echo $tarball | grep "2_1" && continue dne_upgrade=yes t32_test $tarball writeconf || rc=$? 
done return $rc @@ -2216,7 +2221,7 @@ test_35a() { # bug 12459 log "Set up a fake failnode for the MDS" FAKENID="127.0.0.2" local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | - awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -n1) + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) do_facet mgs "$LCTL conf_param \ ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || error "Setting ${device}.failover.node=\ @@ -2274,7 +2279,7 @@ test_35b() { # bug 18674 log "Set up a fake failnode for the MDS" FAKENID="127.0.0.2" local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | - awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -n1) + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) do_facet mgs "$LCTL conf_param \ ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || error "Set ${device}.failover.node=\ @@ -2568,9 +2573,14 @@ test_41a() { #bug 14134 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) - start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n + start_mdt 1 -o nosvc -n + if [ $MDSCOUNT -ge 2 ]; then + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done + fi start ost1 $(ostdevname 1) $OST_MOUNT_OPTS - start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force + start_mdt 1 -o nomgs,force mount_client $MOUNT || error "mount_client $MOUNT failed" sleep 5 @@ -2598,9 +2608,14 @@ test_41b() { reformat local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) - start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n + start_mdt 1 -o nosvc -n + if [ $MDSCOUNT -ge 2 ]; then + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done + fi start_ost || error "Unable to start OST1" - start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force + start_mdt 1 -o nomgs,force mount_client $MOUNT || error "mount_client $MOUNT failed" sleep 5 @@ -2625,7 +2640,7 @@ test_41c() { { skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; } cleanup - # MDT concurent start + # MDT concurrent start #define OBD_FAIL_TGT_DELAY_CONNECT 0x703 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x703" start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS & @@ -2646,10 +2661,17 @@ test_41c() { echo "2nd MDT start succeed" else stop mds1 -f - error "unexpected concurent MDT mounts result, rc=$rc rc2=$rc2" + error "unexpected concurrent MDT mounts result, rc=$rc rc2=$rc2" fi - # OST concurent start + if [ $MDSCOUNT -ge 2 ]; then + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done + fi + + # OST concurrent start + #define OBD_FAIL_TGT_DELAY_CONNECT 0x703 do_facet ost1 "$LCTL set_param fail_loc=0x703" start ost1 $(ostdevname 1) $OST_MOUNT_OPTS & @@ -2669,26 +2691,26 @@ test_41c() { echo "1st OST start failed with EALREADY" echo "2nd OST start succeed" else - stop mds1 -f + stop_mds -f stop ost1 -f - error "unexpected concurent OST mounts result, rc=$rc rc2=$rc2" + error "unexpected concurrent OST mounts result, rc=$rc rc2=$rc2" fi # cleanup - stop mds1 -f + stop_mds stop ost1 -f # verify everything ok start_mds if [ $? != 0 ] then - stop mds1 -f + stop_mds error "MDT(s) start failed" fi start_ost if [ $? != 0 ] then - stop mds1 -f + stop_mds stop ost1 -f error "OST(s) start failed" fi @@ -2696,20 +2718,20 @@ test_41c() { mount_client $MOUNT if [ $? != 0 ] then - stop mds1 -f + stop_mds stop ost1 -f error "client start failed" fi check_mount if [ $? 
!= 0 ] then - stop mds1 -f + stop_mds stop ost1 -f error "client mount failed" fi cleanup } -run_test 41c "concurent mounts of MDT/OST should all fail but one" +run_test 41c "concurrent mounts of MDT/OST should all fail but one" test_42() { #bug 14693 setup @@ -3023,7 +3045,7 @@ run_test 47 "server restart does not make client loss lru_resize settings" cleanup_48() { trap 0 - # reformat after this test is needed - if test will failed + # reformat after this test is needed - if the test fails, # we will have unkillable file at FS reformat_and_config } @@ -3363,6 +3385,52 @@ test_50h() { } run_test 50h "LU-642: activate deactivated OST" +test_50i() { + # prepare MDT/OST, make OSC inactive for OST1 + [ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return + + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 + load_modules + do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" || + error "tunefs MDT2 failed" + start_mds || error "Unable to start MDT" + start_ost || error "Unable to start OST1" + start_ost2 || error "Unable to start OST2" + mount_client $MOUNT || error "client start failed" + + mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" + + $LCTL conf_param ${FSNAME}-MDT0000.mdc.active=0 && + error "deactive MDC0 succeeds" + # activate MDC for MDT2 + local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active" + set_conf_param_and_check client \ + "$TEST" "${FSNAME}-MDT0001.mdc.active" 1 || + error "Unable to activate MDT2" + + $LFS mkdir -i1 $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed" + # create some file + createmany -o $DIR/$tdir/2/$tfile-%d 1 || error "create files failed" + + rm -rf $DIR/$tdir/2 || error "unlink dir failed" + + # deactivate MDC for MDT2 + local TEST="$LCTL get_param -n mdc.${FSNAME}-MDT0001-mdc-[!M]*.active" + set_conf_param_and_check client \ + "$TEST" "${FSNAME}-MDT0001.mdc.active" 0 || + error "Unable to deactivate MDT2" + + $LFS mkdir -i1 $DIR/$tdir/2 && + error "mkdir $DIR/$tdir/2 succeeds after deactive MDT" + + # cleanup + umount_client $MOUNT || error "Unable to umount client" + stop_mds + stop_ost + stop_ost 2 +} +run_test 50i "activate deactivated MDT" + test_51() { local LOCAL_TIMEOUT=20 @@ -3499,9 +3567,12 @@ test_52() { [ $? -eq 0 ] || { error "Unable to move objects"; return 14; } # recover objects dry-run - echo "ll_recover_lost_found_objs dry_run" - do_node $ost1node "ll_recover_lost_found_objs -n -d $ost1mnt/O" || - error "ll_recover_lost_found_objs failed" + if [ $(lustre_version_code ost1) -ge $(version_code 2.5.56) ]; then + echo "ll_recover_lost_found_objs dry_run" + do_node $ost1node \ + "ll_recover_lost_found_objs -n -d $ost1mnt/O" || + error "ll_recover_lost_found_objs failed" + fi # recover objects echo "ll_recover_lost_found_objs fix run" @@ -3530,64 +3601,49 @@ test_52() { } run_test 52 "check recovering objects from lost+found" -thread_param_get() { - local facet=$1 - local pbase=$2 - local param=$3 - - do_facet $facet "lctl get_param -n $pbase.$param" || echo 0 -} - -cleanup_thread_sanity() { - trap 0 - cleanup -} - # Checks threads_min/max/started for some service # # Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), and a # parameter pattern prefix like 'ost.*.ost'. 
thread_sanity() { - local modname=$1 - local facet=$2 - local ppat=$3 - local opts=$4 + local modname=$1 + local facet=$2 + local parampat=$3 + local opts=$4 local basethr=$5 - local tmin - local tmin2 - local tmax - local tmax2 - local tstarted - local tstarted2 - local pname - local pbase - local msg="Insane $modname thread counts" - local ncpts + local tmin + local tmin2 + local tmax + local tmax2 + local tstarted + local paramp + local msg="Insane $modname thread counts" + local ncpts=$(check_cpt_number $facet) local nthrs - shift 4 - - setup - check_mount || { error "filesystem is not mounted"; return 40; } - trap cleanup_thread_sanity EXIT + shift 4 - ncpts=$(check_cpt_number $facet) + check_mount || return 41 - # We need to expand $ppat, but it may match multiple parameters, - # so we'll pick the first one - pname=$(do_facet $facet "$LCTL get_param -N $ppat.threads_min" | - head -n1) - [ -n "$pname" ] || { error "Can't expand $ppat.threads_min"; return 20;} + # We need to expand $parampat, but it may match multiple parameters, so + # we'll pick the first one + if ! paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1); then + error "Couldn't expand ${parampat}.threads_min parameter name" + return 22 + fi # Remove the .threads_min part - pbase=${pname%.threads_min} + paramp=${paramp%.threads_min} # Check for sanity in defaults - tmin=$(thread_param_get $facet $pbase threads_min) - tmax=$(thread_param_get $facet $pbase threads_max) - tstarted=$(thread_param_get $facet $pbase threads_started) - lassert 21 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || + tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" || + echo 0) + tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" || + echo 0) + tstarted=$(do_facet $facet "$LCTL get_param \ + -n ${paramp}.threads_started" || echo 0) + lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $? - lassert 22 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax))' || + lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $? nthrs=$(expr $tmax - $tmin) if [ $nthrs -lt $ncpts ]; then @@ -3598,63 +3654,84 @@ thread_sanity() { [ $tmin -eq $tmax -a $tmin -eq $tstarted ] && skip_env "module parameter forced $facet thread count" && - return 0 + tmin=3 && tmax=$((3 * tmax)) # Check that we can change min/max - do_facet $facet "$LCTL set_param $pbase.threads_min=$((tmin + nthrs))" - do_facet $facet "$LCTL set_param $pbase.threads_max=$((tmax - nthrs))" - tmin2=$(thread_param_get $facet $pbase threads_min) - tmax2=$(thread_param_get $facet $pbase threads_max) - lassert 23 "$msg" '(($tmin2 == ($tmin + $nthrs)))' || return $? - lassert 24 "$msg" '(($tmax2 == ($tmax - $nthrs)))' || return $? - - sleep 3 # give threads a chance to start - tstarted=$(thread_param_get $facet $pbase threads_started) - lassert 25 "$msg" '(($tstarted >= $tmin2))' || return $? + do_facet $facet "$LCTL set_param \ + ${paramp}.threads_min=$((tmin + nthrs))" + do_facet $facet "$LCTL set_param \ + ${paramp}.threads_max=$((tmax - nthrs))" + tmin2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" || + echo 0) + tmax2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" || + echo 0) + lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) && + $tmax2 == ($tmax - $nthrs)))' || return $? 
# Check that we can set min/max to the same value - do_facet $facet "$LCTL set_param $pbase.threads_max=$tmin2" - tmax2=$(thread_param_get $facet $pbase threads_max) - lassert 26 "$msg" '(($tmax2 == $tmin2))' || return $? + tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" || + echo 0) + do_facet $facet "$LCTL set_param ${paramp}.threads_max=$tmin" + tmin2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" || + echo 0) + tmax2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" || + echo 0) + lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $? # Check that we can't set max < min - do_facet $facet "$LCTL set_param $pbase.threads_max=$((tmin - 1))" - tmax2=$(thread_param_get $facet $pbase threads_max) - lassert 27 "$msg" '(($tmax2 < $tmin))' || return $? + do_facet $facet "$LCTL set_param ${paramp}.threads_max=$((tmin - 1))" + tmin2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min" || + echo 0) + tmax2=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max" || + echo 0) + lassert 27 "$msg" '(($tmin2 <= $tmax2))' || return $? # We need to ensure that we get the module options desired; to do this # we set LOAD_MODULES_REMOTE=true and we call setmodopts below. LOAD_MODULES_REMOTE=true cleanup local oldvalue - local newvalue="${opts}=$((basethr * ncpts))" + local newvalue="${opts}=$(expr $basethr \* $ncpts)" setmodopts -a $modname "$newvalue" oldvalue load_modules setup - check_mount || { error "filesystem failed remount"; return 41; } + check_mount || return 41 # Restore previous setting of MODOPTS_* setmodopts $modname "$oldvalue" # Check that $opts took - tmin=$(thread_param_get $facet $pbase threads_min) - tmax=$(thread_param_get $facet $pbase threads_max) - tstarted=$(thread_param_get $facet $pbase threads_started) - lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || + tmin=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_min") + tmax=$(do_facet $facet "$LCTL get_param -n ${paramp}.threads_max") + tstarted=$(do_facet $facet \ + "$LCTL get_param -n ${paramp}.threads_started") + lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $? + cleanup - cleanup_thread_sanity || error "cleanup failed with rc $?" load_modules + setup } test_53a() { - thread_sanity OST ost1 'ost.*.ost' oss_num_threads 16 + setup + thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16' + cleanup || error "cleanup failed with rc $?" } run_test 53a "check OSS thread count params" test_53b() { - thread_sanity MDT $SINGLEMDS 'mds.*.*' mds_num_threads 16 + setup + local mds=$(do_facet $SINGLEMDS "$LCTL get_param \ + -N mds.*.*.threads_max 2>/dev/null") + if [ -z "$mds" ]; then + #running this on an old MDT + thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16 + else + thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16 + fi + cleanup || error "cleanup failed with $?" } run_test 53b "check MDS thread count params" @@ -3883,20 +3960,19 @@ test_60() { # LU-471 run_test 60 "check mkfs.lustre --mkfsoptions -E -O options setting" test_61() { # LU-80 - local reformat=false + local lxattr=false [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] || { skip "Need MDS version at least 2.1.53"; return 0; } if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] && - ! large_xattr_enabled; then - reformat=true - LDISKFS_MKFS_OPTS+=" -O large_xattr" + ! 
large_xattr_enabled; then + lxattr=true for num in $(seq $MDSCOUNT); do - add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \ - --reformat $(mdsdevname $num) $(mdsvdevname $num) || - error "add mds $num failed" + do_facet mds${num} $TUNE2FS -O large_xattr \ + $(mdsdevname $num) || + error "tune2fs on mds $num failed" done fi @@ -3945,9 +4021,12 @@ test_61() { # LU-80 rm -f $file stopall - if $reformat; then - LDISKFS_MKFS_OPTS=${LDISKFS_MKFS_OPTS% -O large_xattr} - reformat + if $lxattr; then + for num in $(seq $MDSCOUNT); do + do_facet mds${num} $TUNE2FS -O ^large_xattr \ + $(mdsdevname $num) || + error "tune2fs on mds $num failed" + done fi } run_test 61 "large xattr" @@ -3966,10 +4045,10 @@ test_62() { { skip "Need MDS version at least 2.2.51"; return 0; } echo "disable journal for mds" - do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed" + do_facet mds $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed" start_mds && error "MDT start should fail" echo "disable journal for ost" - do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed" + do_facet ost1 $TUNE2FS -O ^has_journal $ostdev || error "tune2fs failed" start_ost && error "OST start should fail" cleanup || error "cleanup failed with rc $?" reformat_and_config @@ -4055,8 +4134,8 @@ test_66() { { skip "Need MGS version at least 2.3.59"; return 0; } setup - local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -n1) - local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -n1) + local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1) + local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1) echo "replace_nids should fail if MDS, OSTs and clients are UP" do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && @@ -4239,15 +4318,13 @@ test_69() { # use OST0000 since it probably has the most creations local OSTNAME=$(ostname_from_index 0) local mdtosc_proc1=$(get_mdtosc_proc_path mds1 $OSTNAME) - local last_id=$(do_facet mds1 $LCTL get_param -n \ - osc.$mdtosc_proc1.prealloc_last_id) # Want to have OST LAST_ID over 1.5 * OST_MAX_PRECREATE to # verify that the LAST_ID recovery is working properly. 
If # not, then the OST will refuse to allow the MDS connect # because the LAST_ID value is too different from the MDS #define OST_MAX_PRECREATE=20000 - local num_create=$((20000 * 5)) + local num_create=$((20000 * 3)) mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" $SETSTRIPE -i 0 $DIR/$tdir || error "$SETSTRIPE -i 0 $DIR/$tdir failed" @@ -4257,6 +4334,7 @@ test_69() { # filesystem is not inconsistent later on $LFS find $MOUNT --ost 0 | xargs rm + umount_client $MOUNT || error "umount client failed" stop_ost || error "OST0 stop failure" add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --reformat --replace \ $(ostdevname 1) $(ostvdevname 1) || @@ -4264,6 +4342,7 @@ test_69() { start_ost || error "OST0 restart failure" wait_osc_import_state mds ost FULL + mount_client $MOUNT || error "mount client failed" touch $DIR/$tdir/$tfile-last || error "create file after reformat" local idx=$($GETSTRIPE -i $DIR/$tdir/$tfile-last) [ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true @@ -4281,8 +4360,9 @@ test_70a() { start_mdt 1 || error "MDT0 start fail" start_ost || error "OST0 start fail" - - start_mdt 2 || error "MDT1 start fail" + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done mount_client $MOUNT || error "mount client fails" @@ -4302,8 +4382,7 @@ test_70b() { start_ost || error "OST0 start fail" - start_mdt 1 || error "MDT0 start fail" - start_mdt 2 || error "MDT1 start fail" + start_mds || error "MDS start fail" mount_client $MOUNT || error "mount client fails" @@ -4322,8 +4401,7 @@ test_70c() { [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return local MDTIDX=1 - start_mdt 1 || error "MDT0 start fail" - start_mdt 2 || error "MDT1 start fail" + start_mds || error "MDS start fail" start_ost || error "OST0 start fail" mount_client $MOUNT || error "mount client fails" @@ -4347,8 +4425,7 @@ test_70d() { [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return local MDTIDX=1 - start_mdt 1 || error "MDT0 start fail" - start_mdt 2 || error "MDT1 start fail" + start_mds || error "MDS start fail" start_ost || error "OST0 start fail" mount_client $MOUNT || error "mount client fails" @@ -4380,7 +4457,10 @@ test_71a() { start_mdt 1 || error "MDT0 start fail" start_ost || error "OST0 start fail" - start_mdt 2 || error "MDT1 start fail" + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done + start_ost2 || error "OST1 start fail" mount_client $MOUNT || error "mount client fails" @@ -4393,8 +4473,7 @@ test_71a() { rm -rf $DIR/$tdir || error "delete dir fail" umount_client $MOUNT || error "umount_client failed" - stop_mdt 1 || error "MDT0 stop fail" - stop_mdt 2 || error "MDT1 stop fail" + stop_mds || error "MDS stop fail" stop_ost || error "OST0 stop fail" stop_ost2 || error "OST1 stop fail" } @@ -4407,7 +4486,9 @@ test_71b() { fi local MDTIDX=1 - start_mdt 2 || error "MDT1 start fail" + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done start_ost || error "OST0 start fail" start_mdt 1 || error "MDT0 start fail" start_ost2 || error "OST1 start fail" @@ -4422,8 +4503,7 @@ test_71b() { rm -rf $DIR/$tdir || error "delete dir fail" umount_client $MOUNT || error "umount_client failed" - stop_mdt 1 || error "MDT0 stop fail" - stop_mdt 2 || error "MDT1 stop fail" + stop_mds || error "MDT0 stop fail" stop_ost || error "OST0 stop fail" stop_ost2 || error "OST1 stop fail" } @@ -4438,7 +4518,9 @@ test_71c() { start_ost || error "OST0 start fail" start_ost2 || error "OST1 start fail" - start_mdt 2 || error "MDT1 start fail" + for num in $(seq 2 
$MDSCOUNT); do + start_mdt $num || return + done start_mdt 1 || error "MDT0 start fail" mount_client $MOUNT || error "mount client fails" @@ -4451,8 +4533,7 @@ test_71c() { rm -rf $DIR/$tdir || error "delete dir fail" umount_client $MOUNT || error "umount_client failed" - stop_mdt 1 || error "MDT0 stop fail" - stop_mdt 2 || error "MDT1 stop fail" + stop_mds || error "MDS stop fail" stop_ost || error "OST0 stop fail" stop_ost2 || error "OST1 stop fail" @@ -4467,7 +4548,9 @@ test_71d() { local MDTIDX=1 start_ost || error "OST0 start fail" - start_mdt 2 || error "MDT0 start fail" + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done start_mdt 1 || error "MDT0 start fail" start_ost2 || error "OST1 start fail" @@ -4481,8 +4564,7 @@ test_71d() { rm -rf $DIR/$tdir || error "delete dir fail" umount_client $MOUNT || error "umount_client failed" - stop_mdt 1 || error "MDT0 stop fail" - stop_mdt 2 || error "MDT1 stop fail" + stop_mds || error "MDS stop fail" stop_ost || error "OST0 stop fail" stop_ost2 || error "OST1 stop fail" @@ -4497,7 +4579,9 @@ test_71e() { local MDTIDX=1 start_ost || error "OST0 start fail" - start_mdt 2 || error "MDT1 start fail" + for num in $(seq 2 $MDSCOUNT); do + start_mdt $num || return + done start_ost2 || error "OST1 start fail" start_mdt 1 || error "MDT0 start fail" @@ -4511,8 +4595,7 @@ test_71e() { rm -rf $DIR/$tdir || error "delete dir fail" umount_client $MOUNT || error "umount_client failed" - stop_mdt 1 || error "MDT0 stop fail" - stop_mdt 2 || error "MDT1 stop fail" + stop_mds || error "MDS stop fail" stop_ost || error "OST0 stop fail" stop_ost2 || error "OST1 stop fail" @@ -4534,7 +4617,8 @@ test_72() { #LU-2634 add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \ --reformat $(mdsdevname $num) $(mdsvdevname $num) || error "add mds $num failed" - $TUNE2FS -O extents $(mdsdevname $num) + do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" || + error "$TUNE2FS failed on mds${num}" done add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev || @@ -4611,14 +4695,15 @@ test_76a() { setup local MDMB_PARAM="osc.*.max_dirty_mb" echo "Change MGS params" - local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -n1) + local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | + head -1) echo "max_dirty_mb: $MAX_DIRTY_MB" local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB)) echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB" do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM | - head -n1" $NEW_MAX_DIRTY_MB - MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -n1) + head -1" $NEW_MAX_DIRTY_MB + MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1) echo "$MAX_DIRTY_MB" [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] || error "error while apply max_dirty_mb" @@ -4627,8 +4712,8 @@ test_76a() { stopall setupall wait_update $HOSTNAME "$LCTL get_param -n $MDMB_PARAM | - head -n1" $NEW_MAX_DIRTY_MB - MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -n1) + head -1" $NEW_MAX_DIRTY_MB + MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1) [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] || error "max_dirty_mb is not saved after remount" @@ -4636,15 +4721,15 @@ test_76a() { CLIENT_PARAM="obdfilter.*.client_cache_count" local CLIENT_CACHE_COUNT CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | - head -n1) + head -1) echo "client_cache_count: $CLIENT_CACHE_COUNT" NEW_CLIENT_CACHE_COUNT=$((CLIENT_CACHE_COUNT+CLIENT_CACHE_COUNT)) echo "new_client_cache_count: $NEW_CLIENT_CACHE_COUNT" do_facet 
mgs $LCTL set_param -P $CLIENT_PARAM=$NEW_CLIENT_CACHE_COUNT wait_update $(facet_host ost1) "$LCTL get_param -n $CLIENT_PARAM | - head -n1" $NEW_CLIENT_CACHE_COUNT + head -1" $NEW_CLIENT_CACHE_COUNT CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | - head -n1) + head -1) echo "$CLIENT_CACHE_COUNT" [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] || error "error while apply client_cache_count" @@ -4653,9 +4738,9 @@ test_76a() { stopall setupall wait_update $(facet_host ost1) "$LCTL get_param -n $CLIENT_PARAM | - head -n1" $NEW_CLIENT_CACHE_COUNT + head -1" $NEW_CLIENT_CACHE_COUNT CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | - head -n1) + head -1) echo "$CLIENT_CACHE_COUNT" [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] || error "client_cache_count is not saved after remount" @@ -4919,7 +5004,7 @@ test_80() { } run_test 80 "mgc import reconnect race" -# Save the original values of $OSTCOUNT and $OSTINDEX$i. +#Save the original values of $OSTCOUNT and $OSTINDEX$i. save_ostindex() { local new_ostcount=$1 saved_ostcount=$OSTCOUNT @@ -5223,16 +5308,18 @@ test_82b() { # LU-4665 run_test 82b "specify OSTs for file with --pool and --ost-list options" test_83() { - local dev - local ostmnt - local fstype - local mnt_opts - + [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] || + { skip "Need OST version at least 2.6.91" && return 0; } if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "Only applicable to ldiskfs-based MDTs" return fi + local dev + local ostmnt + local fstype + local mnt_opts + dev=$(ostdevname 1) ostmnt=$(facet_mntpt ost1) fstype=$(facet_fstype ost1) @@ -5358,6 +5445,422 @@ test_84() { } run_test 84 "check recovery_hard_time" +test_85() { + [[ $(lustre_version_code ost1) -ge $(version_code 2.7.55) ]] || + { skip "Need OST version at least 2.7.55" && return 0; } +##define OBD_FAIL_OSD_OST_EA_FID_SET 0x197 + do_facet ost1 "lctl set_param fail_loc=0x197" + start_ost + stop_ost +} +run_test 85 "osd_ost init: fail ea_fid_set" + +test_86() { + [ "$(facet_fstype ost1)" = "zfs" ] && + skip "LU-6442: no such mkfs params for ZFS OSTs" && return + + local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \ + --reformat $(ostdevname 1) $(ostvdevname 1)" + + local NEWSIZE=1024 + local OLDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" | + awk '/Flex block group size: / { print $NF; exit; }') + + local opts=OST_OPTS + if [[ ${!opts} != *mkfsoptions* ]]; then + eval opts=\"${!opts} \ + --mkfsoptions='\\\"-O flex_bg -G $NEWSIZE\\\"'\" + else + val=${!opts//--mkfsoptions=\\\"/ \ + --mkfsoptions=\\\"-O flex_bg -G $NEWSIZE } + eval opts='${val}' + fi + + echo "params: $opts" + + add ost1 $opts || error "add ost1 failed with new params" + + local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" | + awk '/Flex block group size: / { print $NF; exit; }') + + [[ $FOUNDSIZE == $NEWSIZE ]] || + error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE" + return 0 +} +run_test 86 "Replacing mkfs.lustre -G option" + +test_87() { #LU-6544 + [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.7.56) ]] || + { skip "Need MDS version at least 2.7.56" && return; } + [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] && + { skip "Only applicable to ldiskfs-based MDTs" && return; } + [[ $OSTCOUNT -gt 69 ]] && + { skip "Ignore wide striping situation" && return; } + + local mdsdev=$(mdsdevname 1) + local mdsvdev=$(mdsvdevname 1) + local file=$DIR/$tfile + local mntpt=$(facet_mntpt $SINGLEMDS) + local used_xattr_blk=0 + local 
inode_size=${1:-512} + local left_size=0 + local xtest="trusted.test" + local value + local orig + local i + + #Please see LU-6544 for MDT inode size calculation + if [ $OSTCOUNT -gt 26 ]; then + inode_size=2048 + elif [ $OSTCOUNT -gt 5 ]; then + inode_size=1024 + fi + left_size=$(expr $inode_size - \ + 156 - \ + 32 - \ + 32 - $OSTCOUNT \* 24 - 16 - 3 - \ + 24 - 16 - 3 - \ + 24 - 18 - $(expr length $tfile) - 16 - 4) + if [ $left_size -le 0 ]; then + echo "No space($left_size) is expected in inode." + echo "Try 1-byte xattr instead to verify this." + left_size=1 + else + echo "Estimate: at most $left_size-byte space left in inode." + fi + + unload_modules + reformat + + add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \ + --reformat $mdsdev $mdsvdev || error "add mds1 failed" + start_mdt 1 > /dev/null || error "start mdt1 failed" + for i in $(seq $OSTCOUNT); do + start ost$i $(ostdevname $i) $OST_MOUNT_OPTS > /dev/null || + error "start ost$i failed" + done + mount_client $MOUNT > /dev/null || error "mount client $MOUNT failed" + check_mount || error "check client $MOUNT failed" + + #set xattr + $SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed" + $GETSTRIPE $file || error "$GETSTRIPE $file failed" + i=$($GETSTRIPE -c $file) + if [ $i -ne $OSTCOUNT ]; then + left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24) + echo -n "Since only $i out $OSTCOUNT OSTs are used, " + echo -n "the expected left space is changed to " + echo "$left_size bytes at most." + fi + value=$(generate_string $left_size) + setfattr -n $xtest -v $value $file + orig=$(get_xattr_value $xtest $file) + [[ "$orig" != "$value" ]] && error "$xtest changed" + + #Verify if inode has some expected space left + umount $MOUNT > /dev/null || error "umount $MOUNT failed" + stop_mdt 1 > /dev/null || error "stop mdt1 failed" + mount_ldiskfs $SINGLEMDS || error "mount -t ldiskfs $SINGLEMDS failed" + + do_facet $SINGLEMDS ls -sal $mntpt/ROOT/$tfile + used_xattr_blk=$(do_facet $SINGLEMDS ls -s $mntpt/ROOT/$tfile | + awk '{ print $1 }') + [[ $used_xattr_blk -eq 0 ]] && + error "Please check MDS inode size calculation: \ + more than $left_size-byte space left in inode." + echo "Verified: at most $left_size-byte space left in inode." + + stopall +} +run_test 87 "check if MDT inode can hold EAs with N stripes properly" + +# $1 test directory +# $2 (optional) value of max_mod_rpcs_in_flight to set +check_max_mod_rpcs_in_flight() { + local dir="$1" + local mmr="$2" + local idx + local facet + local tmp + local i + + idx=$(printf "%04x" $($LFS getdirstripe -i $dir)) + facet="mds$((0x$idx + 1))" + + if [ -z "$mmr" ]; then + # get value of max_mod_rcps_in_flight + mmr=$($LCTL get_param -n \ + mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) || + error "Unable to get max_mod_rpcs_in_flight" + echo "max_mod_rcps_in_flight is $mmr" + else + # set value of max_mod_rpcs_in_flight + $LCTL set_param \ + mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight=$mmr || + error "Unable to set max_mod_rpcs_in_flight to $mmr" + echo "max_mod_rpcs_in_flight set to $mmr" + fi + + # create mmr+1 files + echo "creating $((mmr + 1)) files ..." + umask 0022 + for i in $(seq $((mmr + 1))); do + touch $dir/file-$i + done + + ### part 1 ### + + # consumes mmr-1 modify RPC slots + #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159 + # drop requests on MDT so that RPC slots are consumed + # during all the request resend interval + do_facet $facet "$LCTL set_param fail_loc=0x159" + echo "launch $((mmr - 1)) chmod in parallel ..." 
+ for i in $(seq $((mmr - 1))); do + chmod 0600 $dir/file-$i & + done + + # send one additional modify RPC + do_facet $facet "$LCTL set_param fail_loc=0" + echo "launch 1 additional chmod in parallel ..." + chmod 0600 $dir/file-$mmr & + sleep 1 + + # check this additional modify RPC get a modify RPC slot + # and succeed its operation + checkstat -vp 0600 $dir/file-$mmr || + error "Unable to send $mmr modify RPCs in parallel" + wait + + ### part 2 ### + + # consumes mmr modify RPC slots + #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159 + # drop requests on MDT so that RPC slots are consumed + # during all the request resend interval + do_facet $facet "$LCTL set_param fail_loc=0x159" + echo "launch $mmr chmod in parallel ..." + for i in $(seq $mmr); do + chmod 0666 $dir/file-$i & + done + + # send one additional modify RPC + do_facet $facet "$LCTL set_param fail_loc=0" + echo "launch 1 additional chmod in parallel ..." + chmod 0666 $dir/file-$((mmr + 1)) & + sleep 1 + + # check this additional modify RPC blocked getting a modify RPC slot + checkstat -vp 0644 $dir/file-$((mmr + 1)) || + error "Unexpectedly send $mmr modify RPCs in parallel" + wait +} + +test_90a() { + reformat + if ! combined_mgs_mds ; then + start_mgs + fi + setup + + [[ $($LCTL get_param mdc.*.import | + grep "connect_flags:.*multi_mod_rpc") ]] || + { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + + # check default value + $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed" + check_max_mod_rpcs_in_flight $DIR/$tdir + + cleanup +} +run_test 90a "check max_mod_rpcs_in_flight is enforced" + +test_90b() { + local idx + local facet + local tmp + local mmrpc + + setup + + [[ $($LCTL get_param mdc.*.import | + grep "connect_flags:.*multi_mod_rpc") ]] || + { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + + ### test 1. + # update max_mod_rpcs_in_flight + $LFS mkdir -c1 $DIR/${tdir}1 || error "mkdir $DIR/${tdir}1 failed" + check_max_mod_rpcs_in_flight $DIR/${tdir}1 1 + + ### test 2. + # check client is able to send multiple modify RPCs in paralell + tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import | + grep -c "multi_mod_rpcs") + if [ "$tmp" -ne $MDSCOUNT ]; then + echo "Client not able to send multiple modify RPCs in parallel" + cleanup + return + fi + + # update max_mod_rpcs_in_flight + $LFS mkdir -c1 $DIR/${tdir}2 || error "mkdir $DIR/${tdir}2 failed" + check_max_mod_rpcs_in_flight $DIR/${tdir}2 5 + + ### test 3. + $LFS mkdir -c1 $DIR/${tdir}3 || error "mkdir $DIR/${tdir}3 failed" + idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/${tdir}3)) + facet="mds$((0x$idx + 1))" + + # save MDT max_mod_rpcs_per_client + mmrpc=$(do_facet $facet \ + cat /sys/module/mdt/parameters/max_mod_rpcs_per_client) + + # update max_mod_rpcs_in_flight + umount_client $MOUNT + do_facet $facet \ + "echo 16 > /sys/module/mdt/parameters/max_mod_rpcs_per_client" + mount_client $MOUNT + $LCTL set_param mdc.$FSNAME-MDT$idx-mdc-*.max_rpcs_in_flight=17 + check_max_mod_rpcs_in_flight $DIR/${tdir}3 16 + + # restore MDT max_mod_rpcs_per_client initial value + do_facet $facet \ + "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client" + + rm -rf $DIR/${tdir}? 
+ cleanup +} +run_test 90b "check max_mod_rpcs_in_flight is enforced after update" + +test_90c() { + local tmp + local mrif + local mmrpc + + setup + + [[ $($LCTL get_param mdc.*.import | + grep "connect_flags:.*multi_mod_rpc") ]] || + { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + + # check client is able to send multiple modify RPCs in paralell + tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import | + grep -c "multi_mod_rpcs") + if [ "$tmp" -ne $MDSCOUNT ]; then + skip "Client not able to send multiple modify RPCs in parallel" + cleanup + return + fi + + # get max_rpcs_in_flight value + mrif=$($LCTL get_param -n mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight) + echo "max_rpcs_in_flight is $mrif" + + # get MDT max_mod_rpcs_per_client + mmrpc=$(do_facet mds1 \ + cat /sys/module/mdt/parameters/max_mod_rpcs_per_client) + echo "max_mod_rpcs_per_client is $mmrpc" + + # testcase 1 + # attempt to set max_mod_rpcs_in_flight to max_rpcs_in_flight value + # prerequisite: set max_mod_rpcs_per_client to max_rpcs_in_flight value + umount_client $MOUNT + do_facet mds1 \ + "echo $mrif > /sys/module/mdt/parameters/max_mod_rpcs_per_client" + mount_client $MOUNT + + $LCTL set_param \ + mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif && + error "set max_mod_rpcs_in_flight to $mrif should fail" + + umount_client $MOUNT + do_facet mds1 \ + "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client" + mount_client $MOUNT + + # testcase 2 + # attempt to set max_mod_rpcs_in_flight to max_mod_rpcs_per_client+1 + # prerequisite: set max_rpcs_in_flight to max_mod_rpcs_per_client+2 + $LCTL set_param \ + mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc + 2)) + + $LCTL set_param \ + mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc + 1)) && + error "set max_mod_rpcs_in_flight to $((mmrpc + 1)) should fail" + + cleanup +} +run_test 90c "check max_mod_rpcs_in_flight update limits" + +test_90d() { + local idx + local facet + local mmr + local i + local pid + + setup + + [[ $($LCTL get_param mdc.*.import | + grep "connect_flags:.*multi_mod_rpc") ]] || + { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + + $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed" + idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir)) + facet="mds$((0x$idx + 1))" + + # check client version supports multislots + tmp=$($LCTL get_param -N \ + mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) + if [ -z "$tmp" ]; then + skip "Client does not support multiple modify RPCs in flight" + cleanup + return + fi + + # get current value of max_mod_rcps_in_flight + mmr=$($LCTL get_param -n \ + mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) + echo "max_mod_rcps_in_flight is $mmr" + + # create mmr files + echo "creating $mmr files ..." + umask 0022 + for i in $(seq $mmr); do + touch $DIR/$tdir/file-$i + done + + # prepare for close RPC + multiop_bg_pause $DIR/$tdir/file-close O_c + pid=$! + + # consumes mmr modify RPC slots + #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159 + # drop requests on MDT so that RPC slots are consumed + # during all the request resend interval + do_facet $facet "$LCTL set_param fail_loc=0x159" + echo "launch $mmr chmod in parallel ..." + for i in $(seq $mmr); do + chmod 0600 $DIR/$tdir/file-$i & + done + + # send one additional close RPC + do_facet $facet "$LCTL set_param fail_loc=0" + echo "launch 1 additional close in parallel ..." 
+ kill -USR1 $pid + cancel_lru_locks mdc + sleep 1 + + # check this additional close RPC get a modify RPC slot + # and multiop process completed + [ -d /proc/$pid ] && + error "Unable to send the additional close RPC in parallel" + wait + rm -rf $DIR/$tdir + cleanup +} +run_test 90d "check one close RPC is allowed above max_mod_rpcs_in_flight" + if ! combined_mgs_mds ; then stop mgs fi
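A minimal usage sketch (not part of the patch itself): the subtests this change adds or reworks can be run selectively through the ONLY variable that conf-sanity.sh reads at its top, and the LU-7005 exclusion uses the CONF_SANITY_EXCEPT hook visible in the first hunk. The NAME=local configuration name and the subtest list below are illustrative assumptions, not part of this change.

	# from a lustre/tests checkout configured for the target cluster
	cd lustre/tests
	# run only the concurrent-mount and max_mod_rpcs_in_flight subtests
	ONLY="41c 90a 90b 90c 90d" NAME=local bash conf-sanity.sh
	# keep the known-broken subtest excluded, as ALWAYS_EXCEPT does (LU-7005)
	CONF_SANITY_EXCEPT="50i" NAME=local bash conf-sanity.sh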