+# Save the original values of $OSTCOUNT and $OSTINDEX$i.
+save_ostindex() {
+ local new_ostcount=$1
+ saved_ostcount=$OSTCOUNT
+ OSTCOUNT=$new_ostcount
+
+ local i
+ local index
+ for i in $(seq $OSTCOUNT); do
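+		# OSTINDEX$i is read via bash indirect expansion: ${!index}
+		# expands the variable whose name is stored in $index.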
+ index=OSTINDEX$i
+ eval saved_ostindex$i=${!index}
+ eval OSTINDEX$i=""
+ done
+}
+
+# Restore the original values of $OSTCOUNT and $OSTINDEX$i.
+restore_ostindex() {
+ trap 0
+
+ local i
+ local index
+ for i in $(seq $OSTCOUNT); do
+ index=saved_ostindex$i
+ eval OSTINDEX$i=${!index}
+ done
+ OSTCOUNT=$saved_ostcount
+
+ formatall
+}
+
+# The main purpose of this test is to ensure that OST_INDEX_LIST functions
+# as expected. It uses OST_INDEX_LIST to format OSTs with randomly assigned
+# indices and verifies that a file system formatted this way can be mounted.
+test_81() { # LU-4665
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
+ { skip "Need MDS version at least 2.6.54" && return; }
+ [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; }
+
+ stopall
+
+ # Each time RANDOM is referenced, a random integer between 0 and 32767
+ # is generated.
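+	# Both test values are invalid: 65535 is reserved, and
+	# $((RANDOM + 65536)) lands in [65536, 98303], beyond the largest
+	# valid OST index (65534), so each format attempt should fail.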
+ local i
+ local saved_ostindex1=$OSTINDEX1
+ for i in 65535 $((RANDOM + 65536)); do
+ echo -e "\nFormat ost1 with --index=$i, should fail"
+ OSTINDEX1=$i
+ if add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --reformat \
+ $(ostdevname 1) $(ostvdevname 1); then
+ OSTINDEX1=$saved_ostindex1
+ error "format ost1 with --index=$i should fail"
+ fi
+ done
+ OSTINDEX1=$saved_ostindex1
+
+ save_ostindex 3
+
+ # Format OSTs with random sparse indices.
+ trap "restore_ostindex" EXIT
+ echo -e "\nFormat $OSTCOUNT OSTs with sparse indices"
+ OST_INDEX_LIST=[0,$((RANDOM * 2 % 65533 + 1)),65534] formatall
+
+ # Setup and check Lustre filesystem.
+ start_mgsmds || error "start_mgsmds failed"
+ for i in $(seq $OSTCOUNT); do
+ start ost$i $(ostdevname $i) $OST_MOUNT_OPTS ||
+ error "start ost$i failed"
+ done
+
+ mount_client $MOUNT || error "mount client $MOUNT failed"
+ check_mount || error "check client $MOUNT failed"
+
+ # Check max_easize.
+ local max_easize=$($LCTL get_param -n llite.*.max_easize)
+ [[ $max_easize -eq 128 ]] ||
+ error "max_easize is $max_easize, should be 128 bytes"
+
+ restore_ostindex
+}
+run_test 81 "sparse OST indexing"
+
+# Wait for the OSTs to become active on both the client and the MDT side.
+wait_osts_up() {
+ local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
+ awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
+ wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
+ error "wait_update OSTs up on client failed"
+
+ cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
+ awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
+ wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
+ error "wait_update OSTs up on MDT failed"
+}
+
+# Here we exercise the stripe placement functionality on a file system whose
+# OSTs have been formatted with random indices. With this file system the
+# following functionality is tested:
+#
+# 1. Creating a new file with a specific stripe layout.
+#
+# 2. Modifying an existing empty file with a specific stripe layout.
+#
+# 3. Ensuring we fail to set the stripe layout of a file that already has one.
+#
+# 4. If ost-index is defined, ensuring it is the first entry in the OST index
+#    list returned by lfs getstripe.
+#
+# 5. Lastly, ensuring this functionality fails with directories.
+test_82a() { # LU-4665
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
+ { skip "Need MDS version at least 2.6.54" && return; }
+ [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; }
+
+ stopall
+
+ save_ostindex 3
+
+ # Format OSTs with random sparse indices.
+ local i
+ local index
+ local ost_indices
+ for i in $(seq $OSTCOUNT); do
+ index=$((RANDOM * 2))
+ ost_indices+=" $index"
+ done
+ ost_indices=$(comma_list $ost_indices)
+
+ trap "restore_ostindex" EXIT
+ echo -e "\nFormat $OSTCOUNT OSTs with sparse indices $ost_indices"
+ OST_INDEX_LIST=[$ost_indices] formatall
+
+ # Setup Lustre filesystem.
+ start_mgsmds || error "start_mgsmds failed"
+ for i in $(seq $OSTCOUNT); do
+ start ost$i $(ostdevname $i) $OST_MOUNT_OPTS ||
+ error "start ost$i failed"
+ done
+
+ mount_client $MOUNT || error "mount client $MOUNT failed"
+ wait_osts_up
+
+ $LFS df $MOUNT || error "$LFS df $MOUNT failed"
+ mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+
+	# 1. If the file does not exist, a new file will be created
+	# with the specified OSTs.
+ local file=$DIR/$tdir/$tfile-1
+ local cmd="$SETSTRIPE -o $ost_indices $file"
+ echo -e "\n$cmd"
+ eval $cmd || error "$cmd failed"
+ check_stripe_count $file $OSTCOUNT
+ check_obdidx $file $ost_indices
+ dd if=/dev/urandom of=$file count=1 bs=1M > /dev/null 2>&1 ||
+ error "write $file failed"
+
+	# 2. If the file already exists and is an empty file, the
+	# specified layout will be attached to the file.
+ file=$DIR/$tdir/$tfile-2
+ mcreate $file || error "mcreate $file failed"
+ cmd="$SETSTRIPE -o $ost_indices $file"
+ echo -e "\n$cmd"
+ eval $cmd || error "$cmd failed"
+ dd if=/dev/urandom of=$file count=1 bs=1M > /dev/null 2>&1 ||
+ error "write $file failed"
+ check_stripe_count $file $OSTCOUNT
+ check_obdidx $file $ost_indices
+
+ # 3. If the file already has a valid layout attached, the command
+ # should fail with EBUSY.
+ echo -e "\n$cmd"
+ eval $cmd && error "stripe is already set on $file, $cmd should fail"
+
+ # 4. If [--stripe-index|-i <start_ost_idx>] is used, the index must
+ # be in the OST indices list.
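+	# ${ost_indices##*,} strips everything up to and including the last
+	# comma, i.e. it selects the last index in the comma-separated list.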
+ local start_ost_idx=${ost_indices##*,}
+ file=$DIR/$tdir/$tfile-3
+ cmd="$SETSTRIPE -o $ost_indices -i $start_ost_idx $file"
+ echo -e "\n$cmd"
+ eval $cmd || error "$cmd failed"
+ check_stripe_count $file $OSTCOUNT
+ check_obdidx $file $ost_indices
+ check_start_ost_idx $file $start_ost_idx
+
+ file=$DIR/$tdir/$tfile-4
+ cmd="$SETSTRIPE"
+ cmd+=" -o $(exclude_items_from_list $ost_indices $start_ost_idx)"
+ cmd+=" -i $start_ost_idx $file"
+ echo -e "\n$cmd"
+ eval $cmd && error "index $start_ost_idx should be in $ost_indices"
+
+	# 5. Specifying OST indices for a directory should fail with ENOTSUPP.
+ local dir=$DIR/$tdir/$tdir
+ mkdir $dir || error "mkdir $dir failed"
+ cmd="$SETSTRIPE -o $ost_indices $dir"
+ echo -e "\n$cmd"
+	eval $cmd && error "$cmd should fail, specifying OST indices" \
+			   "for a directory is not supported"
+
+ restore_ostindex
+}
+run_test 82a "specify OSTs for file (succeed) or directory (fail)"
+
+cleanup_82b() {
+ trap 0
+
+ # Remove OSTs from a pool and destroy the pool.
+ destroy_pool $ost_pool || true
+
+ restore_ostindex
+}
+
+# Test 82b ensures that, if the user supplies a pool together with a specific
+# stripe layout, the request behaves properly. It should fail when the
+# supplied OST index list points to OSTs that are not members of the
+# user-supplied pool.
+test_82b() { # LU-4665
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
+ { skip "Need MDS version at least 2.6.54" && return; }
+ [[ $OSTCOUNT -ge 4 ]] || { skip_env "Need at least 4 OSTs" && return; }
+
+ stopall
+
+ save_ostindex 4
+
+ # Format OSTs with random sparse indices.
+ local i
+ local index
+ local ost_indices
+ for i in $(seq $OSTCOUNT); do
+ index=$((RANDOM * 2))
+ ost_indices+=" $index"
+ done
+ ost_indices=$(comma_list $ost_indices)
+
+ trap "restore_ostindex" EXIT
+ echo -e "\nFormat $OSTCOUNT OSTs with sparse indices $ost_indices"
+ OST_INDEX_LIST=[$ost_indices] formatall
+
+ # Setup Lustre filesystem.
+ start_mgsmds || error "start_mgsmds failed"
+ for i in $(seq $OSTCOUNT); do
+ start ost$i $(ostdevname $i) $OST_MOUNT_OPTS ||
+ error "start ost$i failed"
+ done
+
+ mount_client $MOUNT || error "mount client $MOUNT failed"
+ wait_osts_up
+ $LFS df $MOUNT || error "$LFS df $MOUNT failed"
+ mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+
+ # Create a new pool and add OSTs into it.
+ local ost_pool=$FSNAME.$TESTNAME
+ create_pool $ost_pool || error "create OST pool $ost_pool failed"
+
+ trap - EXIT
+ trap "cleanup_82b" EXIT
+
+ local ost_idx_in_list=${ost_indices##*,}
+ local ost_idx_in_pool=$(exclude_items_from_list $ost_indices \
+ $ost_idx_in_list)
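+	# $ost_idx_in_list holds the last index in the list; it is deliberately
+	# excluded from $ost_idx_in_pool, so it will not be a pool member.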
+
+ local ost_targets="$FSNAME-OST["
+ for i in ${ost_idx_in_pool//,/ }; do
+ ost_targets=$ost_targets$(printf "%04x," $i)
+ done
+ ost_targets="${ost_targets%,}]"
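+	# e.g. for indices "10,20,30" this yields
+	# ost_targets="$FSNAME-OST[000a,0014,001e]".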
+
+ local ost_targets_uuid=$(for i in ${ost_idx_in_pool//,/ }; \
+ do printf "$FSNAME-OST%04x_UUID\n" $i; done |
+ sort -u | tr '\n' ' ')
+
+ local cmd="$LCTL pool_add $ost_pool $ost_targets"
+ do_facet mgs $cmd || error "$cmd failed"
+ wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME|
+ sort -u | tr '\n' ' ' " "$ost_targets_uuid" ||
+ error "wait_update $ost_pool failed"
+ pool_list $ost_pool || error "list OST pool $ost_pool failed"
+
+ # If [--pool|-p <pool_name>] is set with [--ost-list|-o <ost_indices>],
+ # then the OSTs must be the members of the pool.
+ local file=$DIR/$tdir/$tfile
+ cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file"
+ echo -e "\n$cmd"
+ eval $cmd && error "OST with index $ost_idx_in_list should be" \
+ "in OST pool $ost_pool"
+
+ # Only select OST $ost_idx_in_list from $ost_pool for file.
+ ost_idx_in_list=${ost_idx_in_pool#*,}
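+	# ${ost_idx_in_pool#*,} drops the first of the three pool members,
+	# leaving two indices, hence the stripe count of 2 checked below.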
+ cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file"
+ echo -e "\n$cmd"
+ eval $cmd || error "$cmd failed"
+ cmd="$GETSTRIPE $file"
+ echo -e "\n$cmd"
+ eval $cmd || error "$cmd failed"
+ check_stripe_count $file 2
+ check_obdidx $file $ost_idx_in_list
+ dd if=/dev/urandom of=$file count=1 bs=1M > /dev/null 2>&1 ||
+ error "write $file failed"
+
+ cleanup_82b
+}
+run_test 82b "specify OSTs for file with --pool and --ost-list options"
+
+test_83() {
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] ||
+ { skip "Need OST version at least 2.6.91" && return 0; }
+	if [ $(facet_fstype ost1) != ldiskfs ]; then
+		skip "Only applicable to ldiskfs-based OSTs"
+		return
+	fi
+
+ local dev
+ local ostmnt
+ local fstype
+ local mnt_opts
+
+ dev=$(ostdevname 1)
+ ostmnt=$(facet_mntpt ost1)
+ fstype=$(facet_fstype ost1)
+
+ # Mount the OST as an ldiskfs filesystem.
+ log "mount the OST $dev as a $fstype filesystem"
+ add ost1 $(mkfs_opts ost1 $dev) $FSTYPE_OPT \
+ --reformat $dev $dev > /dev/null ||
+ error "format ost1 error"
+
+ if ! test -b $dev; then
+ mnt_opts=$(csa_add "$OST_MOUNT_OPTS" -o loop)
+ fi
+ echo "mnt_opts $mnt_opts"
+ do_facet ost1 mount -t $fstype $dev \
+ $ostmnt $mnt_opts
+	# Run llverfs on the mounted ldiskfs filesystem.
+	# This is needed to hit ENOSPC.
+ log "run llverfs in partial mode on the OST $fstype $ostmnt"
+ do_rpc_nodes $(facet_host ost1) run_llverfs $ostmnt -vpl \
+ "no" || error "run_llverfs error on $fstype"
+
+ # Unmount the OST.
+ log "unmount the OST $dev"
+ stop ost1
+
+	# Delete the IO_scrub file. Later, osd_scrub_setup() will try to
+	# re-create "IO_scrub" but will get ENOSPC.
+ writeconf_all
+ echo "start ost1 service on `facet_active_host ost1`"
+ start ost1 `ostdevname 1` $OST_MOUNT_OPTS
+
+ local err
+ err=$(do_facet ost1 dmesg | grep "VFS: Busy inodes after unmount of")
+ echo "string err $err"
+ [ -z "$err" ] || error $err
+ reformat
+}
+run_test 83 "ENOSPACE on OST doesn't cause message VFS: \
+Busy inodes after unmount ..."
+
+recovery_time_min() {
+ local CONNECTION_SWITCH_MIN=5
+ local CONNECTION_SWITCH_INC=5
+ local CONNECTION_SWITCH_MAX
+ local RECONNECT_DELAY_MAX
+ local INITIAL_CONNECT_TIMEOUT
+ local max
+ local TO_20
+
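+	# Worked example, assuming TIMEOUT=20:
+	# CONNECTION_SWITCH_MAX = min(50, max(5, 20)) = 20
+	# INITIAL_CONNECT_TIMEOUT = max(5, 20/20) = 5
+	# RECONNECT_DELAY_MAX = 20 + 5 + 5 = 30, so the function echoes 60.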
+	#CONNECTION_SWITCH_MAX = min(50, max($CONNECTION_SWITCH_MIN, $TIMEOUT))
+ (($CONNECTION_SWITCH_MIN>$TIMEOUT)) && \
+ max=$CONNECTION_SWITCH_MIN || max=$TIMEOUT
+ (($max<50)) && CONNECTION_SWITCH_MAX=$max || CONNECTION_SWITCH_MAX=50
+
+ #INITIAL_CONNECT_TIMEOUT = max(CONNECTION_SWITCH_MIN, \
+ #obd_timeout/20)
+ TO_20=$(($TIMEOUT/20))
+ (($CONNECTION_SWITCH_MIN>$TO_20)) && \
+ INITIAL_CONNECT_TIMEOUT=$CONNECTION_SWITCH_MIN || \
+ INITIAL_CONNECT_TIMEOUT=$TO_20
+
+ RECONNECT_DELAY_MAX=$(($CONNECTION_SWITCH_MAX+$CONNECTION_SWITCH_INC+ \
+ $INITIAL_CONNECT_TIMEOUT))
+ echo $((2*$RECONNECT_DELAY_MAX))
+}
+
+test_84() {
+ local facet=$SINGLEMDS
+ local num=$(echo $facet | tr -d "mds")
+ local dev=$(mdsdevname $num)
+ local time_min=$(recovery_time_min)
+ local recovery_duration
+ local completed_clients
+ local correct_clients
+ local wrap_up=5
+
+ load_modules
+ echo "start mds service on $(facet_active_host $facet)"
+ start_mds \
+ "-o recovery_time_hard=$time_min,recovery_time_soft=$time_min" $@ ||
+ error "start MDS failed"
+
+ start_ost
+ start_ost2
+
+ echo "recovery_time=$time_min, timeout=$TIMEOUT, wrap_up=$wrap_up"
+
+ mount_client $MOUNT1 || error "mount failed"
+ mount_client $MOUNT2 || error "mount failed"
+
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR1/$tfile-%d 1000
+
+	# We need to catch the end of the recovery window to extend it.
+	# Skip 5 requests and add a delay to request handling.
+ #define OBD_FAIL_TGT_REPLAY_DELAY 0x709 | FAIL_SKIP
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x20000709 fail_val=5"
+
+ facet_failover --fsck $SINGLEMDS || error "failover: $?"
+ client_up
+
+ echo "recovery status"
+ do_facet $SINGLEMDS \
+ "$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_status"
+
+ recovery_duration=$(do_facet $SINGLEMDS \
+ "$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_status" |
+ awk '/recovery_duration/ { print $2 }')
+ (( $recovery_duration > $time_min + $wrap_up )) &&
+ error "recovery_duration > recovery_time_hard + wrap up"
+ completed_clients=$(do_facet $SINGLEMDS \
+ "$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_status" |
+ awk '/completed_clients/ { print $2 }')
+
+ correct_clients="$MDSCOUNT/$((MDSCOUNT+1))"
+ [ "$completed_clients" = "${correct_clients}" ] ||
+ error "$completed_clients != $correct_clients"
+
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ umount_client $MOUNT1
+ umount_client $MOUNT2
+
+ stop_ost
+ stop_ost2
+ stop_mds
+}
+run_test 84 "check recovery_hard_time"
+
+test_85() {
+ [[ $(lustre_version_code ost1) -ge $(version_code 2.7.55) ]] ||
+ { skip "Need OST version at least 2.7.55" && return 0; }
+	#define OBD_FAIL_OSD_OST_EA_FID_SET 0x197
+ do_facet ost1 "lctl set_param fail_loc=0x197"
+ start_ost
+ stop_ost
+}
+run_test 85 "osd_ost init: fail ea_fid_set"
+
+test_86() {
+ [ "$(facet_fstype ost1)" = "zfs" ] &&
+ skip "LU-6442: no such mkfs params for ZFS OSTs" && return
+
+ local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \
+ --reformat $(ostdevname 1) $(ostvdevname 1)"
+
+ local NEWSIZE=1024
+ local OLDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
+ awk '/Flex block group size: / { print $NF; exit; }')
+
+ local opts=OST_OPTS
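+	# $opts holds the *name* of the options variable; ${!opts} dereferences
+	# it. If --mkfsoptions is not present yet, append one carrying the
+	# flex_bg settings; otherwise splice the settings into the existing
+	# --mkfsoptions value.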
+ if [[ ${!opts} != *mkfsoptions* ]]; then
+ eval opts=\"${!opts} \
+ --mkfsoptions='\\\"-O flex_bg -G $NEWSIZE\\\"'\"
+ else
+ val=${!opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O flex_bg -G $NEWSIZE }
+ eval opts='${val}'
+ fi
+
+ echo "params: $opts"
+
+ add ost1 $opts || error "add ost1 failed with new params"
+
+ local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" |
+ awk '/Flex block group size: / { print $NF; exit; }')
+
+ [[ $FOUNDSIZE == $NEWSIZE ]] ||
+ error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE"
+ return 0
+}
+run_test 86 "Replacing mkfs.lustre -G option"
+
+test_87() { #LU-6544
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ { skip "Need MDS version at least 2.7.56" && return; }
+ [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+ { skip "Only applicable to ldiskfs-based MDTs" && return; }
+ [[ $OSTCOUNT -gt 69 ]] &&
+ { skip "Ignore wide striping situation" && return; }
+
+ local mdsdev=$(mdsdevname 1)
+ local mdsvdev=$(mdsvdevname 1)
+ local file=$DIR/$tfile
+ local mntpt=$(facet_mntpt $SINGLEMDS)
+ local used_xattr_blk=0
+ local inode_size=${1:-512}
+ local left_size=0
+ local xtest="trusted.test"
+ local value
+ local orig
+ local i
+
+	# Please see LU-6544 for the MDT inode size calculation.
+ if [ $OSTCOUNT -gt 26 ]; then
+ inode_size=2048
+ elif [ $OSTCOUNT -gt 5 ]; then
+ inode_size=1024
+ fi
+ left_size=$(expr $inode_size - \
+ 156 - \
+ 32 - \
+ 32 - $OSTCOUNT \* 24 - 16 - 3 - \
+ 24 - 16 - 3 - \
+ 24 - 18 - $(expr length $tfile) - 16 - 4)
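+	# Worked example, assuming OSTCOUNT=3, inode_size=512 and a 9-character
+	# $tfile: left_size = 512 - 156 - 32 - 32 - 72 - 16 - 3 - 24 - 16 - 3 -
+	# 24 - 18 - 9 - 16 - 4 = 87 bytes.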
+	if [ $left_size -le 0 ]; then
+		echo "No space ($left_size bytes) is expected in the inode."
+		echo "Try a 1-byte xattr instead to verify this."
+		left_size=1
+	else
+		echo "Estimate: at most $left_size bytes of space left in the inode."
+	fi
+
+ unload_modules
+ reformat
+
+ add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \
+ --reformat $mdsdev $mdsvdev || error "add mds1 failed"
+ start_mdt 1 > /dev/null || error "start mdt1 failed"
+ for i in $(seq $OSTCOUNT); do
+ start ost$i $(ostdevname $i) $OST_MOUNT_OPTS > /dev/null ||
+ error "start ost$i failed"
+ done
+ mount_client $MOUNT > /dev/null || error "mount client $MOUNT failed"
+ check_mount || error "check client $MOUNT failed"
+
+	# set xattr
+ $SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed"
+ $GETSTRIPE $file || error "$GETSTRIPE $file failed"
+ i=$($GETSTRIPE -c $file)
+ if [ $i -ne $OSTCOUNT ]; then
+ left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24)
+ echo -n "Since only $i out $OSTCOUNT OSTs are used, "
+ echo -n "the expected left space is changed to "
+ echo "$left_size bytes at most."
+ fi
+ value=$(generate_string $left_size)
+ setfattr -n $xtest -v $value $file
+ orig=$(get_xattr_value $xtest $file)
+ [[ "$orig" != "$value" ]] && error "$xtest changed"
+
+	# Verify that the inode has the expected space left
+ umount $MOUNT > /dev/null || error "umount $MOUNT failed"
+ stop_mdt 1 > /dev/null || error "stop mdt1 failed"
+ mount_ldiskfs $SINGLEMDS || error "mount -t ldiskfs $SINGLEMDS failed"
+
+ do_facet $SINGLEMDS ls -sal $mntpt/ROOT/$tfile
+ used_xattr_blk=$(do_facet $SINGLEMDS ls -s $mntpt/ROOT/$tfile |
+ awk '{ print $1 }')
+	[[ $used_xattr_blk -eq 0 ]] &&
+		error "Please check the MDS inode size calculation: \
+more than $left_size bytes of space left in the inode."
+	echo "Verified: at most $left_size bytes of space left in the inode."
+
+ stopall
+}
+run_test 87 "check if MDT inode can hold EAs with N stripes properly"
+
+# $1 test directory
+# $2 (optional) value of max_mod_rpcs_in_flight to set
+check_max_mod_rpcs_in_flight() {
+ local dir="$1"
+ local mmr="$2"
+ local idx
+ local facet
+ local tmp
+ local i
+
+ idx=$(printf "%04x" $($LFS getdirstripe -i $dir))
+ facet="mds$((0x$idx + 1))"
+
+ if [ -z "$mmr" ]; then
+		# get the current value of max_mod_rpcs_in_flight
+ mmr=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) ||
+ error "Unable to get max_mod_rpcs_in_flight"
+ echo "max_mod_rcps_in_flight is $mmr"
+ else
+ # set value of max_mod_rpcs_in_flight
+ $LCTL set_param \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight=$mmr ||
+ error "Unable to set max_mod_rpcs_in_flight to $mmr"
+ echo "max_mod_rpcs_in_flight set to $mmr"
+ fi
+
+ # create mmr+1 files
+ echo "creating $((mmr + 1)) files ..."
+ umask 0022
+ for i in $(seq $((mmr + 1))); do
+ touch $dir/file-$i
+ done
+
+ ### part 1 ###
+
+ # consumes mmr-1 modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+	# drop requests on the MDT so that RPC slots stay consumed
+	# for the entire request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $((mmr - 1)) chmod in parallel ..."
+ for i in $(seq $((mmr - 1))); do
+ chmod 0600 $dir/file-$i &
+ done
+ sleep 1
+
+ # send one additional modify RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional chmod in parallel ..."
+ chmod 0600 $dir/file-$mmr &
+ sleep 1
+
+	# check that this additional modify RPC gets a modify RPC slot
+	# and that its operation succeeds
+ checkstat -vp 0600 $dir/file-$mmr ||
+ error "Unable to send $mmr modify RPCs in parallel"
+ wait
+
+ ### part 2 ###
+
+ # consumes mmr modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+	# drop requests on the MDT so that RPC slots stay consumed
+	# for the entire request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $mmr chmod in parallel ..."
+ for i in $(seq $mmr); do
+ chmod 0666 $dir/file-$i &
+ done
+ sleep 1
+
+ # send one additional modify RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional chmod in parallel ..."
+ chmod 0666 $dir/file-$((mmr + 1)) &
+ sleep 1
+
+	# check that this additional modify RPC was blocked waiting for
+	# a modify RPC slot
+	checkstat -vp 0644 $dir/file-$((mmr + 1)) ||
+		error "Unexpectedly sent $((mmr + 1)) modify RPCs in parallel"
+ wait
+}
+
+test_90a() {
+ reformat
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ # check default value
+ $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ check_max_mod_rpcs_in_flight $DIR/$tdir
+
+ cleanup
+}
+run_test 90a "check max_mod_rpcs_in_flight is enforced"
+
+test_90b() {
+ local idx
+ local facet
+ local tmp
+ local mmrpc
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ ### test 1.
+ # update max_mod_rpcs_in_flight
+ $LFS mkdir -c1 $DIR/${tdir}1 || error "mkdir $DIR/${tdir}1 failed"
+ check_max_mod_rpcs_in_flight $DIR/${tdir}1 1
+
+ ### test 2.
+	# check that the client is able to send multiple modify RPCs in parallel
+ tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
+ grep -c "multi_mod_rpcs")
+ if [ "$tmp" -ne $MDSCOUNT ]; then
+ echo "Client not able to send multiple modify RPCs in parallel"
+ cleanup
+ return
+ fi
+
+ # update max_mod_rpcs_in_flight
+ $LFS mkdir -c1 $DIR/${tdir}2 || error "mkdir $DIR/${tdir}2 failed"
+ check_max_mod_rpcs_in_flight $DIR/${tdir}2 5
+
+ ### test 3.
+ $LFS mkdir -c1 $DIR/${tdir}3 || error "mkdir $DIR/${tdir}3 failed"
+ idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/${tdir}3))
+ facet="mds$((0x$idx + 1))"
+
+ # save MDT max_mod_rpcs_per_client
+ mmrpc=$(do_facet $facet \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+
+ # update max_mod_rpcs_in_flight
+ umount_client $MOUNT
+ do_facet $facet \
+ "echo 16 > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+ $LCTL set_param mdc.$FSNAME-MDT$idx-mdc-*.max_rpcs_in_flight=17
+ check_max_mod_rpcs_in_flight $DIR/${tdir}3 16
+
+ # restore MDT max_mod_rpcs_per_client initial value
+ do_facet $facet \
+ "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+
+ rm -rf $DIR/${tdir}?
+ cleanup
+}
+run_test 90b "check max_mod_rpcs_in_flight is enforced after update"
+
+test_90c() {
+ local tmp
+ local mrif
+ local mmrpc
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+	# check that the client is able to send multiple modify RPCs in parallel
+ tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import |
+ grep -c "multi_mod_rpcs")
+ if [ "$tmp" -ne $MDSCOUNT ]; then
+ skip "Client not able to send multiple modify RPCs in parallel"
+ cleanup
+ return
+ fi
+
+ # get max_rpcs_in_flight value
+ mrif=$($LCTL get_param -n mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight)
+ echo "max_rpcs_in_flight is $mrif"
+
+ # get MDT max_mod_rpcs_per_client
+ mmrpc=$(do_facet mds1 \
+ cat /sys/module/mdt/parameters/max_mod_rpcs_per_client)
+ echo "max_mod_rpcs_per_client is $mmrpc"
+
+ # testcase 1
+ # attempt to set max_mod_rpcs_in_flight to max_rpcs_in_flight value
+ # prerequisite: set max_mod_rpcs_per_client to max_rpcs_in_flight value
+ umount_client $MOUNT
+ do_facet mds1 \
+ "echo $mrif > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$mrif &&
+ error "set max_mod_rpcs_in_flight to $mrif should fail"
+
+ umount_client $MOUNT
+ do_facet mds1 \
+ "echo $mmrpc > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
+ mount_client $MOUNT
+
+ # testcase 2
+ # attempt to set max_mod_rpcs_in_flight to max_mod_rpcs_per_client+1
+ # prerequisite: set max_rpcs_in_flight to max_mod_rpcs_per_client+2
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_rpcs_in_flight=$((mmrpc + 2))
+
+ $LCTL set_param \
+ mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight=$((mmrpc + 1)) &&
+ error "set max_mod_rpcs_in_flight to $((mmrpc + 1)) should fail"
+
+ cleanup
+}
+run_test 90c "check max_mod_rpcs_in_flight update limits"
+
+test_90d() {
+	local idx
+	local facet
+	local tmp
+	local mmr
+	local i
+	local pid
+
+ setup
+
+ [[ $($LCTL get_param mdc.*.import |
+ grep "connect_flags:.*multi_mod_rpc") ]] ||
+ { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
+
+ $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
+ facet="mds$((0x$idx + 1))"
+
+	# check that the client version supports multiple modify RPC slots
+ tmp=$($LCTL get_param -N \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
+ if [ -z "$tmp" ]; then
+ skip "Client does not support multiple modify RPCs in flight"
+ cleanup
+ return
+ fi
+
+	# get the current value of max_mod_rpcs_in_flight
+ mmr=$($LCTL get_param -n \
+ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
+ echo "max_mod_rcps_in_flight is $mmr"
+
+ # create mmr files
+ echo "creating $mmr files ..."
+ umask 0022
+ for i in $(seq $mmr); do
+ touch $DIR/$tdir/file-$i
+ done
+
+ # prepare for close RPC
+ multiop_bg_pause $DIR/$tdir/file-close O_c
+ pid=$!
+
+ # consumes mmr modify RPC slots
+ #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
+	# drop requests on the MDT so that RPC slots stay consumed
+	# for the entire request resend interval
+ do_facet $facet "$LCTL set_param fail_loc=0x159"
+ echo "launch $mmr chmod in parallel ..."
+ for i in $(seq $mmr); do
+ chmod 0600 $DIR/$tdir/file-$i &
+ done
+
+ # send one additional close RPC
+ do_facet $facet "$LCTL set_param fail_loc=0"
+ echo "launch 1 additional close in parallel ..."
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ sleep 1
+
+	# check that this additional close RPC got a modify RPC slot
+	# and that the multiop process completed
+ [ -d /proc/$pid ] &&
+ error "Unable to send the additional close RPC in parallel"
+ wait
+ rm -rf $DIR/$tdir
+ cleanup
+}
+run_test 90d "check one close RPC is allowed above max_mod_rpcs_in_flight"
+
+check_uuid_on_ost() {
+ local nid=$1
+ do_facet ost1 "$LCTL get_param obdfilter.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+check_uuid_on_mdt() {
+ local nid=$1
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+}
+
+test_91() {
+ local uuid
+ local nid
+ local found
+
+ load_modules
+ start_mds || error "MDS start failed"
+ start_ost || error "unable to start OST"
+ mount_client $MOUNT || error "client start failed"
+ check_mount || error "check_mount failed"
+
+ if remote_mds; then
+ nid=$($LCTL list_nids | head -1 | sed "s/\./\\\./g")
+ else
+ nid="0@lo"
+ fi
+ uuid=$(get_client_uuid $MOUNT)
+
+ echo "list nids on mdt:"
+ do_facet $SINGLEMDS "$LCTL list_param mdt.${FSNAME}*.exports.*"
+ echo "uuid from $nid:"
+ do_facet $SINGLEMDS "$LCTL get_param mdt.${FSNAME}*.exports.'$nid'.uuid"
+
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid on OST"
+
+ # umount the client so it won't reconnect
+ manual_umount_client --force || error "failed to umount $?"
+ # shouldn't disappear on MDS after forced umount
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -z "$found" ] && error "can't find $uuid $nid"
+
+ echo "evict $nid"
+ do_facet $SINGLEMDS \
+ "$LCTL set_param -n mdt.${mds1_svc}.evict_client nid:$nid"
+
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ # check it didn't reconnect (being umounted)
+ sleep $((TIMEOUT+1))
+ found=$(check_uuid_on_mdt $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on MDT"
+ found=$(check_uuid_on_ost $nid | grep $uuid)
+ [ -n "$found" ] && error "found $uuid $nid on OST"
+
+ cleanup
+}
+run_test 91 "evict-by-nid support"
+