# bug number: LU-8411 LU-9054
ALWAYS_EXCEPT+=" 407 312"
-if $SHARED_KEY; then
- # bug number: LU-9795 LU-9795 LU-9795 LU-9795
- ALWAYS_EXCEPT+=" 17n 60a 133g 300f"
-fi
-
selinux_status=$(getenforce)
if [ "$selinux_status" != "Disabled" ]; then
# bug number:
ALWAYS_EXCEPT+=" $GRANT_CHECK_LIST"
# bug number: LU-11671 LU-11667
ALWAYS_EXCEPT+=" 45 317"
+ # bug number: LU-14067 LU-14067
+ ALWAYS_EXCEPT+=" 400a 400b"
fi
+# skip splice tests on kernels >= 4.15.0 until they are fixed
+if [ $LINUX_VERSION_CODE -ge $(version_code 4.15.0) ]; then
+ # bug number: LU-14045
+ ALWAYS_EXCEPT+=" 426"
+fi
# skip nfs tests on kernels >= 4.12.0 until they are fixed
if [ $LINUX_VERSION_CODE -ge $(version_code 4.12.0) ]; then
# bug number: LU-12661
}
run_test 24F "hash order vs readdir (LU-11330)"
+# LU-style check that a cross-MDT rename of a symlink actually migrates it:
+# the symlink must be recreated on the target MDT, not left on the source.
+test_24G () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+
+	local ino1
+	local ino2
+
+	# one directory on MDT0, one on MDT1, so the rename crosses MDTs
+	$LFS mkdir -i 0 $DIR/$tdir-0 || error "mkdir $tdir-0"
+	$LFS mkdir -i 1 $DIR/$tdir-1 || error "mkdir $tdir-1"
+	touch $DIR/$tdir-0/f1 || error "touch f1"
+	ln -s $DIR/$tdir-0/f1 $DIR/$tdir-0/s1 || error "ln s1"
+	ino1=$(stat -c%i $DIR/$tdir-0/s1)
+	mv $DIR/$tdir-0/s1 $DIR/$tdir-1 || error "mv s1"
+	ino2=$(stat -c%i $DIR/$tdir-1/s1)
+	# a migrated symlink gets a new inode number on the target MDT
+	[ $ino1 -ne $ino2 ] || error "s1 should be migrated"
+}
+run_test 24G "migrate symlink in rename"
+
test_25a() {
echo '== symlink sanity ============================================='
[[ $MDS1_VERSION -gt $(version_code 2.12.58) ]] ||
skip "MDS < 2.12.58 doesn't return LSOM data"
local dir=$DIR/$tdir
+ local old_agl=$($LCTL get_param -n llite.*.statahead_agl)
+
+ [[ $OSC == "mdc" ]] && skip "statahead not needed for DoM files"
- [[ $OSC == "mdc" ]] && skip "DoM files" && return
+ # statahead_agl may cause extra glimpse which confuses results. LU-13017
+ $LCTL set_param -n llite.*.statahead_agl=0
+ stack_trap "$LCTL set_param -n llite.*.statahead_agl=$old_agl"
setup_56 $dir $NUMFILES $NUMDIRS "-c 1"
# open and close all files to ensure LSOM is updated
error "cannot setstripe 20MB file"
echo "done"
echo -n "Sizing 20MB test file..."
- truncate "$dir/20mb" 20971520 || error "cannot create 20MB test file"
+ $TRUNCATE "$dir/20mb" 20971520 || error "cannot create 20MB test file"
echo "done"
echo -n "Verifying small file autostripe count is 1..."
$LFS_MIGRATE -y -A -C 1 "$dir/20mb" ||
echo "done"
echo -n "Sizing 1GB test file..."
# File size is 1GB + 3KB
- truncate "$dir/1gb" 1073744896 || error "cannot create 1GB test file"
+ $TRUNCATE "$dir/1gb" 1073744896 || error "cannot create 1GB test file"
echo "done"
# need at least 512MB per OST for 1GB file to fit in 2 stripes
do_facet mds$index $LCTL set_param fail_loc=0x8000019a \
> /dev/null
- usleep 100
+ sleep 0.01
done
kill -9 $pid
}
run_test 74c "ldlm_lock_create error path, (shouldn't LBUG)"
-num_inodes() {
-	[ -f /sys/kernel/slab/lustre_inode_cache/shrink ] &&
-		echo 1 > /sys/kernel/slab/lustre_inode_cache/shrink
-	awk '/lustre_inode_cache/ {print $2; exit}' /proc/slabinfo
+slab_lic=/sys/kernel/slab/lustre_inode_cache
+# Print the current object count of the lustre_inode_cache slab, after
+# asking the kernel to shrink the cache so freed-but-cached inodes are
+# dropped first.  Prefer the per-cache sysfs "objects" file; fall back
+# to parsing /proc/slabinfo when sysfs slab files are unavailable.
+num_objects() {
+	[ -f $slab_lic/shrink ] && echo 1 > $slab_lic/shrink
+	[ -f $slab_lic/objects ] && awk '{ print $1 }' $slab_lic/objects ||
+		awk '/lustre_inode_cache/ { print $2; exit }' /proc/slabinfo
 }
-test_76() { # Now for bug 20433, added originally in bug 1443
+# Create and delete many files and verify the inode slab shrinks back to
+# (roughly) its starting size, i.e. clients recycle inodes instead of
+# leaking them.
+test_76a() { # Now for b=20433, added originally in b=1443
 	[ $PARALLEL == "yes" ] && skip "skip parallel run"
 	cancel_lru_locks osc
+	# there may be some slab objects cached per core
 	local cpus=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
-	local before=$(num_inodes)
+	local before=$(num_objects)
 	local count=$((512 * cpus))
-	[ "$SLOW" = "no" ] && count=$((64 * cpus))
+	[ "$SLOW" = "no" ] && count=$((128 * cpus))
+	local margin=$((count / 10))
+	if [[ -f $slab_lic/aliases ]]; then
+		local aliases=$(cat $slab_lic/aliases)
+		(( aliases > 0 )) && margin=$((margin * aliases))
+	fi
-	echo "before inodes: $before"
+	echo "before slab objects: $before"
 	for i in $(seq $count); do
 		touch $DIR/$tfile
 		rm -f $DIR/$tfile
 	done
 	cancel_lru_locks osc
-	local after=$(num_inodes)
-	echo "after inodes: $after"
-	while (( after > before + 8 * ${cpus:-1} )); do
+	local after=$(num_objects)
+	# initialize explicitly so a stale/exported $wait can't skew the timeout
+	local wait=0
+	echo "created: $count, after slab objects: $after"
+	# shared slab counts are not very accurate, allow significant margin
+	# the main goal is that the cache growth is not permanently > $count
+	while (( after > before + margin )); do
 		sleep 1
-		after=$(num_inodes)
+		after=$(num_objects)
 		wait=$((wait + 1))
-		(( wait % 5 == 0 )) && echo "wait $wait seconds inodes: $after"
-		if (( wait > 30 )); then
-			error "inode slab grew from $before to $after"
+		(( wait % 5 == 0 )) && echo "wait $wait seconds objects: $after"
+		if (( wait > 60 )); then
+			error "inode slab grew from $before+$margin to $after"
 		fi
 	done
 }
-run_test 76 "confirm clients recycle inodes properly ===="
+run_test 76a "confirm clients recycle inodes properly ===="
+
+# Companion to 76a: create/remove many directories and verify directory
+# inodes are recycled (slab object count returns to its starting value).
+test_76b() {
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	[ $CLIENT_VERSION -ge $(version_code 2.13.55) ] || skip "not supported"
+
+	local count=512
+	local before=$(num_objects)
+
+	for i in $(seq $count); do
+		mkdir $DIR/$tdir
+		rmdir $DIR/$tdir
+	done
+
+	local after=$(num_objects)
+	local wait=0
+
+	# allow up to 60s for the freed inodes to be reclaimed from the slab
+	while (( after > before )); do
+		sleep 1
+		after=$(num_objects)
+		wait=$((wait + 1))
+		(( wait % 5 == 0 )) && echo "wait $wait seconds objects: $after"
+		if (( wait > 60 )); then
+			error "inode slab grew from $before to $after"
+		fi
+	done
+	echo "slab objects before: $before, after: $after"
+}
+run_test 76b "confirm clients recycle directory inodes properly ===="
export ORIG_CSUM=""
set_checksums()
sed -n '/pages per rpc/,/^$/p' |
awk '/'$pages':/ { reads += $2; writes += $6 }; \
END { print reads,writes }'))
- [ ${rpcs[0]} -ne $count ] && error "${rpcs[0]} != $count read RPCs" &&
- return 5
- [ ${rpcs[1]} -ne $count ] && error "${rpcs[1]} != $count write RPCs" &&
- return 6
-
- return 0
+ # allow one extra full-sized read RPC for async readahead
+ [[ ${rpcs[0]} == $count || ${rpcs[0]} == $((count + 1)) ]] ||
+ { error "${rpcs[0]} != $count read RPCs"; return 5; }
+ [[ ${rpcs[1]} == $count ]] ||
+ { error "${rpcs[1]} != $count write RPCs"; return 6; }
}
test_101g() {
skip "Limit is too small $LIMIT"
fi
- # Make LVF so higher that sleeping for $SLEEP is enough to _start_
- # killing locks. Some time was spent for creating locks. This means
- # that up to the moment of sleep finish we must have killed some of
- # them (10-100 locks). This depends on how fast ther were created.
- # Many of them were touched in almost the same moment and thus will
- # be killed in groups.
- local LVF=$(($MAX_HRS * 60 * 60 / $SLEEP * $LIMIT / $LRU_SIZE))
-
- # Use $LRU_SIZE_B here to take into account real number of locks
- # created in the case of CMD, LRU_SIZE_B != $NR in most of cases
- local LRU_SIZE_B=$LRU_SIZE
- log "LVF=$LVF"
+ # Make LVF so higher that sleeping for $SLEEP is enough to _start_
+ # killing locks. Some time was spent for creating locks. This means
+ # that up to the moment of sleep finish we must have killed some of
+ # them (10-100 locks). This depends on how fast ther were created.
+ # Many of them were touched in almost the same moment and thus will
+ # be killed in groups.
+ local LVF=$(($MAX_HRS * 60 * 60 / $SLEEP * $LIMIT / $LRU_SIZE * 100))
+
+ # Use $LRU_SIZE_B here to take into account real number of locks
+ # created in the case of CMD, LRU_SIZE_B != $NR in most of cases
+ local LRU_SIZE_B=$LRU_SIZE
+ log "LVF=$LVF"
local OLD_LVF=$($LCTL get_param -n $NSDIR.pool.lock_volume_factor)
log "OLD_LVF=$OLD_LVF"
$LCTL set_param -n $NSDIR.pool.lock_volume_factor $LVF
}
run_test 150d "Verify fallocate Size and Blocks - Non zero start"
+# Verify that fallocate actually reserves OST space: allocate ~90% of the
+# smallest OST (scaled by stripe count), check st_size and block usage,
+# then unlink and let the space drain.
+test_150e() {
+	[ "$ost1_FSTYPE" != ldiskfs ] && skip "non-ldiskfs backend"
+	[ $OST1_VERSION -ge $(version_code 2.13.55) ] ||
+		skip "Need OST version at least 2.13.55"
+
+	echo "df before:"
+	$LFS df
+	$LFS setstripe -c${OSTCOUNT} $DIR/$tfile ||
+		error "$LFS setstripe -c${OSTCOUNT} $DIR/$tfile failed"
+
+	# Find OST with Minimum Size
+	# NOTE(review): assumes field 4 of "$LFS df" is the Available column
+	# in KiB units — confirm if the df format changes
+	min_size_ost=$($LFS df | awk "/$FSNAME-OST/ { print \$4 }" |
+		sort -un | head -1)
+
+	# Get 90% of the available space (in KiB, matching fallocate -l...k)
+	local space=$(((min_size_ost * 90)/100 * OSTCOUNT))
+
+	fallocate -l${space}k $DIR/$tfile ||
+		error "fallocate ${space}k $DIR/$tfile failed"
+	echo "'fallocate -l ${space}k $DIR/$tfile' succeeded"
+
+	# get size immediately after fallocate. This should be correctly
+	# updated
+	# %b * %B = allocated blocks * block size, i.e. bytes actually used
+	local size=$(stat -c '%s' $DIR/$tfile)
+	local used=$(( $(stat -c '%b * %B' $DIR/$tfile) / 1024))
+
+	# Sleep for a while for statfs to get updated. And not pull from cache.
+	sleep 2
+
+	echo "df after fallocate:"
+	$LFS df
+
+	(( size / 1024 == space )) || error "size $size != requested $space"
+	[ "$ost1_FSTYPE" != ldiskfs ] || (( used >= space )) ||
+		error "used $used < space $space"
+
+	rm $DIR/$tfile || error "rm failed"
+	sync
+	wait_delete_completed
+
+	echo "df after unlink:"
+	$LFS df
+}
+run_test 150e "Verify 90% of available OST space consumed by fallocate"
+
+
#LU-2902 roc_hit was not able to read all values from lproc
function roc_hit_init() {
local list=$(comma_list $(osts_nodes))
dd if=$ref1 of=$file1 bs=16k &
local DD_PID=$!
- # Make sure dd starts to copy file
- while [ ! -f $file1 ]; do sleep 0.1; done
+ # Make sure dd starts to copy file, but wait at most 5 seconds
+ local loops=0
+ while [ ! -s $file1 -a $((loops++)) -lt 50 ]; do sleep 0.1; done
$LFS swap_layouts $file1 $file2
local rc=$?
error "Unexpected jobids when jobid_var=$JOBENV"
fi
- lctl set_param jobid_var=USER jobid_name="S.%j.%e.%u.%h.E"
- JOBENV="JOBCOMPLEX"
- JOBCOMPLEX="S.$USER.touch.$(id -u).$(hostname).E"
+ # test '%j' access to environment variable - if supported
+ if lctl set_param jobid_var=USER jobid_name="S.%j.%e.%u.%h.E"; then
+ JOBENV="JOBCOMPLEX"
+ JOBCOMPLEX="S.$USER.touch.$(id -u).$(hostname).E"
+
+ verify_jobstats "touch $DIR/$tfile" $SINGLEMDS
+ fi
+
+ # test '%j' access to per-session jobid - if supported
+ if lctl list_param jobid_this_session > /dev/null 2>&1
+ then
+ lctl set_param jobid_var=session jobid_name="S.%j.%e.%u.%h.E"
+ lctl set_param jobid_this_session=$USER
+
+ JOBENV="JOBCOMPLEX"
+ JOBCOMPLEX="S.$USER.touch.$(id -u).$(hostname).E"
- verify_jobstats "touch $DIR/$tfile" $SINGLEMDS
+ verify_jobstats "touch $DIR/$tfile" $SINGLEMDS
+ fi
}
run_test 205a "Verify job stats"
test_205b() {
job_stats="mdt.*.job_stats"
$LCTL set_param $job_stats=clear
- $LCTL set_param jobid_var=USER jobid_name="%e.%u"
+ # Setting jobid_var to USER might not be supported
+ $LCTL set_param jobid_var=USER || true
+ $LCTL set_param jobid_name="%e.%u"
env -i USERTESTJOBSTATS=foolish touch $DIR/$tfile.1
do_facet $SINGLEMDS $LCTL get_param $job_stats |
grep "job_id:.*foolish" &&
local stripe_index
local nr_files
+ # test with fewer files on ZFS
+ [ "$mds1_FSTYPE" == "zfs" ] && threshold=40
+
stack_trap "do_nodes $mdts $LCTL set_param \
mdt.*.dir_split_count=$saved_threshold"
stack_trap "do_nodes $mdts $LCTL set_param \
return 0
lowest_speedup=$(bc <<<"scale=2; $average_cache / 2")
- [ ${average_ladvise%.*} -gt $lowest_speedup ] ||
+ [[ ${average_ladvise%.*} > $lowest_speedup ]] ||
error_not_in_vm "Speedup with willread is less than " \
"$lowest_speedup%, got $average_ladvise%"
}
aiocp -a $PAGE_SIZE -b 64M -s 64M -f O_DIRECT $DIR/$tfile $aio_file
diff $DIR/$tfile $aio_file || "file diff after aiocp"
+
+ # make sure we don't crash and fail properly
+ aiocp -a 512 -b 64M -s 64M -f O_DIRECT $DIR/$tfile $aio_file &&
+ error "aio not aligned with PAGE SIZE should fail"
+
rm -rf $DIR/$tfile $aio_file
}
run_test 398d "run aiocp to verify block size > stripe size"
[ $blocks -gt 1000 ] && blocks=1000 # 1G in maximum
if [ "$read_write" = "read" ]; then
- truncate -s $(expr 1048576 \* $blocks) $DIR/$tfile
+ $TRUNCATE $DIR/$tfile $(expr 1048576 \* $blocks)
fi
local start_time=$(date +%s.%N)
run_test 401a "Verify if 'lctl list_param -R' can list parameters recursively"
test_401b() {
-	local save=$($LCTL get_param -n jobid_var)
-	local tmp=testing
+	# jobid_var may not allow arbitrary values, so use jobid_name
+	# if available
+	if $LCTL list_param jobid_name > /dev/null 2>&1; then
+		local testname=jobid_name tmp='testing%p'
+	else
+		local testname=jobid_var tmp=testing
+	fi
+
+	local save=$($LCTL get_param -n $testname)
+	# bad parameter names on either side of a good one must not stop
+	# processing of the good one
-	$LCTL set_param foo=bar jobid_var=$tmp bar=baz &&
+	$LCTL set_param foo=bar $testname=$tmp bar=baz &&
		error "no error returned when setting bad parameters"
-	local jobid_new=$($LCTL get_param -n foe jobid_var baz)
+	local jobid_new=$($LCTL get_param -n foe $testname baz)
	[[ "$jobid_new" == "$tmp" ]] || error "jobid tmp $jobid_new != $tmp"
-	$LCTL set_param -n fog=bam jobid_var=$save bat=fog
-	local jobid_old=$($LCTL get_param -n foe jobid_var bag)
+	$LCTL set_param -n fog=bam $testname=$save bat=fog
+	local jobid_old=$($LCTL get_param -n foe $testname bag)
	[[ "$jobid_old" == "$save" ]] || error "jobid new $jobid_old != $save"
}
run_test 401b "Verify 'lctl {get,set}_param' continue after error"
test_401c() {
-	local jobid_var_old=$($LCTL get_param -n jobid_var)
+	# jobid_var may not allow arbitrary values, so use jobid_name
+	# if available
+	if $LCTL list_param jobid_name > /dev/null 2>&1; then
+		local testname=jobid_name
+	else
+		local testname=jobid_var
+	fi
+
+	local jobid_var_old=$($LCTL get_param -n $testname)
	local jobid_var_new
+	# "set_param a=" (empty value) must fail and leave the value unchanged
-	$LCTL set_param jobid_var= &&
+	$LCTL set_param $testname= &&
		error "no error returned for 'set_param a='"
-	jobid_var_new=$($LCTL get_param -n jobid_var)
+	jobid_var_new=$($LCTL get_param -n $testname)
	[[ "$jobid_var_old" == "$jobid_var_new" ]] ||
-		error "jobid_var was changed by setting without value"
+		error "$testname was changed by setting without value"
+	# "set_param a" (no value at all) must likewise fail without changes
-	$LCTL set_param jobid_var &&
+	$LCTL set_param $testname &&
		error "no error returned for 'set_param a'"
-	jobid_var_new=$($LCTL get_param -n jobid_var)
+	jobid_var_new=$($LCTL get_param -n $testname)
	[[ "$jobid_var_old" == "$jobid_var_new" ]] ||
-		error "jobid_var was changed by setting without value"
+		error "$testname was changed by setting without value"
}
run_test 401c "Verify 'lctl set_param' without value fails in either format."
test_401d() {
-	local jobid_var_old=$($LCTL get_param -n jobid_var)
+	# jobid_var may not allow arbitrary values, so use jobid_name
+	# if available
+	if $LCTL list_param jobid_name > /dev/null 2>&1; then
+		local testname=jobid_name new_value='foo=bar%p'
+	else
+		# fixed typo: was "new_valuie", leaving $new_value unset so
+		# the set_param below ran with an empty value and failed
+		local testname=jobid_var new_value=foo=bar
+	fi
+
+	local jobid_var_old=$($LCTL get_param -n $testname)
	local jobid_var_new
-	local new_value="foo=bar"
-	$LCTL set_param jobid_var=$new_value ||
+	$LCTL set_param $testname=$new_value ||
		error "'set_param a=b' did not accept a value containing '='"
-	jobid_var_new=$($LCTL get_param -n jobid_var)
+	jobid_var_new=$($LCTL get_param -n $testname)
	[[ "$jobid_var_new" == "$new_value" ]] ||
		error "'set_param a=b' failed on a value containing '='"
-	# Reset the jobid_var to test the other format
-	$LCTL set_param jobid_var=$jobid_var_old
-	jobid_var_new=$($LCTL get_param -n jobid_var)
+	# Reset the $testname to test the other format
+	$LCTL set_param $testname=$jobid_var_old
+	jobid_var_new=$($LCTL get_param -n $testname)
	[[ "$jobid_var_new" == "$jobid_var_old" ]] ||
-		error "failed to reset jobid_var"
+		error "failed to reset $testname"
-	$LCTL set_param jobid_var $new_value ||
+	$LCTL set_param $testname $new_value ||
		error "'set_param a b' did not accept a value containing '='"
-	jobid_var_new=$($LCTL get_param -n jobid_var)
+	jobid_var_new=$($LCTL get_param -n $testname)
	[[ "$jobid_var_new" == "$new_value" ]] ||
		error "'set_param a b' failed on a value containing '='"
-	$LCTL set_param jobid_var $jobid_var_old
-	jobid_var_new=$($LCTL get_param -n jobid_var)
+	$LCTL set_param $testname $jobid_var_old
+	jobid_var_new=$($LCTL get_param -n $testname)
	[[ "$jobid_var_new" == "$jobid_var_old" ]] ||
-		error "failed to reset jobid_var"
+		error "failed to reset $testname"
}
run_test 401d "Verify 'lctl set_param' accepts values containing '='"
}
run_test 424 "simulate ENOMEM in ptl_send_rpc bulk reply ME attach"
+# With LRU resize disabled and lru_size capped at 100, creating many files
+# must not let per-namespace lock counts exceed the configured LRU size.
+test_425() {
+	test_mkdir -c -1 $DIR/$tdir
+	$LFS setstripe -c -1 $DIR/$tdir
+
+	lru_resize_disable "" 100
+	stack_trap "lru_resize_enable" EXIT
+
+	sleep 5
+
+	for i in $(seq $((MDSCOUNT * 125))); do
+		# fixed: was $DIR/$tdir/$tfile_$i, which expanded the unset
+		# variable "$tfile_" and reused the same "$i" names
+		local t=$DIR/$tdir/${tfile}_$i
+
+		dd if=/dev/zero of=$t bs=4K count=1 > /dev/null 2>&1 ||
+			error_noexit "Create file $t"
+	done
+	stack_trap "rm -rf $DIR/$tdir" EXIT
+
+	for oscparam in $($LCTL list_param ldlm.namespaces.*osc-[-0-9a-f]*); do
+		local lru_size=$($LCTL get_param -n $oscparam.lru_size)
+		local lock_count=$($LCTL get_param -n $oscparam.lock_count)
+
+		[ $lock_count -le $lru_size ] ||
+			error "osc lock count $lock_count > lru size $lru_size"
+	done
+
+	for mdcparam in $($LCTL list_param ldlm.namespaces.*mdc-*); do
+		local lru_size=$($LCTL get_param -n $mdcparam.lru_size)
+		local lock_count=$($LCTL get_param -n $mdcparam.lock_count)
+
+		[ $lock_count -le $lru_size ] ||
+			error "mdc lock count $lock_count > lru size $lru_size"
+	done
+}
+run_test 425 "lock count should not exceed lru size"
+
+
+# Exercise splice read/write paths via the splice-test helper in all four
+# modes: read, read+direct, write, write+direct (see LU-14045 gate above).
+test_426() {
+	splice-test -r $DIR/$tfile
+	splice-test -rd $DIR/$tfile
+	splice-test $DIR/$tfile
+	splice-test -d $DIR/$tfile
+}
+run_test 426 "splice test on Lustre"
+
prep_801() {
[[ $MDS1_VERSION -lt $(version_code 2.9.55) ]] ||
[[ $OST1_VERSION -lt $(version_code 2.9.55) ]] &&
}
run_test 802b "be able to set MDTs to readonly"
-test_803() {
+test_803a() {
[[ $MDSCOUNT -lt 2 ]] && skip_env "needs >= 2 MDTs"
[ $MDS1_VERSION -lt $(version_code 2.10.54) ] &&
skip "MDS needs to be newer than 2.10.54"
[ $after_used -le $((before_used + 1)) ] ||
error "after ($after_used) > before ($before_used) + 1"
}
-run_test 803 "verify agent object for remote object"
+run_test 803a "verify agent object for remote object"
+
+# Verify that getattr on remote objects is served from the client cache:
+# a second stat of the same dirs must not increase MDT getattr counters.
+test_803b() {
+	[[ $MDSCOUNT -lt 2 ]] && skip_env "needs >= 2 MDTs"
+	[ $MDS1_VERSION -lt $(version_code 2.13.56) ] &&
+		skip "MDS needs to be newer than 2.13.56"
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+
+	for i in $(seq 0 $((MDSCOUNT - 1))); do
+		$LFS mkdir -i $i $DIR/$tdir.$i || error "mkdir $tdir.$i"
+	done
+
+	local before=0
+	local after=0
+
+	local tmp
+
+	stat $DIR/$tdir.* >/dev/null || error "stat $tdir.*"
+	for i in $(seq 0 $((MDSCOUNT - 1))); do
+		# facets are 1-based (mds1..mdsN) while MDT indices are
+		# 0-based, so use mds$((i + 1)) for MDT000$i
+		tmp=$(do_facet mds$((i + 1)) $LCTL get_param \
+			mdt.*-MDT000$i.md_stats | awk '/getattr/ { print $2 }')
+		before=$((before + tmp))
+	done
+	stat $DIR/$tdir.* >/dev/null || error "stat $tdir.*"
+	for i in $(seq 0 $((MDSCOUNT - 1))); do
+		tmp=$(do_facet mds$((i + 1)) $LCTL get_param \
+			mdt.*-MDT000$i.md_stats | awk '/getattr/ { print $2 }')
+		after=$((after + tmp))
+	done
+
+	[ $before -eq $after ] || error "getattr count $before != $after"
+}
+run_test 803b "remote object can getattr from cache"
test_804() {
[[ $MDSCOUNT -lt 2 ]] && skip_env "needs >= 2 MDTs"
test_812a() {
[ $OST1_VERSION -lt $(version_code 2.12.51) ] &&
skip "OST < 2.12.51 doesn't support this fail_loc"
- [ "$SHARED_KEY" = true ] &&
- skip "OSC connections never go IDLE with Shared-Keys enabled"
$LFS setstripe -c 1 -i 0 $DIR/$tfile
# ensure ost1 is connected
test_812b() { # LU-12378
[ $OST1_VERSION -lt $(version_code 2.12.51) ] &&
skip "OST < 2.12.51 doesn't support this fail_loc"
- [ "$SHARED_KEY" = true ] &&
- skip "OSC connections never go IDLE with Shared-Keys enabled"
$LFS setstripe -c 1 -i 0 $DIR/$tfile || error "setstripe failed"
# ensure ost1 is connected
run_test 815 "zero byte tiny write doesn't hang (LU-12382)"
test_816() {
- [ "$SHARED_KEY" = true ] &&
- skip "OSC connections never go IDLE with Shared-Keys enabled"
-
$LFS setstripe -c 1 -i 0 $DIR/$tfile
# ensure ost1 is connected
stat $DIR/$tfile >/dev/null || error "can't stat"