# record size (KBytes) ( 7168 max)
rszlo=${rszlo:-1024}
rszhi=${rszhi:-1024}
+rszmax=${rszmax:-4096}
# number of objects per OST
nobjlo=${nobjlo:-1}
echo $minusn "$*"
}
+version_code() {
+ # Encode a dotted version string (e.g. "2.3.61") into one integer,
+ # (major << 16) | (minor << 8) | patch, so versions compare with -lt/-gt.
+ # NOTE(review): assumes minor and patch each fit in 8 bits — TODO confirm.
+ # split arguments like "2.3.61" into "2", "3", "61"
+ eval set -- $(tr "[:punct:]" " " <<< $*)
+ echo -n "$((($1 << 16) | ($2 << 8) | $3))"
+}
+
+get_lustre_version() {
+ # Print the Lustre version string reported by "lctl get_param -n version"
+ # on host $1; defaults to the first entry of the unique_hosts array.
+ local host=${1:-${unique_hosts[0]}}
+ remote_shell $host $lctl get_param -n version |
+ awk '/^lustre:/ {print $2}'
+}
+
+# Check that the record size (KBytes) does not exceed the maximum bulk I/O
+# RPC size supported by the servers.
+check_record_size() {
+ # Servers older than 2.3.61 only support 1MB bulk RPCs, so clamp the
+ # global rszmax down before validating rszhi against it.
+ [ $(version_code $(get_lustre_version)) -lt $(version_code 2.3.61) ] &&
+ rszmax=1024
+
+ # Reject a high record size larger than the maximum bulk RPC size.
+ if [ "$rszhi" -gt "$rszmax" ]; then
+ echo "Test disk case support maximum ${rszmax}KB IO data" \
+ "(rszhi=$rszhi is too big), please use a smaller value."
+ return 1
+ fi
+ return 0
+}
+
# Customisation variables
#####################################################################
# One can change variable values in this section as per requirements
ndevs=$((ndevs+1))
done
if [ $case == "disk" ]; then
- if [ $rszhi -gt 1024 ]; then
- echo "Test disk case support maximum 1024KB IO data" \
- "(rszhi=$rszhi is too big) please use a smaller value."
- exit 1
- fi
for ((i = 0; i < $ndevs; i++)); do
ost_names[$i]=${client_names[$i]}
done
# disable portals debug and get obdecho loaded on all relevant hosts
unique_hosts=(`unique ${host_names[@]}`)
load_obdechos
+
+# For the disk case, bail out early (via cleanup) when the requested
+# record size exceeds the servers' maximum bulk I/O RPC size.
+if [ $case == "disk" ]; then
+ check_record_size || cleanup ${PIPESTATUS[0]}
+fi
+
pidcount=0
for host in ${unique_hosts[@]}; do
host_vmstatf=${vmstatf}_${host}
/* Reset oti otherwise it would confuse ldiskfs. */
memset(oti, 0, sizeof(*oti));
+
+ /* Reuse env context. */
+ lu_context_exit((struct lu_context *)&env->le_ctx);
+ lu_context_enter((struct lu_context *)&env->le_ctx);
}
out:
struct ofd_thread_info *info;
int rc = 0;
+ /* Reject oversized bulk requests up front instead of LASSERTing
+ * later (LU-2598): a bulk RPC may not carry more pages than
+ * PTLRPC_MAX_BRW_PAGES. Note the trailing space before the string
+ * continuation; without it the concatenated literals would read
+ * "...exceeds themaximum...". */
+ if (*nr_local > PTLRPC_MAX_BRW_PAGES) {
+ CERROR("%s: bulk has too many pages %d, which exceeds the "
+ "maximum pages per RPC of %d\n",
+ exp->exp_obd->obd_name, *nr_local, PTLRPC_MAX_BRW_PAGES);
+ RETURN(-EPROTO);
+ }
+
rc = lu_env_refill((struct lu_env *)env);
LASSERT(rc == 0);
info = ofd_info_init(env, exp);
obdecho_test() {
 # Exercise an echo client against OBD device $1 on facet $2: attach,
 # create an object, getattr, bulk-write $3 pages (default 64), then
 # destroy it.  rc records the first failing step.
 local OBD=$1
 local node=$2
 # Page count for test_brw; callers may pass a huge value (e.g.
 # test_180c) to exercise oversized bulk I/O RPCs.
 local pages=${3:-64}
 local rc=0
 local id
 do_facet $node "$LCTL attach echo_client ec ec_uuid" || rc=1
 echo "New object id is $id"
 [ $rc -eq 0 ] && { do_facet $node "$LCTL --device ec getattr $id" ||
 rc=4; }
- [ $rc -eq 0 ] && { do_facet $node "$LCTL --device ec test_brw 10 w v 64 $id" ||
- rc=4; }
+ [ $rc -eq 0 ] && { do_facet $node "$LCTL --device ec " \
+ "test_brw 10 w v $pages $id" || rc=4; }
 [ $rc -eq 0 ] && { do_facet $node "$LCTL --device ec destroy $id 1" ||
 rc=4; }
 [ $rc -eq 0 -o $rc -gt 2 ] && { do_facet $node "$LCTL --device ec " \
}
run_test 180b "test obdecho directly on obdfilter"
+test_180c() { # LU-2598
+ # Regression test: drive a 64MB bulk I/O (16384 pages) directly at an
+ # obdfilter target through obdecho and verify the server rejects it
+ # gracefully rather than hitting an LASSERT.
+ [ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+ [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.0) ]] &&
+ skip "Need MDS version at least 2.4.0" && return
+
+ local rc=0
+ local rmmod_remote=false
+ local pages=16384 # 64MB bulk I/O RPC size
+ local target
+
+ # Load obdecho on the OST; only unload later if we loaded it here.
+ do_rpc_nodes $(facet_active_host ost1) load_module obdecho/obdecho &&
+ rmmod_remote=true || error "failed to load module obdecho"
+
+ # Pick the first obdfilter device on ost1 as the target.
+ target=$(do_facet ost1 $LCTL dl | awk '/obdfilter/ {print $4}'|head -1)
+ if [[ -n $target ]]; then
+ obdecho_test "$target" ost1 "$pages" ||
+ rc=${PIPESTATUS[0]}
+ else
+ echo "there is no obdfilter target on ost1"
+ rc=2
+ fi
+ # Best-effort module unload; never let rmmod failure mask $rc.
+ $rmmod_remote && do_facet ost1 "rmmod obdecho" || true
+ return $rc
+}
+run_test 180c "test huge bulk I/O size on obdfilter, don't LASSERT"
+
test_181() { # bug 22177
test_mkdir -p $DIR/$tdir || error "creating dir $DIR/$tdir"
# create enough files to index the directory