struct niobuf_remote *rnb,
struct niobuf_local *lnb,
struct obd_ioobj *obj, int npages,
- enum ll_compr_type type, int lvl,
+ int eof_rnb, enum ll_compr_type type, int lvl,
int chunk_bits, bool write);
int (*o_preprw)(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa, int objcount,
int ofd_decompress_read(const struct lu_env *env, struct obd_export *exp,
struct obdo *oa, struct niobuf_remote *rnb,
struct niobuf_local *lnb, struct obd_ioobj *obj,
- int npages, enum ll_compr_type type, int lvl,
- int chunk_bits, bool write);
+ int npages, int eof_rnb, enum ll_compr_type type,
+ int lvl, int chunk_bits, bool write);
int ofd_preprw(const struct lu_env *env, int cmd, struct obd_export *exp,
struct obdo *oa, int objcount, struct obd_ioobj *obj,
struct niobuf_remote *rnb, int *nr_local,
bool compr_unaligned_write = false;
__u64 prev_buf_end = 0;
int maxlnb = *nr_write;
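+ /* index of first rnb past EOF; INT_MAX means none seen yet */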
+ int eof_rnb = INT_MAX;
int tot_bytes = 0;
int nr_read = 0;
__u64 begin;
* that data. This shouldn't be too bad, since read beyond EOF
* is basically free.
*/
- if (chunk_size && !(rnb[i].rnb_flags & OBD_BRW_COMPRESSED)) {
+ /* If a previous rnb was already past EOF, there's no need to keep
+  * checking.
+  */
+ if (chunk_size && !(rnb[i].rnb_flags & OBD_BRW_COMPRESSED) &&
+     eof_rnb == INT_MAX) {
chunk_round(&buf_start, &buf_end, chunk_size);
if (buf_start < prev_buf_end) {
* read-modify-write, so no rounding required
*/
if (buf_start >= la->la_size) {
+ CDEBUG(D_SEC,
+ "rnb %d from %llu to %llu (chunk rounded: %llu to %llu) is past eof\n",
+ i, orig_start, orig_end, buf_start, buf_end);
buf_start = orig_start;
buf_end = orig_end;
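+ /* note the first past-EOF rnb so later rnbs can skip
+  * rounding and decompression
+  */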
+ eof_rnb = i;
} else {
compr_unaligned_write = true;
}
* overlap, then we ignore the first chunk - it's being handled
* as part of the previous rnb
*/
- if (buf_start != orig_start && !start_rounded_up) {
+ CDEBUG(D_SEC, "i: %d, eof_rnb %d, test %d\n", i, eof_rnb, i < eof_rnb);
+ if (i < eof_rnb && buf_start != orig_start && !start_rounded_up) {
first_chunk_start_idx = j;
CDEBUG(D_SEC,
"buf count %d buf_start %llu orig_start %llu, first_chunk_start_idx %d\n",
* the end of this rnb is unaligned, so we need to read the
* chunk there. map it to the read lnb
*/
- if (buf_end != orig_end) {
+ if (i < eof_rnb && buf_end != orig_end) {
/* calculate the start index of the last chunk */
int chunk_start_idx = j + rc - pages_per_chunk;
if (unlikely(rc != 0))
GOTO(err, rc);
rc = ofd_decompress_read(env, exp, oa, rnb, read_lnb, obj,
- nr_read, type, lvl, chunk_bits,
- true);
+ nr_read, eof_rnb, type, lvl,
+ chunk_bits, true);
if (unlikely(rc != 0))
GOTO(err, rc);
/* read_prep sets lnb_rc if it read data, or on error, but the
int ofd_decompress_read(const struct lu_env *env, struct obd_export *exp,
struct obdo *oa, struct niobuf_remote *rnb,
struct niobuf_local *lnb, struct obd_ioobj *obj,
- int npages, enum ll_compr_type type, int lvl,
- int chunk_bits, bool write)
+ int npages, int eof_rnb, enum ll_compr_type type,
+ int lvl, int chunk_bits, bool write)
{
struct ofd_device *ofd = ofd_exp(exp);
struct lu_fid *fid = &oa->o_oi.oi_fid;
}
rnb_start = rnb[i].rnb_offset;
rnb_end = rnb[i].rnb_offset + rnb[i].rnb_len;
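+ /* nothing from the first past-EOF rnb onward exists on disk,
+  * so there is no more data to decompress
+  */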
+ if (i == eof_rnb) {
+ CDEBUG(D_SEC,
+ "rnb %d at %llu to %llu is past EOF, so no need to decompress\n",
+ i, rnb_start, rnb_end);
+ break;
+ }
chunk_start = rnb_start;
chunk_end = rnb_end;