struct niobuf_local *lnb, int chunk_size)
{
struct ofd_object *fo;
- int i, j, rc, tot_bytes = 0;
enum dt_bufs_type dbt = DT_BUFS_TYPE_READ;
- bool chunk_rounded = false;
+ bool compr_unaligned_read = false;
int maxlnb = *nr_local;
- __u64 begin, end;
__u64 prev_buf_end = 0;
+ int tot_bytes = 0;
+ __u64 begin;
+ __u64 end;
+ int rc;
+ int i;
+ int j;
ENTRY;
LASSERT(env != NULL);
end = 0;
for (*nr_local = 0, i = 0, j = 0; i < niocount; i++) {
- __u64 orig_buf_start;
- __u64 orig_buf_end;
+ __u64 orig_start;
__u64 buf_start;
+ __u64 orig_end;
__u64 buf_end;
int buf_len;
buf_start = rnb[i].rnb_offset;
buf_end = rnb[i].rnb_offset + rnb[i].rnb_len;
- orig_buf_start = buf_start;
- orig_buf_end = buf_end;
+ orig_start = buf_start;
+ orig_end = buf_end;
CDEBUG(D_SEC, "rnb %d buf_start %llu, buf_end %llu\n",
i, buf_start, buf_end);
/* compressed reads must be rounded to cover whole chunks */
if (chunk_size) {
chunk_round(&buf_start, &buf_end, chunk_size);
- /* if we rounded the chunk, then we're going to do
- * decompression and dt_read_prep needs to know this
- */
- if (buf_start != orig_buf_start ||
- buf_end != orig_buf_end)
- chunk_rounded = true;
/* rounded rnbs can overlap at the chunk level, but it's
 * important we don't allocate multiple buffers for the
 * same page, so move the start of this buffer to the
 * end of the previous one
 */
if (buf_start < prev_buf_end) {
CDEBUG(D_SEC,
- "overlaps previous rounded read, start %llu < prev end %llu\n",
- buf_start, prev_buf_end);
+ "overlaps previous rounded IO, start %llu < prev end %llu (end %llu, orig start %llu, orig end %llu)\n",
+ buf_start, prev_buf_end, buf_end,
+ orig_start, orig_end);
buf_start = prev_buf_end;
/* two rnbs may be entirely inside the same
 * chunk, in which case we're already doing IO
 * for that chunk, so skip it
 */
if (buf_start == buf_end) {
CDEBUG(D_SEC,
- "read inside previous rounded read, skipping\n");
+ "IO inside previous compression chunk, skipping\n");
prev_buf_end = buf_end;
continue;
}
}
prev_buf_end = buf_end;
+
+ /* if we rounded the chunk, then we're going to do
+ * decompression and dt_read_prep needs to know this
+ */
+ if (buf_start != orig_start ||
+ buf_end != orig_end)
+ compr_unaligned_read = true;
}
buf_len = buf_end - buf_start;
LASSERT(*nr_local > 0 && *nr_local <= PTLRPC_MAX_BRW_PAGES);
rc = dt_read_prep(env, ofd_object_child(fo), lnb, *nr_local,
- chunk_rounded);
+ compr_unaligned_read);
if (unlikely(rc))
GOTO(buf_put, rc);
ofd_read_unlock(env, fo);
&& (eof_rnb == INT_MAX)) {
chunk_round(&buf_start, &buf_end, chunk_size);
+ /* rounded rnbs can overlap at the chunk level, but it's
+ * important we don't allocate multiple buffers for the
+ * same page, so move the start of this buffer to the
+ * end of the previous one
+ */
if (buf_start < prev_buf_end) {
CDEBUG(D_SEC,
- "buf_start %llu orig_start %llu buf_end %llu orig_end %llu\n",
- buf_start, orig_start, buf_end,
- orig_end);
+ "overlaps previous rounded IO, start %llu < prev end %llu (end %llu, orig start %llu, orig end %llu)\n",
+ buf_start, prev_buf_end, buf_end,
+ orig_start, orig_end);
start_rounded_up = true;
buf_start = prev_buf_end;
/* two rnbs may be entirely inside the same
 * chunk, in which case we're already doing IO
 * for that chunk, so skip it
 */
prev_buf_end = buf_end;
- if (buf_start == buf_end)
+ if (buf_start == buf_end) {
+ CDEBUG(D_SEC,
+ "IO inside previous compression chunk, skipping\n");
continue;
+ }
}
CDEBUG(D_SEC,
"buf_start %llu orig_start %llu buf_end %llu orig_end %llu\n",