static bool tried_llz4_load = false;
static bool tried_lgzip_load = false;
+/* rounds buf_start down and buf_end up to chunk_size boundaries */
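+/* e.g. with chunk_size = 65536, a span of [70000, 150000) rounds to [65536, 196608) */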
+void chunk_round(__u64 *buf_start, __u64 *buf_end, int chunk_size)
+{
+ __u64 orig_start = *buf_start;
+ __u64 orig_end = *buf_end;
+
+ *buf_start = round_down(*buf_start, chunk_size);
+ *buf_end = round_up(*buf_end, chunk_size);
+
+ if (*buf_start != orig_start || *buf_end != orig_end) {
+ CDEBUG(D_SEC, "ROUNDED: buf_start %llu, buf_end %llu\n",
+ *buf_start, *buf_end);
+ }
+}
+EXPORT_SYMBOL(chunk_round);
+
static inline const char *crypto_name_from_type(enum ll_compr_type type,
unsigned int level)
{
enum dt_bufs_type dbt = DT_BUFS_TYPE_READ;
int maxlnb = *nr_local;
__u64 begin, end;
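+ /* end of the previous chunk-rounded buffer, used below to avoid
+ * allocating buffers twice for pages in overlapping chunks
+ */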
+ __u64 prev_buf_end = 0;
ENTRY;
LASSERT(env != NULL);
end = 0;
for (*nr_local = 0, i = 0, j = 0; i < niocount; i++) {
+ __u64 buf_start;
+ __u64 buf_end;
+ int buf_len;
+
begin = min_t(__u64, begin, rnb[i].rnb_offset);
end = max_t(__u64, end, rnb[i].rnb_offset + rnb[i].rnb_len);
+ CDEBUG(D_SEC, "begin %llu, end %llu\n", begin, end);
if (OBD_FAIL_CHECK(OBD_FAIL_OST_2BIG_NIOBUF))
rnb[i].rnb_len = 100 * 1024 * 1024;
- rc = dt_bufs_get(env, ofd_object_child(fo), lnb + j,
- rnb[i].rnb_offset, rnb[i].rnb_len, maxlnb,
- dbt);
+ buf_start = rnb[i].rnb_offset;
+ buf_end = rnb[i].rnb_offset + rnb[i].rnb_len;
+
+ CDEBUG(D_SEC, "buf_start %llu, buf_end %llu\n", buf_start,
+ buf_end);
+
+ /* compressed reads must be rounded to cover whole chunks */
+ if (chunk_size) {
+ chunk_round(&buf_start, &buf_end, chunk_size);
+ /* unaligned reads on compressed files are not supported
+ * yet
+ */
+ if (buf_start != rnb[i].rnb_offset ||
+ buf_end != rnb[i].rnb_offset + rnb[i].rnb_len)
+ GOTO(buf_put, rc = -EINVAL);
+
+ /* rounded rnbs can overlap at the chunk level, but it's
+ * important we don't allocate multiple buffers for the
+ * same page, so move the start of this buffer to the
+ * end of the previous one
+ */
+ if (buf_start < prev_buf_end) {
+ buf_start = prev_buf_end;
+ /* two rnbs may be entirely inside the same
+ * chunk, in which case we're already doing IO
+ * for that chunk, so skip it
+ */
+ if (buf_start == buf_end) {
+ prev_buf_end = buf_end;
+ continue;
+ }
+ }
+ prev_buf_end = buf_end;
+ }
+
+ buf_len = buf_end - buf_start;
+
+ rc = dt_bufs_get(env, ofd_object_child(fo), lnb + j, buf_start,
+ buf_len, maxlnb, dbt);
if (unlikely(rc < 0))
GOTO(buf_put, rc);
LASSERT(rc <= PTLRPC_MAX_BRW_PAGES);
io_start = remote_nb[0].rnb_offset;
io_end = remote_nb[niocount - 1].rnb_offset +
remote_nb[niocount - 1].rnb_len;
- chunk_start = round_down(io_start, chunk_size);
- chunk_end = round_up(io_end, chunk_size);
+ chunk_start = io_start;
+ chunk_end = io_end;
+ chunk_round(&chunk_start, &chunk_end, chunk_size);
CDEBUG(D_SEC,
"io_start: %llu io_end: %llu, chunk_start %llu, chunk_end %llu\n",