pg = cpga[dst_page];
pg->off = pga[src_from]->off + chunk_offset;
- if ((dst_size - chunk_offset) < PAGE_SIZE)
- pg->count = dst_size - chunk_offset;
- else
+ /* every page in a compressed IO must start at a page-aligned
+ * offset (pg->off & ~PAGE_MASK is the in-page offset), i.e.
+ * the client only does compression for aligned IO
+ */
+ LASSERTF((pg->off & ~PAGE_MASK) == 0,
+ "pg->off %llu\n", pg->off);
+ /* InfiniBand requires that there be no gaps in memory when
+ * mapping an RDMA, so when compressing we always report the
+ * page size as PAGE_SIZE. This means we copy extra data for
+ * the last page, but that is harmless, and the compression
+ * header saves us from reading garbage.
+ *
+ * The interesting case is compression of the last chunk,
+ * which may be partial. Even then we send the whole page,
+ * because the actual file size is set on the server, so any
+ * trailing bytes are ignored.
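+ *
+ * For example (assuming 4 KiB pages, for illustration only):
+ * a partial final chunk occupying 5 KiB is sent as two full
+ * pages with count == PAGE_SIZE, and the file size recorded
+ * on the server discards the trailing 3 KiB.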
+ *
+ * TODO: after compressing, zero the remaining part of any
+ * incomplete page (deferred to a later patch).
+ */
+ if (dst) {
pg->count = PAGE_SIZE;
+ pg->pg = mem_to_page(dst + chunk_offset);
+ } else {
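+ /* no compressed output buffer for this chunk: pass the
+ * source page through unchanged
+ */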
+ pg->count = pga[src_page]->count;
+ pg->pg = pga[src_page]->pg;
+ }
/*
 * Compression loses the per-page flags, so use the flag
 * of the first page in the chunk for all pages.
 */
pg->flag = pga[src_from]->flag;
- if (dst)
- pg->pg = mem_to_page(dst + chunk_offset);
- else
- pg->pg = pga[src_page]->pg;
if (fill_bits & CPGA_FILL_ENCRYPTED) {
if (fill_bits & CPGA_FILL_DIRECTIO) {