* part of the incomplete pages. This is a TODO for another
* patch.
*/
- if (dst) /* compressed page */ {
- pg->count = PAGE_SIZE;
- pg->pg = mem_to_page(dst + chunk_offset);
- /* we get flags from the first page in the chunk and
- * add COMPRESSED
- */
- pg->flag = pga[src_from]->flag | OBD_BRW_COMPRESSED;
- } else {
- pg->count = pga[src_page]->count;
- pg->pg = pga[src_page]->pg;
- pg->flag = pga[src_page]->flag;
- }
+ pg->count = PAGE_SIZE;
+ pg->pg = mem_to_page(dst + chunk_offset);
+ /* we get flags from the first page in the chunk and
+ * add COMPRESSED
+ */
+ pg->flag = pga[src_from]->flag | OBD_BRW_COMPRESSED;
CDEBUG(D_SEC, "off 0x%llx, flag %x, pg %p, count %u\n",
pg->off, pg->flag, pg->pg, pg->count);
struct cl_page *clpage;
struct crypto_comp *cc;
enum ll_compr_type type;
- bool compressed = false;
unsigned int src_size;
unsigned int dst_size;
- int chunk_start = 0;
int chunk_count = 0;
int pages_per_chunk;
int dest_buf_bits;
int chunk_size;
int chunk_bits;
int cpga_i = 0;
+ int pga_i = 0;
int rc = 0;
- int pga_i;
int lvl;
ENTRY;
chunk_size = (1 << chunk_bits);
pages_per_chunk = chunk_size / PAGE_SIZE;
- src_buf_bits = chunk_bits;
+ src_buf_bits = chunk_bits + 1;
dest_buf_bits = chunk_bits + 1;
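+ /* get a compression handle (cc) for the requested type and level */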
rc = alloc_compr(obd_name, &type, lvl, &cc, false);
CDEBUG(D_SEC, "Compression type %i, level %i\n", type, lvl);
- for (pga_i = 0; pga_i < *page_count; pga_i++) {
- if ((pga_i + 1 - chunk_start == pages_per_chunk) ||
- (pga_i == *page_count - 1) ||
- !can_merge_pages(pga[pga_i], pga[pga_i + 1])) {
- CDEBUG(D_SEC, "Chunk [%i,%i]\n", chunk_start, pga_i);
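+ /* walk the page array looking for chunk-aligned, mergeable
+ * runs of pages: full chunks (and partial chunks ending
+ * exactly at KMS) are compressed, everything else is passed
+ * through to the new pga unmodified
+ */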
+ while (pga_i < *page_count) {
+ struct brw_page *pg = pga[pga_i];
+ struct brw_page *pg_prev = NULL;
+ struct brw_page *pg_last;
+ bool chunk_unmergeable = false;
+ bool compress_this = false;
+ bool compressed = false;
+ int chunk_start = -1;
+ int chunk_len = 1;
+ int curr;
+
+ /* if this page isn't aligned to chunk start, skip it */
+ if (pg->off & (chunk_size - 1))
+ goto skip;
+ /* this is a potential chunk start! */
+ CDEBUG(D_SEC, "chunk aligned page %d at %llu\n",
+ pga_i, pg->off);
+ chunk_start = pga_i;
+ for (curr = chunk_start; curr < *page_count; curr++) {
+ struct brw_page *pg_curr;
+
+ pg_curr = pga[curr];
+ chunk_len = curr - chunk_start + 1;
+
+ /* these pages can't be merged, so can't be in the same
+ * chunk
+ */
+ if (pg_prev && !can_merge_pages(pg_prev, pg_curr)) {
+ CDEBUG(D_SEC,
+ "can't merge page %d with prev\n",
+ curr);
+ if (pg_prev->off + pg_prev->count !=
+ pg_curr->off) {
+ CDEBUG(D_SEC,
+ "gap between page %d at %llu and %d at %llu\n",
+ curr - 1, pg_prev->off, curr, pg_curr->off);
+ }
+ /* the page that failed to merge is not part of
+ * this chunk, so drop it from the length
+ */
+ chunk_len--;
+ chunk_unmergeable = true;
+ break;
+ }
+
+ /* chunk is full, stop here */
+ if (chunk_len == pages_per_chunk) {
+ CDEBUG(D_SEC, "chunk full, [%d, %d]\n",
+ pga_i, curr);
+ break;
+ }
+ pg_prev = pg_curr;
+ }
+ /* last page in this chunk (could be == first, that's OK) */
+ pg_last = pga[chunk_start + chunk_len - 1];
+ /* a full chunk can always be compressed */
+ if (chunk_len == pages_per_chunk) {
+ compress_this = true;
+ } else if (!chunk_unmergeable &&
+ pg_last->off + pg_last->count == kms) {
+ /* if the write end is equal to KMS, this write - which we
+ * already verified starts at a chunk boundary - is the
+ * furthest write to this file and can be compressed.
+ * (This write has already been incorporated into KMS, hence
+ * we check the end for equality. We could use >=, but ==
+ * should always hold.)
+ */
+ compress_this = true;
+ CDEBUG(D_SEC,
+ "Chunk starting at %llu (pages [%d, %d]) matches kms %llu, compressing.\n",
+ pga[chunk_start]->off, pga_i, pga_i + chunk_len - 1, kms);
+ }
+ CDEBUG(D_SEC,
+ "chunk start at %llu, chunk end at %llu, kms %llu\n",
+ pga[chunk_start]->off, pg_last->off + pg_last->count, kms);
+
+ if (compress_this) {
+ CDEBUG(D_SEC,
+ "compressing chunk from page [%d, %d], off [%llu, %llu]\n",
+ chunk_start, chunk_start + chunk_len - 1,
+ pga[chunk_start]->off, pg_last->off + pg_last->count);
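+ /* compression output can be larger than the input in the
+ * worst case, so the destination buffer is sized at twice
+ * the chunk size (dest_buf_bits is chunk_bits + 1)
+ */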
dst_size = 2 * chunk_size;
sptlrpc_pool_get_pages(&dst, dest_buf_bits);
if (dst == NULL)
GOTO(out, rc = -ENOMEM);
- merge_chunk(pga, NULL, chunk_start,
- pga_i + 1 - chunk_start, src, &src_size);
+ merge_chunk(pga, NULL, chunk_start, chunk_len, src,
+ &src_size);
compressed = compress_chunk(obd_name, cc, src,
src_size, dst,
&dst_size, type,
lvl, chunk_bits);
- if (!compressed)
- dst_size = src_size;
-
- CDEBUG(D_SEC,
- "%s: rc %d: inode "DFID"\n",
- obd_name, rc, oa->o_parent_seq,
- oa->o_parent_oid, oa->o_parent_ver);
-
- CDEBUG(D_SEC,
- "Compressed %u, plain %u\n",
- dst_size, src_size);
-
- rc = fill_cpga(*cpga, pga, compressed ? dst : NULL,
- chunk_start, cpga_i, dst_size);
-
- if (compressed) {
- CDEBUG(D_SEC,
- "Compressed %u, plain %u\n",
- dst_size, src_size);
-
- (*cpga)[cpga_i]->bp_cmp_chunk = dst;
- (*cpga)[cpga_i]->bp_cmp_chunk_size =
- dest_buf_bits;
- } else {
+ CDEBUG(D_SEC, "%s chunk [%d, %d]\n",
+ compressed ? "compressed" : "couldn't compress",
+ chunk_start, chunk_start + chunk_len - 1);
+ /* if we failed to compress, free memory and handle
+ * this page as normal
+ */
+ if (!compressed) {
sptlrpc_pool_put_pages(&dst, dest_buf_bits);
+
+ GOTO(skip, compressed);
}
+ rc = fill_cpga(*cpga, pga, dst, chunk_start, cpga_i,
+ dst_size);
if (rc)
GOTO(out, rc);
+ CDEBUG(D_SEC, "Compressed %u, raw %u\n",
+ dst_size, src_size);
+ /* store a pointer to the memory for this chunk so it
+ * can be freed later
+ */
+ (*cpga)[cpga_i]->bp_cmp_chunk = dst;
+ (*cpga)[cpga_i]->bp_cmp_chunk_size =
+ dest_buf_bits;
+ /* move cpga_i along by the number of pages occupied by
+ * the compressed data
+ */
cpga_i += ((dst_size - 1) >> PAGE_SHIFT) + 1;
- chunk_count++;
- chunk_start = pga_i + 1;
+ /* and move pga_i along past the end of this chunk */
+ pga_i += chunk_len;
+ }
+skip:
+ /* we didn't do compression here, so point this page in the
+ * new (compressed) pga at the data from the original pga
+ */
+ if (!compressed) {
+ struct brw_page *cpg;
+
+ CDEBUG(D_SEC, "did not compress page %d\n", pga_i);
+ OBD_ALLOC_PTR((*cpga)[cpga_i]);
+ if ((*cpga)[cpga_i] == NULL)
+ GOTO(out, rc = -ENOMEM);
+ cpg = (*cpga)[cpga_i];
+ *cpg = *pg;
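+ /* shallow copy: the new pga entry reuses the original
+ * page, with no compressed chunk buffer attached
+ */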
+ pga_i++;
+ cpga_i++;
}
}