int merge_chunk(struct brw_page **pga, struct niobuf_local *lnb,
int first, int count, char *merged, unsigned int *size);
void unmerge_chunk(struct brw_page **pga, struct niobuf_local *lnb, int first,
- int count, char *merged, unsigned int size);
+ int count, char *merged, unsigned int size,
+ int pages_per_chunk);
struct tgt_thread_big_cache {
/* these work out to MAX_BRW_SIZE / PAGE_SIZE * PTR_SIZE, so for
EXPORT_SYMBOL(merge_chunk);
void unmerge_chunk(struct brw_page **pga, struct niobuf_local *lnb, int first,
- int count, char *merged, unsigned int size)
+ int count, char *merged, unsigned int size,
+ int pages_per_chunk)
{
struct brw_page *brwpg;
struct page *vmpage;
*pg_len = PAGE_SIZE;
left -= PAGE_SIZE;
}
+ /* we just put data in this page, so set the rc */
+ if (lnb)
+ lnb[first + i].lnb_rc = *pg_len;
CDEBUG(D_SEC, "pg_len: %u, left %u\n",
*pg_len, left);
}
LASSERT(left == 0);
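+ /* pages of the chunk past the unmerged data received nothing from
+  * 'merged', so zero them rather than leave stale contents in place
+  */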
+ if (lnb) {
+ for (; i < pages_per_chunk; i++) {
+ CDEBUG(D_SEC, "no data in page %d at %llu, lnb_rc %d - clear page\n",
+ i, lnb[first + i].lnb_file_offset, lnb[first + i].lnb_rc);
+ memset(kmap(lnb[first + i].lnb_page), 0, PAGE_SIZE);
+ kunmap(lnb[first + i].lnb_page);
+ }
+ }
}
EXPORT_SYMBOL(unmerge_chunk);
unsigned int src_size;
int hdr_size;
int rc = 0;
+ int i;
ENTRY;
/* if this chunk isn't compressed, don't uncompress it */
if (!is_chunk_start(lnbs[lnb_start].lnb_page, &llch))
- RETURN(0);
+ GOTO(out, rc = 0);
/* compression type and level in the compressed data can
* be different from those set in the layout, because the client
*/
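/* unmerge into however many pages dst_size covers (rounded up to a full
 * page); pages_in_chunk lets unmerge_chunk() zero the rest of the chunk's
 * pages
 */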
unmerge_chunk(NULL, lnbs, lnb_start,
((dst_size - 1) >> PAGE_SHIFT) + 1,
- (char *) bounce_dst, dst_size);
+ (char *) bounce_dst, dst_size, pages_in_chunk);
out:
+ /* even if we don't successfully uncompress, we may have read pages
+ * which were beyond EOF, so we need to clear them in case we're going
+ * to write them out
+ */
+ for (i = lnb_start; i < lnb_start + pages_in_chunk; i++) {
+ /* if there's no data in this page, we must clear it */
+ if (lnbs[i].lnb_rc == 0) {
+ CDEBUG(D_SEC, "no data in page %d at %llu, clearing\n",
+ i, lnbs[i].lnb_file_offset);
+ memset(kmap(lnbs[i].lnb_page), 0, PAGE_SIZE);
+ kunmap(lnbs[i].lnb_page);
+ }
+ }
if (cc)
crypto_free_comp(cc);
RETURN(rc);
CDEBUG(D_SEC, "Decompressed size %u, pages %d\n", dst_size,
decompressed_pages);
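+ /* no lnb on this path, so pass NULL and 0 for pages_per_chunk; the
+  * lnb_rc update and trailing-page zeroing in unmerge_chunk() are skipped
+  */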
- unmerge_chunk(pga, NULL, i, decompressed_pages, dst, dst_size);
+ unmerge_chunk(pga, NULL, i, decompressed_pages, dst, dst_size,
+ 0);
/* start of the next chunk is at least compressed pages away */
next_chunk_min = i + compressed_pages - 1;