Whamcloud - gitweb
EX-7601 osc: handle partial chunks in decompress_request
authorPatrick Farrell <pfarrell@whamcloud.com>
Mon, 27 Nov 2023 21:07:49 +0000 (16:07 -0500)
committerAndreas Dilger <adilger@whamcloud.com>
Thu, 30 Nov 2023 17:15:07 +0000 (17:15 +0000)
Now that we have compression for incomplete chunks at the
end of files, decompress_request needs to handle these
chunks.  This patch modifies it to understand compressed
chunks which are smaller than chunk_size pages.

Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I877550fa0d418def406e0308392a5336ec9f3ab6
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/53160
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Artem Blagodarenko <ablagodarenko@ddn.com>
lustre/osc/osc_compress.c

index 90d878b..26765be 100644 (file)
@@ -319,6 +319,7 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
        enum ll_compr_type type;
        struct cl_page *clpage;
        struct crypto_comp *cc;
+       int next_chunk_min = 0;
        unsigned int src_size;
        unsigned int dst_size;
        int pages_per_chunk;
@@ -326,7 +327,6 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
        char *dst = NULL;
        int chunk_bits;
        int chunk_size;
-       int pages_left;
        int count = 0;
        int buf_bits;
        int rc = 0;
@@ -350,6 +350,10 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
                GOTO(out, rc);
 
        for (i = 0; i < page_count; i++) {
+               int decompressed_pages;
+               int compressed_pages;
+               int compressed_bytes;
+
                oap = brw_page2oap(pga[i]);
                CDEBUG(D_SEC, "checking page %d, offset %llu\n",
                       i, oap->oap_obj_off);
@@ -360,6 +364,8 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
                if (oap->oap_obj_off & (chunk_size - 1))
                        continue;
 
+               LASSERT(ergo(next_chunk_min, i >= next_chunk_min));
+
                if (!is_chunk_start(pga[i]->pg, &llch))
                        continue;
 
@@ -376,7 +382,7 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
                                CERROR(
                                       "chunk bits from storage (%d) and layout (%d) disagree\n",
                                       rpc_chunk_bits, chunk_bits);
-                               GOTO(out, rc = -EIO);
+                               GOTO(out, rc = -EUCLEAN);
                        }
 
                        CDEBUG(D_SEC, "chunk_size: %i, pages_per_chunk: %i\n",
@@ -389,13 +395,34 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
                        if (src == NULL || dst == NULL)
                                GOTO(out, rc = -ENOMEM);
                }
-               pages_left = min_t(int, pages_per_chunk, page_count - i);
+
+               compressed_bytes = llch->llch_compr_size + sizeof(*llch);
+               compressed_pages = (compressed_bytes >> PAGE_SHIFT) + 1;
+               CDEBUG(D_SEC, "compressed bytes %d compressed pages %d\n",
+                      compressed_bytes, compressed_pages);
+               /* must be enough pages left in the RPC to hold the compressed
+                * data, if not, the data from disk is probably corrupt
+                */
+               if (compressed_pages > page_count - 1) {
+                       CERROR("compressed pages from disk %d don't match pages in rpc %d\n",
+                              compressed_pages, page_count - 1);
+                       GOTO(out, rc = -EUCLEAN);
+               }
 
                CDEBUG(D_SEC, "Merge chunk [%i, %i], src: %px\n", i,
-                      i + pages_left - 1, src);
+                      i + compressed_pages - 1, src);
 
-               merge_chunk(pga, NULL, i, pages_left, src, &src_size);
+               merge_chunk(pga, NULL, i, compressed_pages, src, &src_size);
                LASSERT(src_size <= chunk_size);
+               /* if the bytes in the merged buffer don't match like this, we
+                * probably have an incomplete page, which shouldn't occur in
+                * CSDC currently (but could happen if there's bad data)
+                */
+               if (src_size != compressed_pages * PAGE_SIZE) {
+                       CERROR("buffer size from compressed pages (%u bytes) doesn't match number of compressed pages %d\n",
+                              src_size, compressed_pages);
+                       GOTO(out, rc = -EUCLEAN);
+               }
                dst_size = 2 * chunk_size;
                CDEBUG(D_SEC, "Compressed size %lu, type %i\n",
                       llch->llch_compr_size + sizeof(*llch), type);
@@ -407,11 +434,16 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
                if (rc)
                        GOTO(out, rc);
 
-               CDEBUG(D_SEC, "Decompressed size %u\n", dst_size);
-
                LASSERT(dst_size <= chunk_size);
-               unmerge_chunk(pga, NULL, i, pages_left, dst, dst_size);
+               decompressed_pages = ((dst_size - 1) >> PAGE_SHIFT) + 1;
+               CDEBUG(D_SEC, "Decompressed size %u, pages %d\n", dst_size,
+                      decompressed_pages);
+
+               unmerge_chunk(pga, NULL, i, decompressed_pages, dst, dst_size);
 
+               /* start of the next chunk is at least compressed pages away*/
+               next_chunk_min = i + compressed_pages - 1;
+               CDEBUG(D_SEC, "next chunk min %d\n", next_chunk_min);
                count++;
        }
        CDEBUG(D_SEC,