Whamcloud - gitweb
EX-7601 llite: round LDLM lock requests to chunk
author: Patrick Farrell <pfarrell@whamcloud.com>
Wed, 26 Jul 2023 16:34:49 +0000 (12:34 -0400)
committer: Andreas Dilger <adilger@whamcloud.com>
Thu, 7 Sep 2023 00:40:35 +0000 (00:40 +0000)
When we do IO with compression, we may need to 'fill' the
compression chunk, reading up pages which have already been
written to storage, so we can compress the whole chunk.

Doing this safely requires that any dlmlock we're using
always covers the full chunk.

The easiest way to do this is to round the entire locking
process to include leading or trailing compression chunks.

Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I3c365844561d0da909e6290f4b58ef2211c2d255
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/51266
Tested-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
lustre/llite/vvp_io.c

index 6cdfdaa..0f88a40 100644 (file)
@@ -583,6 +583,45 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                        ast_flags |= CEF_NEVER;
        }
 
+       /* for compressed files, we must round the locking to cover whole
+        * compression chunks if the IO is not chunk aligned
+        */
+       if (io->ci_compressed_file &&
+           (io->ci_cmpc_leading.e_start != -1 ||
+            io->ci_cmpc_trailing.e_end != -1)) {
+               loff_t leading_chunk_start;
+               loff_t trailing_chunk_end;
+               loff_t orig_start = start;
+               loff_t orig_end = end;
+
+               /* if the IO is not already chunk aligned, the leading or
+                * trailing values will be set, use them accordingly
+                */
+               if (io->ci_cmpc_leading.e_start != -1) {
+                       leading_chunk_start =
+                               io->ci_cmpc_leading.e_start << PAGE_SHIFT;
+                       start = leading_chunk_start;
+               }
+
+               if (io->ci_cmpc_trailing.e_end != -1) {
+                       /* 'end' is the index of the last page of the IO, so
+                        * to lock correctly, we must round it to the last byte
+                        * of that page - this is done by adding 1 to the page
+                        * index before converting it and then subtracting 1
+                        * from the bytes
+                        */
+                       trailing_chunk_end =
+                               (io->ci_cmpc_trailing.e_end + 1) << PAGE_SHIFT;
+                       trailing_chunk_end = trailing_chunk_end - 1;
+                       end = trailing_chunk_end;
+               }
+
+               CDEBUG(D_VFSTRACE,
+                      "Compressed file - rounding lock from [%llu, %llu) to [%llu, %llu)\n",
+                      orig_start, orig_end, start, end);
+
+       }
+
        result = vvp_mmap_locks(env, vio, io);
        if (result == 0)
                result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);