Whamcloud - gitweb
EX-7601 llite: Compute compression chunk ranges
authorPatrick Farrell <pfarrell@whamcloud.com>
Wed, 5 Jul 2023 19:12:07 +0000 (15:12 -0400)
committerAndreas Dilger <adilger@whamcloud.com>
Thu, 7 Sep 2023 00:39:20 +0000 (00:39 +0000)
Determine the edges of any leading and trailing compression
chunks touched by this IO and store them in the cl_io
struct.

The functionality in this patch also allows us to adjust
the lock and read rounding to do them more intelligently;
this will be done in a future patch.

Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I526563ea347fb0246f97f3532b823c4345c3fa27
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/51324
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
lustre/include/cl_object.h
lustre/llite/file.c
lustre/lov/lov_io.c
lustre/obdclass/cl_io.c

index 2b32eb6..965160c 100644 (file)
@@ -1939,6 +1939,12 @@ struct cl_io {
                        time64_t                 lm_next_rpc_time;
                } ci_misc;
         } u;
+       /* start of compression chunk touched by first byte of this IO, used
+        * to address the full chunk where required
+        */
+       struct lu_extent     ci_cmpc_leading;
+       /* end of compression chunk touched by last byte of this IO */
+       struct lu_extent     ci_cmpc_trailing;
         struct cl_2queue     ci_queue;
         size_t               ci_nob;
         int                  ci_result;
index 3ba170d..f276f1b 100644 (file)
@@ -1792,8 +1792,10 @@ restart:
        else
                per_bytes = min(max_io_pages << PAGE_SHIFT, count);
        partial_io = per_bytes < count;
+       /* this clears the cl_io entirely (ie, memset to 0) */
        io = vvp_env_thread_io(env);
        ll_io_init(io, file, iot, args);
+       /* set cl_io members potentially preserved from previous IO */
        io->ci_dio_aio = ci_dio_aio;
        io->ci_dio_lock = dio_lock;
        io->ci_ndelay_tried = retried;
index 63cc60c..905e60f 100644 (file)
@@ -499,12 +499,14 @@ static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,
        RETURN(0);
 }
 
+static struct lov_stripe_md_entry *lov_io_lsme_at(const struct lov_io *lio,
+                                                 const pgoff_t page_index);
 static int lov_io_slice_init(struct lov_io *lio,
                             struct lov_object *obj, struct cl_io *io)
 {
-       int index;
        int result = 0;
        bool rdonly;
+       int index;
        ENTRY;
 
        io->ci_result = 0;
@@ -679,6 +681,81 @@ static int lov_io_slice_init(struct lov_io *lio,
        EXIT;
 
 out:
+       /* we only do this when there's no need to instantiate the layout, ie,
+        * when result == 0
+        */
+       if (result == 0 &&
+           (io->ci_type == CIT_WRITE || io->ci_type == CIT_READ ||
+           io->ci_type == CIT_FAULT)) {
+               struct lov_stripe_md_entry *lsme;
+               loff_t io_end = lio->lis_endpos - 1;
+               loff_t io_start = lio->lis_pos;
+               loff_t first_chunk_start_bytes = io_start;
+               loff_t last_chunk_end_bytes = io_end;
+               pgoff_t first_chunk_start_idx = 0;
+               pgoff_t last_chunk_end_idx = 0;
+               int chunk_size;
+
+               /* first we check to see if the start of the IO lands in a
+                * compressed extent
+                */
+               lsme = lov_io_lsme_at(lio, io_start >> PAGE_SHIFT);
+
+               /* if so, determine the boundary of the leading chunk
+                *
+                * NB: If an IO is past the end of the layout, then no lsme.
+                */
+               if (lsme && lsme->lsme_pattern & LOV_PATTERN_COMPRESS) {
+                       int chunk_pages;
+
+                       io->ci_compressed_file = true;
+                       chunk_size =
+                               (1 << (lsme->lsme_compr_chunk_log_bits + 16));
+                       chunk_pages = chunk_size >> PAGE_SHIFT;
+
+                       first_chunk_start_bytes = io_start & ~(chunk_size - 1);
+                       first_chunk_start_idx =
+                               first_chunk_start_bytes >> PAGE_SHIFT;
+
+                       if (first_chunk_start_bytes != io_start) {
+                               io->ci_cmpc_leading.e_start =
+                                       first_chunk_start_idx;
+                               io->ci_cmpc_leading.e_end =
+                                       first_chunk_start_idx + chunk_pages - 1;
+                       }
+               }
+
+               /* now we repeat this for the end of the IO */
+               lsme = lov_io_lsme_at(lio, io_end >> PAGE_SHIFT);
+
+               if (lsme && lsme->lsme_pattern & LOV_PATTERN_COMPRESS) {
+                       int chunk_pages;
+
+                       io->ci_compressed_file = true;
+                       chunk_size =
+                               (1 << (lsme->lsme_compr_chunk_log_bits + 16));
+                       chunk_pages = chunk_size >> PAGE_SHIFT;
+
+                       last_chunk_end_bytes = io_end | (chunk_size - 1);
+                       last_chunk_end_idx = last_chunk_end_bytes >> PAGE_SHIFT;
+
+                       if (last_chunk_end_bytes != io_end) {
+                               io->ci_cmpc_trailing.e_start =
+                                       last_chunk_end_idx - chunk_pages + 1;
+                               io->ci_cmpc_trailing.e_end = last_chunk_end_idx;
+                       }
+               }
+
+               if (io->ci_compressed_file) {
+                       CDEBUG(D_VFSTRACE,
+                              "Compressed file, leading chunk and trailing chunk (-1 means aligned): [%lld, %lld] and [%lld, %lld]\n",
+                              io->ci_cmpc_leading.e_start,
+                              io->ci_cmpc_leading.e_end,
+                              io->ci_cmpc_trailing.e_start,
+                              io->ci_cmpc_trailing.e_end);
+               }
+       }
+
        return result;
 }
 
index aae858e..3ac7e7d 100644 (file)
@@ -151,6 +151,11 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
        INIT_LIST_HEAD(&io->ci_lockset.cls_done);
        INIT_LIST_HEAD(&io->ci_layers);
 
+       io->ci_cmpc_leading.e_start = -1;
+       io->ci_cmpc_leading.e_end = -1;
+       io->ci_cmpc_trailing.e_start = -1;
+       io->ci_cmpc_trailing.e_end = -1;
+
         result = 0;
         cl_object_for_each(scan, obj) {
                 if (scan->co_ops->coo_io_init != NULL) {
@@ -210,8 +215,8 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
 {
        LASSERT(iot == CIT_READ || iot == CIT_WRITE);
        LASSERT(io->ci_obj != NULL);
-       ENTRY;
 
+       ENTRY;
        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                         "io range: %u [%llu, %llu) %u %u\n",
                         iot, (__u64)pos, (__u64)pos + count,