Whamcloud - gitweb
LU-13134 clio: cl_page index compaction 17/37417/5
author Patrick Farrell <farr0186@gmail.com>
Tue, 4 Feb 2020 01:05:27 +0000 (20:05 -0500)
committer Oleg Drokin <green@whamcloud.com>
Tue, 7 Apr 2020 17:19:03 +0000 (17:19 +0000)
The cpl_index field is not necessary for the vvp layer,
since that's the same as the vm page index, and it has
already been duplicated for the lov layer.

So, moving the osc usage of this to the top, we can save
24 bytes per page (in the slices) by removing this from
the cl_page_slice, at the cost of 8 bytes in the cl_page.
So a net savings of 16 bytes.

With this patch, cl_page is reduced from 408 to
392 bytes.

Signed-off-by: Patrick Farrell <farr0186@gmail.com>
Change-Id: Ib4fd75f112afb816443799314cd2ef75ff4f4c84
Reviewed-on: https://review.whamcloud.com/37417
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Wang Shilong <wshilong@ddn.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/cl_object.h
lustre/include/lustre_osc.h
lustre/llite/vvp_internal.h
lustre/llite/vvp_page.c
lustre/lov/lov_page.c
lustre/obdclass/cl_page.c
lustre/obdecho/echo_client.c
lustre/osc/osc_page.c

index a9d8a67..8795c82 100644 (file)
@@ -765,6 +765,7 @@ struct cl_page {
        struct cl_sync_io       *cp_sync_io;
        /** layout_entry + stripe index, composed using lov_comp_index() */
        unsigned int            cp_lov_index;
+       pgoff_t                 cp_osc_index;
 };
 
 /**
@@ -774,7 +775,6 @@ struct cl_page {
  */
 struct cl_page_slice {
         struct cl_page                  *cpl_page;
-       pgoff_t                          cpl_index;
         /**
          * Object slice corresponding to this page slice. Immutable after
          * creation.
@@ -2071,7 +2071,7 @@ static inline void cl_device_fini(struct cl_device *d)
 }
 
 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
-                      struct cl_object *obj, pgoff_t index,
+                      struct cl_object *obj,
                       const struct cl_page_operations *ops);
 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                        struct cl_object *obj,
index 6601a19..6995aec 100644 (file)
@@ -822,7 +822,7 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
 
 static inline pgoff_t osc_index(struct osc_page *opg)
 {
-       return opg->ops_cl.cpl_index;
+       return opg->ops_cl.cpl_page->cp_osc_index;
 }
 
 static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
index 3062208..ce123d3 100644 (file)
@@ -245,7 +245,7 @@ static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
 
 static inline pgoff_t vvp_index(struct vvp_page *vpg)
 {
-       return vpg->vpg_cl.cpl_index;
+       return vpg->vpg_page->index;
 }
 
 struct vvp_device {
index d7092e4..884db91 100644 (file)
@@ -475,12 +475,12 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
-               cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+               cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);
 
-               cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+               cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                &vvp_transient_page_ops);
                atomic_inc(&clobj->vob_transient_pages);
        }
index 2b9d54a..48ef262 100644 (file)
@@ -98,7 +98,7 @@ int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
 
        page->cp_lov_index = lov_comp_index(entry, stripe);
        lpg->lps_layout_gen = loo->lo_lsm->lsm_layout_gen;
-       cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_comp_page_ops);
+       cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_comp_page_ops);
 
        sub = lov_sub_get(env, lio, page->cp_lov_index);
        if (IS_ERR(sub))
@@ -140,7 +140,7 @@ int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
        ENTRY;
 
        page->cp_lov_index = ~0;
-       cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
+       cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
        addr = kmap(page->cp_vmpage);
        memset(addr, 0, cl_page_size(obj));
        kunmap(page->cp_vmpage);
index e9d2b84..61c63a4 100644 (file)
@@ -1171,13 +1171,12 @@ EXPORT_SYMBOL(cl_page_size);
  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
  */
 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
-                      struct cl_object *obj, pgoff_t index,
+                      struct cl_object *obj,
                       const struct cl_page_operations *ops)
 {
        ENTRY;
        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
-       slice->cpl_index = index;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
        EXIT;
index 9433ec9..4d82652 100644 (file)
@@ -427,7 +427,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
         * wait_on_bit() interface to wait for the bit to be clear.
         */
        ep->ep_lock = 0;
-       cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
+       cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
        atomic_inc(&eco->eo_npages);
        RETURN(0);
 }
index dacd953..d981845 100644 (file)
@@ -267,8 +267,8 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
                return result;
 
        opg->ops_srvlock = osc_io_srvlock(oio);
-       cl_page_slice_add(page, &opg->ops_cl, obj, index,
-                         &osc_page_ops);
+       cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
+       page->cp_osc_index = index;
 
 
        /* reserve an LRU space for this page */