LU-10994 clio: remove cpo_assume, cpo_unassume, cpo_fini
author    John L. Hammond <jhammond@whamcloud.com>
          Mon, 16 May 2022 21:42:44 +0000 (16:42 -0500)
committer Oleg Drokin <green@whamcloud.com>
          Mon, 27 Jun 2022 04:49:51 +0000 (04:49 +0000)
Remove the cl_page methods cpo_assume, cpo_unassume, and
cpo_fini. These methods were only implemented by the vvp layer, so
they can easily be inlined into cl_page_assume(), cl_page_unassume(),
and cl_page_free(). Remove vvp_page_delete() by inlining its contents
into cl_page_delete0().
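
For illustration only (not part of the commit): a minimal standalone C
sketch of the pattern this change applies. All demo_* names below are
hypothetical, not Lustre APIs. Before the change, cl_page_assume() and
cl_page_unassume() walked every layer's slice and called an optional
method through a function-pointer table; since vvp was the only layer
implementing those methods, the new code replaces the walk with a
direct check on the page type.

#include <stdio.h>
#include <stddef.h>

enum demo_page_type { DEMO_CACHEABLE, DEMO_TRANSIENT };

struct demo_slice;

struct demo_slice_ops {
	/* Optional per-layer hook; only one layer ever implemented it. */
	void (*dso_assume)(const struct demo_slice *slice);
};

struct demo_slice {
	const struct demo_slice_ops *dsl_ops;
};

struct demo_page {
	enum demo_page_type dp_type;
	struct demo_slice dp_slices[2];
	int dp_nr_slices;
};

static void demo_vvp_assume(const struct demo_slice *slice)
{
	(void)slice;
	printf("vvp: wait for writeback on the VM page\n");
}

static const struct demo_slice_ops demo_vvp_ops = { .dso_assume = demo_vvp_assume };
static const struct demo_slice_ops demo_lov_ops = { .dso_assume = NULL };

/* Old shape: walk every layer and call the optional hook if present. */
static void demo_assume_dispatch(struct demo_page *dp)
{
	int i;

	for (i = 0; i < dp->dp_nr_slices; i++)
		if (dp->dp_slices[i].dsl_ops->dso_assume != NULL)
			dp->dp_slices[i].dsl_ops->dso_assume(&dp->dp_slices[i]);
}

/* New shape: the lone implementation is inlined behind a page-type
 * check, so the NULL tests and indirect calls per slice disappear. */
static void demo_assume_inlined(struct demo_page *dp)
{
	if (dp->dp_type == DEMO_CACHEABLE)
		printf("vvp: wait for writeback on the VM page\n");
}

int main(void)
{
	struct demo_page dp = {
		.dp_type = DEMO_CACHEABLE,
		.dp_slices = { { &demo_lov_ops }, { &demo_vvp_ops } },
		.dp_nr_slices = 2,
	};

	demo_assume_dispatch(&dp); /* old path: indirect call */
	demo_assume_inlined(&dp);  /* new path: direct, type-checked */
	return 0;
}

The same reasoning removes cpo_fini: its single implementation moves
into cl_page_free(), guarded by the same CPT_CACHEABLE check, as the
third diff below shows.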

Change-Id: I260c5593983bac6742cf7577c26a4903e95ceb7c
Signed-off-by: John L. Hammond <jhammond@whamcloud.com>
Reviewed-on: https://review.whamcloud.com/47373
Reviewed-by: Patrick Farrell <pfarrell@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/cl_object.h
lustre/llite/vvp_page.c
lustre/obdclass/cl_page.c

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index a5422af..5f2c355 100644
@@ -836,25 +836,6 @@ struct cl_page_operations {
          * provided by the topmost layer, see cl_page_disown0() as an example.
          */
 
-        /**
-         * Called for a page that is already "owned" by \a io from VM point of
-         * view. Optional.
-         *
-         * \see cl_page_assume()
-         * \see vvp_page_assume(), lov_page_assume()
-         */
-        void (*cpo_assume)(const struct lu_env *env,
-                           const struct cl_page_slice *slice, struct cl_io *io);
-        /** Dual to cl_page_operations::cpo_assume(). Optional. Called
-         * bottom-to-top when IO releases a page without actually unlocking
-         * it.
-         *
-         * \see cl_page_unassume()
-         * \see vvp_page_unassume()
-         */
-        void (*cpo_unassume)(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *io);
        /**
         * Update file attributes when all we have is this page.  Used for tiny
         * writes to update attributes when we don't have a full cl_io.
@@ -883,10 +864,6 @@ struct cl_page_operations {
          */
         void (*cpo_delete)(const struct lu_env *env,
                            const struct cl_page_slice *slice);
-        /** Destructor. Frees resources and slice itself. */
-        void (*cpo_fini)(const struct lu_env *env,
-                        struct cl_page_slice *slice,
-                        struct pagevec *pvec);
         /**
          * Optional debugging helper. Prints given page slice.
          *
diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index 1e6238c..be5caa3 100644
  * Page operations.
  *
  */
-static void vvp_page_fini(const struct lu_env *env,
-                         struct cl_page_slice *slice,
-                         struct pagevec *pvec)
-{
-       struct vvp_page *vpg     = cl2vvp_page(slice);
-       struct page     *vmpage  = vpg->vpg_page;
-
-       /*
-        * vmpage->private was already cleared when page was moved into
-        * VPG_FREEING state.
-        */
-       LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
-       LASSERT(vmpage != NULL);
-       if (pvec) {
-               if (!pagevec_add(pvec, vmpage))
-                       pagevec_release(pvec);
-       } else {
-               put_page(vmpage);
-       }
-}
-
-static void vvp_page_assume(const struct lu_env *env,
-                           const struct cl_page_slice *slice,
-                           struct cl_io *unused)
-{
-       struct page *vmpage = cl2vm_page(slice);
-
-       LASSERT(vmpage != NULL);
-       LASSERT(PageLocked(vmpage));
-       wait_on_page_writeback(vmpage);
-}
-
-static void vvp_page_unassume(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *unused)
-{
-       struct page *vmpage = cl2vm_page(slice);
-
-       LASSERT(vmpage != NULL);
-       LASSERT(PageLocked(vmpage));
-}
-
 static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
@@ -105,29 +63,6 @@ static void vvp_page_discard(const struct lu_env *env,
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
 }
 
-static void vvp_page_delete(const struct lu_env *env,
-                           const struct cl_page_slice *slice)
-{
-       struct page      *vmpage = cl2vm_page(slice);
-       struct cl_page   *page   = slice->cpl_page;
-       int refc;
-
-       LASSERT(PageLocked(vmpage));
-       LASSERT((struct cl_page *)vmpage->private == page);
-
-
-       /* Drop the reference count held in vvp_page_init */
-       refc = atomic_dec_return(&page->cp_ref);
-       LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
-
-       ClearPagePrivate(vmpage);
-       vmpage->private = 0;
-       /*
-        * Reference from vmpage to cl_page is removed, but the reference back
-        * is still here. It is removed later in vvp_page_fini().
-        */
-}
-
 static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
@@ -325,11 +260,7 @@ static int vvp_page_fail(const struct lu_env *env,
 }
 
 static const struct cl_page_operations vvp_page_ops = {
-       .cpo_assume        = vvp_page_assume,
-       .cpo_unassume      = vvp_page_unassume,
        .cpo_discard       = vvp_page_discard,
-       .cpo_delete        = vvp_page_delete,
-       .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .io = {
                [CRT_READ] = {
@@ -367,7 +298,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                                  &vvp_transient_page_ops);
        } else {
                get_page(vmpage);
-               /* in cache, decref in vvp_page_delete */
+               /* in cache, decref in cl_page_delete() */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 1b589bc..5514840 100644
@@ -181,32 +181,41 @@ static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
        }
 }
 
-static void cl_page_free(const struct lu_env *env, struct cl_page *cl_page,
+static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
                         struct pagevec *pvec)
 {
-       struct cl_object *obj  = cl_page->cp_obj;
+       struct cl_object *obj  = cp->cp_obj;
        unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
-       struct cl_page_slice *slice;
-       int i;
+       struct page *vmpage;
 
        ENTRY;
-       PASSERT(env, cl_page, list_empty(&cl_page->cp_batch));
-       PASSERT(env, cl_page, cl_page->cp_owner == NULL);
-       PASSERT(env, cl_page, cl_page->cp_state == CPS_FREEING);
+       PASSERT(env, cp, list_empty(&cp->cp_batch));
+       PASSERT(env, cp, cp->cp_owner == NULL);
+       PASSERT(env, cp, cp->cp_state == CPS_FREEING);
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (unlikely(slice->cpl_ops->cpo_fini != NULL))
-                       slice->cpl_ops->cpo_fini(env, slice, pvec);
+       if (cp->cp_type == CPT_CACHEABLE) {
+               /* vmpage->private was already cleared when page was
+                * moved into CPS_FREEING state. */
+               vmpage = cp->cp_vmpage;
+               LASSERT(vmpage != NULL);
+               LASSERT((struct cl_page *)vmpage->private != cp);
+
+               if (pvec != NULL) {
+                       if (!pagevec_add(pvec, vmpage))
+                               pagevec_release(pvec);
+               } else {
+                       put_page(vmpage);
+               }
        }
-       cl_page->cp_layer_count = 0;
+
+       cp->cp_layer_count = 0;
        cs_page_dec(obj, CS_total);
-       cs_pagestate_dec(obj, cl_page->cp_state);
-       lu_object_ref_del_at(&obj->co_lu, &cl_page->cp_obj_ref,
-                            "cl_page", cl_page);
-       if (cl_page->cp_type != CPT_TRANSIENT)
+       cs_pagestate_dec(obj, cp->cp_state);
+       lu_object_ref_del_at(&obj->co_lu, &cp->cp_obj_ref, "cl_page", cp);
+       if (cp->cp_type != CPT_TRANSIENT)
                cl_object_put(env, obj);
-       lu_ref_fini(&cl_page->cp_reference);
-       __cl_page_free(cl_page, bufsize);
+       lu_ref_fini(&cp->cp_reference);
+       __cl_page_free(cp, bufsize);
        EXIT;
 }
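
An aside on the pvec branch kept in cl_page_free() above: it uses the
kernel's pagevec batching idiom, where pagevec_add() returns the number
of free slots remaining, so a zero return means the batch is full and
pagevec_release() flushes it. A minimal userspace sketch of that idiom,
with hypothetical demo_* names standing in for the pagevec API and an
assumed batch size of 15 (the usual PAGEVEC_SIZE in kernels of this
era):

#include <stdio.h>

#define DEMO_BATCH 15 /* stand-in for PAGEVEC_SIZE; assumed, not quoted */

struct demo_pvec {
	unsigned nr;
	int pages[DEMO_BATCH];
};

/* Like pagevec_add(): stash a page, return remaining capacity (0 = full). */
static unsigned demo_pvec_add(struct demo_pvec *pv, int page)
{
	pv->pages[pv->nr++] = page;
	return DEMO_BATCH - pv->nr;
}

/* Like pagevec_release(): drop the whole accumulated batch in one pass. */
static void demo_pvec_release(struct demo_pvec *pv)
{
	printf("releasing %u pages in one batch\n", pv->nr);
	pv->nr = 0;
}

int main(void)
{
	struct demo_pvec pv = { 0 };
	int page;

	for (page = 0; page < 40; page++)
		if (!demo_pvec_add(&pv, page))
			demo_pvec_release(&pv);

	demo_pvec_release(&pv); /* flush the partially filled tail */
	return 0;
}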
 
@@ -705,32 +714,28 @@ EXPORT_SYMBOL(cl_page_own_try);
  *
  * Called when page is already locked by the hosting VM.
  *
- * \pre !cl_page_is_owned(cl_page, io)
- * \post cl_page_is_owned(cl_page, io)
- *
- * \see cl_page_operations::cpo_assume()
+ * \pre !cl_page_is_owned(cp, io)
+ * \post cl_page_is_owned(cp, io)
  */
 void cl_page_assume(const struct lu_env *env,
-                   struct cl_io *io, struct cl_page *cl_page)
+                   struct cl_io *io, struct cl_page *cp)
 {
-       const struct cl_page_slice *slice;
-       int i;
+       struct page *vmpage;
 
        ENTRY;
+       PINVRNT(env, cp, cl_object_same(cp->cp_obj, io->ci_obj));
 
-       PINVRNT(env, cl_page,
-               cl_object_same(cl_page->cp_obj, io->ci_obj));
-       io = cl_io_top(io);
-
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_assume != NULL)
-                       (*slice->cpl_ops->cpo_assume)(env, slice, io);
+       if (cp->cp_type == CPT_CACHEABLE) {
+               vmpage = cp->cp_vmpage;
+               LASSERT(vmpage != NULL);
+               LASSERT(PageLocked(vmpage));
+               wait_on_page_writeback(vmpage);
        }
 
-       PASSERT(env, cl_page, cl_page->cp_owner == NULL);
-       cl_page->cp_owner = cl_io_top(io);
-       cl_page_owner_set(cl_page);
-       cl_page_state_set(env, cl_page, CPS_OWNED);
+       PASSERT(env, cp, cp->cp_owner == NULL);
+       cp->cp_owner = cl_io_top(io);
+       cl_page_owner_set(cp);
+       cl_page_state_set(env, cp, CPS_OWNED);
        EXIT;
 }
 EXPORT_SYMBOL(cl_page_assume);
@@ -741,28 +746,25 @@ EXPORT_SYMBOL(cl_page_assume);
  * Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
  * on the underlying VM page (as VM is supposed to do this itself).
  *
- * \pre   cl_page_is_owned(cl_page, io)
- * \post !cl_page_is_owned(cl_page, io)
- *
- * \see cl_page_assume()
+ * \pre   cl_page_is_owned(cp, io)
+ * \post !cl_page_is_owned(cp, io)
  */
 void cl_page_unassume(const struct lu_env *env,
-                     struct cl_io *io, struct cl_page *cl_page)
+                     struct cl_io *io, struct cl_page *cp)
 {
-       const struct cl_page_slice *slice;
-       int i;
+       struct page *vmpage;
 
-        ENTRY;
-       PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page));
+       ENTRY;
+       PINVRNT(env, cp, cl_page_is_owned(cp, io));
+       PINVRNT(env, cp, cl_page_invariant(cp));
 
-       io = cl_io_top(io);
-       cl_page_owner_clear(cl_page);
-       cl_page_state_set(env, cl_page, CPS_CACHED);
+       cl_page_owner_clear(cp);
+       cl_page_state_set(env, cp, CPS_CACHED);
 
-       cl_page_slice_for_each_reverse(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_unassume != NULL)
-                       (*slice->cpl_ops->cpo_unassume)(env, slice, io);
+       if (cp->cp_type == CPT_CACHEABLE) {
+               vmpage = cp->cp_vmpage;
+               LASSERT(vmpage != NULL);
+               LASSERT(PageLocked(vmpage));
        }
 
        EXIT;
@@ -830,27 +832,46 @@ EXPORT_SYMBOL(cl_page_discard);
  * cl_pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
  * path. Doesn't check cl_page invariant.
  */
-static void cl_page_delete0(const struct lu_env *env,
-                           struct cl_page *cl_page)
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *cp)
 {
+       struct page *vmpage;
        const struct cl_page_slice *slice;
+       int refc;
        int i;
 
-        ENTRY;
-
-       PASSERT(env, cl_page, cl_page->cp_state != CPS_FREEING);
+       ENTRY;
+       PASSERT(env, cp, cp->cp_state != CPS_FREEING);
 
        /*
         * Sever all ways to obtain new pointers to @cp.
         */
-       cl_page_owner_clear(cl_page);
-       cl_page_state_set0(env, cl_page, CPS_FREEING);
+       cl_page_owner_clear(cp);
+       cl_page_state_set0(env, cp, CPS_FREEING);
 
-       cl_page_slice_for_each_reverse(cl_page, slice, i) {
+       cl_page_slice_for_each_reverse(cp, slice, i) {
                if (slice->cpl_ops->cpo_delete != NULL)
                        (*slice->cpl_ops->cpo_delete)(env, slice);
        }
 
+       if (cp->cp_type == CPT_CACHEABLE) {
+               vmpage = cp->cp_vmpage;
+               LASSERT(PageLocked(vmpage));
+               LASSERT((struct cl_page *)vmpage->private == cp);
+
+               /* Drop the reference count held in vvp_page_init */
+               refc = atomic_dec_return(&cp->cp_ref);
+               LASSERTF(refc >= 1, "page = %p, refc = %d\n", cp, refc);
+
+               ClearPagePrivate(vmpage);
+               vmpage->private = 0;
+
+               /*
+                * The reference from vmpage to cl_page is removed,
+                * but the reference back is still here. It is removed
+                * later in cl_page_free().
+                */
+       }
+
        EXIT;
 }
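
The split teardown that the last two hunks implement directly, where
cl_page_delete0() severs the vmpage -> cl_page pointer and drops the
reference taken in vvp_page_init(), and cl_page_free() later drops the
cl_page -> vmpage reference, can be summarized in a small refcount
sketch. This is illustrative only; the demo_* names are hypothetical
and plain integers stand in for cp_ref and the get_page()/put_page()
page count.

#include <assert.h>
#include <stdio.h>

struct demo_vmpage {
	unsigned long private; /* back-pointer to the cl_page, as vmpage->private */
	int refcount;          /* page reference held via get_page() */
};

struct demo_clpage {
	struct demo_vmpage *cp_vmpage;
	int cp_ref;
};

/* Mirrors the vvp_page_init() hunk: take a vmpage ref and a cl_page
 * ref, then link the two objects both ways. */
static void demo_init(struct demo_clpage *cp, struct demo_vmpage *vm)
{
	vm->refcount++;                  /* get_page(vmpage) */
	cp->cp_ref++;                    /* atomic_inc(&page->cp_ref) */
	vm->private = (unsigned long)cp; /* vmpage -> cl_page */
	cp->cp_vmpage = vm;              /* cl_page -> vmpage */
}

/* Mirrors the inlined cl_page_delete0() step: clear vmpage -> cl_page
 * and drop the cl_page reference taken at init time. */
static void demo_delete(struct demo_clpage *cp)
{
	assert((struct demo_clpage *)cp->cp_vmpage->private == cp);
	cp->cp_ref--;
	cp->cp_vmpage->private = 0;
}

/* Mirrors the inlined cl_page_free() step: the reference back from the
 * cl_page to the vmpage is the last thing dropped. */
static void demo_free(struct demo_clpage *cp)
{
	assert((struct demo_clpage *)cp->cp_vmpage->private != cp);
	cp->cp_vmpage->refcount--; /* put_page(vmpage) */
	cp->cp_vmpage = NULL;
}

int main(void)
{
	struct demo_vmpage vm = { 0, 1 };
	struct demo_clpage cp = { NULL, 1 };

	demo_init(&cp, &vm);
	demo_delete(&cp); /* forward pointer gone, cp_ref still >= 1 */
	demo_free(&cp);   /* back reference dropped last */
	printf("vmpage refcount back to %d\n", vm.refcount);
	return 0;
}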