LU-10994 clio: remove cpo_prep and cpo_make_ready 99/47399/6
author John L. Hammond <jhammond@whamcloud.com>
Mon, 22 Aug 2022 15:56:04 +0000 (11:56 -0400)
committer Oleg Drokin <green@whamcloud.com>
Mon, 12 Sep 2022 02:55:20 +0000 (02:55 +0000)
Remove the cpo_prep and cpo_make_ready methods from struct
cl_page_operations. These methods were only implemented by the vvp
layer and so they can be easily inlined into cl_page_prep() and
cl_page_make_ready().

Signed-off-by: John L. Hammond <jhammond@whamcloud.com>
Change-Id: I177fd8d3c3832bcc8f06ed98cdf9d30f18d49e88
Reviewed-on: https://review.whamcloud.com/47399
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/cl_object.h
lustre/llite/vvp_page.c
lustre/obdclass/cl_page.c

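The change is a straightforward inlining: before, cl_page_prep() and cl_page_make_ready() dispatched top-to-bottom through the per-layer cl_page_operations method table, even though vvp was the only layer that registered these hooks; after, the vvp logic acts directly on the backing VM page. A condensed before/after sketch of cl_page_prep(), taken from the diff below (declarations, assertions and tracing elided; variable names follow the respective old and new versions):

        /* Before: generic dispatch over every registered slice. */
        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->io[crt].cpo_prep)
                        result = (*slice->cpl_ops->io[crt].cpo_prep)(env, slice, io);
                if (result != 0)
                        break;
        }

        /* After: the single vvp implementation, inlined. */
        if (cp->cp_type == CPT_TRANSIENT) {
                /* Nothing to do. */
        } else if (crt == CRT_READ) {
                if (PageUptodate(vmpage))
                        GOTO(out, rc = -EALREADY);  /* page already cached, skip it */
        } else {
                /* async writeback (ll_writepage) must set PG_writeback itself */
                if (cp->cp_sync_io == NULL)
                        set_page_writeback(vmpage);
        }
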
diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index bc534e0..9d4769c 100644
@@ -894,22 +894,8 @@ struct cl_page_operations {
          */
         struct {
                 /**
-                 * Called when a page is submitted for a transfer as a part of
-                 * cl_page_list.
-                 *
-                 * \return    0         : page is eligible for submission;
-                 * \return    -EALREADY : skip this page;
-                 * \return    -ve       : error.
-                 *
-                 * \see cl_page_prep()
-                 */
-                int  (*cpo_prep)(const struct lu_env *env,
-                                 const struct cl_page_slice *slice,
-                                 struct cl_io *io);
-                /**
                  * Completion handler. This is guaranteed to be eventually
-                 * fired after cl_page_operations::cpo_prep() or
-                 * cl_page_operations::cpo_make_ready() call.
+                 * fired after cl_page_prep() or cl_page_make_ready() call.
                  *
                  * This method can be called in a non-blocking context. It is
                  * guaranteed however, that the page involved and its object
@@ -921,18 +907,6 @@ struct cl_page_operations {
                 void (*cpo_completion)(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        int ioret);
-                /**
-                 * Called when cached page is about to be added to the
-                 * ptlrpc request as a part of req formation.
-                 *
-                 * \return    0       : proceed with this page;
-                 * \return    -EAGAIN : skip this page;
-                 * \return    -ve     : error.
-                 *
-                 * \see cl_page_make_ready()
-                 */
-                int  (*cpo_make_ready)(const struct lu_env *env,
-                                       const struct cl_page_slice *slice);
         } io[CRT_NR];
         /**
          * Tell transfer engine that only [to, from] part of a page should be
diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index f0edcbc..aa494a9 100644
@@ -63,34 +63,6 @@ static void vvp_page_discard(const struct lu_env *env,
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
 }
 
-static int vvp_page_prep_read(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *unused)
-{
-       ENTRY;
-       /* Skip the page already marked as PG_uptodate. */
-       RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
-}
-
-static int vvp_page_prep_write(const struct lu_env *env,
-                              const struct cl_page_slice *slice,
-                              struct cl_io *unused)
-{
-       struct page *vmpage = cl2vm_page(slice);
-       struct cl_page *pg = slice->cpl_page;
-
-       LASSERT(PageLocked(vmpage));
-       LASSERT(!PageDirty(vmpage));
-
-       /* ll_writepage path is not a sync write, so need to set page writeback
-        * flag
-        */
-       if (pg->cp_sync_io == NULL)
-               set_page_writeback(vmpage);
-
-       return 0;
-}
-
 /**
  * Handles page transfer errors at VM level.
  *
@@ -182,72 +154,14 @@ static void vvp_page_completion_write(const struct lu_env *env,
        EXIT;
 }
 
-/**
- * Implements cl_page_operations::cpo_make_ready() method.
- *
- * This is called to yank a page from the transfer cache and to send it out as
- * a part of transfer. This function try-locks the page. If try-lock failed,
- * page is owned by some concurrent IO, and should be skipped (this is bad,
- * but hopefully rare situation, as it usually results in transfer being
- * shorter than possible).
- *
- * \retval 0      success, page can be placed into transfer
- *
- * \retval -EAGAIN page is either used by concurrent IO has been
- * truncated. Skip it.
- */
-static int vvp_page_make_ready(const struct lu_env *env,
-                              const struct cl_page_slice *slice)
-{
-       struct page *vmpage = cl2vm_page(slice);
-       struct cl_page *pg = slice->cpl_page;
-       int result = 0;
-
-       lock_page(vmpage);
-       if (clear_page_dirty_for_io(vmpage)) {
-               LASSERT(pg->cp_state == CPS_CACHED);
-               /* This actually clears the dirty bit in the radix
-                * tree.
-                */
-               set_page_writeback(vmpage);
-               CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
-       } else if (pg->cp_state == CPS_PAGEOUT) {
-               /* is it possible for osc_flush_async_page() to already
-                * make it ready?
-                */
-               result = -EALREADY;
-       } else {
-               CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
-                             pg->cp_state);
-               LBUG();
-       }
-       unlock_page(vmpage);
-       RETURN(result);
-}
-
-static int vvp_page_fail(const struct lu_env *env,
-                        const struct cl_page_slice *slice)
-{
-       /*
-        * Cached read?
-        */
-       LBUG();
-
-       return 0;
-}
-
 static const struct cl_page_operations vvp_page_ops = {
        .cpo_discard       = vvp_page_discard,
        .io = {
                [CRT_READ] = {
-                       .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
-                       .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
-                       .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
-                       .cpo_make_ready = vvp_page_make_ready,
                },
        },
 };
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 68f61f3..9962de4 100644
@@ -944,50 +944,41 @@ static void cl_page_io_start(const struct lu_env *env,
 }
 
 /**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
+ * Prepares page for immediate transfer. Return -EALREADY if this page
+ * should be omitted from transfer.
  */
 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
-                struct cl_page *cl_page, enum cl_req_type crt)
+                struct cl_page *cp, enum cl_req_type crt)
 {
-       const struct cl_page_slice *slice;
-       int result = 0;
-       int i;
+       struct page *vmpage = cp->cp_vmpage;
+       int rc;
 
-       PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page));
-       PINVRNT(env, cl_page, crt < CRT_NR);
+       PASSERT(env, cp, crt < CRT_NR);
+       PINVRNT(env, cp, cl_page_is_owned(cp, io));
+       PINVRNT(env, cp, cl_page_invariant(cp));
 
-        /*
-        * this has to be called bottom-to-top, so that llite can set up
-        * PG_writeback without risking other layers deciding to skip this
-        * page.
-        */
-       if (crt >= CRT_NR)
-               return -EINVAL;
-
-       if (cl_page->cp_type != CPT_TRANSIENT) {
-               cl_page_slice_for_each(cl_page, slice, i) {
-                       if (slice->cpl_ops->io[crt].cpo_prep)
-                               result =
-                                (*slice->cpl_ops->io[crt].cpo_prep)(env,
-                                                                    slice,
-                                                                    io);
-                       if (result != 0)
-                               break;
-               }
-       }
+       if (cp->cp_type == CPT_TRANSIENT) {
+               /* Nothing to do. */
+       } else if (crt == CRT_READ) {
+               if (PageUptodate(vmpage))
+                       GOTO(out, rc = -EALREADY);
+       } else {
+               LASSERT(PageLocked(vmpage));
+               LASSERT(!PageDirty(vmpage));
 
-       if (result >= 0) {
-               result = 0;
-               cl_page_io_start(env, cl_page, crt);
+               /* ll_writepage path is not a sync write, so need to
+                * set page writeback flag
+                */
+               if (cp->cp_sync_io == NULL)
+                       set_page_writeback(vmpage);
        }
 
-       CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
-       return result;
+       cl_page_io_start(env, cp, crt);
+       rc = 0;
+out:
+       CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+       return rc;
 }
 EXPORT_SYMBOL(cl_page_prep);
 
@@ -1044,36 +1035,51 @@ EXPORT_SYMBOL(cl_page_completion);
  *
  * \pre  cl_page->cp_state == CPS_CACHED
  * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
  */
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
-                       enum cl_req_type crt)
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *cp,
+                      enum cl_req_type crt)
 {
-       const struct cl_page_slice *slice;
-       int result = 0;
-       int i;
+       struct page *vmpage = cp->cp_vmpage;
+       int rc;
 
-        ENTRY;
-       PINVRNT(env, cl_page, crt < CRT_NR);
-       if (crt >= CRT_NR)
-               RETURN(-EINVAL);
+       ENTRY;
+       PASSERT(env, cp, crt == CRT_WRITE);
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
-                       result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env, slice);
-               if (result != 0)
-                       break;
+       if (cp->cp_type == CPT_TRANSIENT)
+               GOTO(out, rc = 0);
+
+       lock_page(vmpage);
+
+       if (clear_page_dirty_for_io(vmpage)) {
+               LASSERT(cp->cp_state == CPS_CACHED);
+               /* This actually clears the dirty bit in the
+                * radix tree.
+                */
+               set_page_writeback(vmpage);
+               CL_PAGE_HEADER(D_PAGE, env, cp, "readied\n");
+               rc = 0;
+       } else if (cp->cp_state == CPS_PAGEOUT) {
+               /* is it possible for osc_flush_async_page()
+                * to already make it ready?
+                */
+               rc = -EALREADY;
+       } else {
+               CL_PAGE_DEBUG(D_ERROR, env, cp,
+                             "unexpecting page state %d\n",
+                             cp->cp_state);
+               LBUG();
        }
 
-       if (result >= 0) {
-               result = 0;
-               PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
-               cl_page_io_start(env, cl_page, crt);
-        }
-       CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
+       unlock_page(vmpage);
+out:
+       if (rc == 0) {
+               PASSERT(env, cp, cp->cp_state == CPS_CACHED);
+               cl_page_io_start(env, cp, crt);
+       }
 
-       RETURN(result);
+       CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+       return rc;
 }
 EXPORT_SYMBOL(cl_page_make_ready);
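
For context, the return contract survives the refactoring unchanged: 0 means the page was started on its transfer (cl_page_io_start() moves it to CPS_PAGEIN or CPS_PAGEOUT), -EALREADY means the page should simply be omitted, and any other negative value is a genuine error. A minimal, hypothetical caller sketch of that contract (submit_one() is illustrative only, not a function in this patch):

static int submit_one(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *pg, enum cl_req_type crt)
{
        int rc = cl_page_prep(env, io, pg, crt);

        if (rc == 0)
                return 0;       /* page is now in CPS_PAGEIN/CPS_PAGEOUT */
        if (rc == -EALREADY)
                return 0;       /* omit this page from the transfer; not an error */
        return rc;              /* genuine failure, abort the submission */
}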