LU-10994 clio: remove struct vvp_page (refs/changes/00/47400/5)
author John L. Hammond <jhammond@whamcloud.com>
Mon, 11 Jul 2022 14:04:12 +0000 (10:04 -0400)
committer Oleg Drokin <green@whamcloud.com>
Mon, 12 Sep 2022 02:55:29 +0000 (02:55 +0000)
Remove struct vvp_page and use struct cl_page_slice in its place. Use
cp_vmpage in place of vpg_page and cl_page_index() in place of
vvp_index().
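
As an illustration, here is a minimal sketch of how a typical call site
changes (it uses only helpers visible in this patch; the surrounding
function and the local variable names are hypothetical):

    /* before: reach the VM page through the VVP-private slice */
    struct vvp_page *vpg =
            cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
    pgoff_t index = vvp_index(vpg);         /* vpg->vpg_page->index */
    struct page *vmpage = vpg->vpg_page;

    /* after: struct cl_page already carries the VM page */
    pgoff_t index = cl_page_index(page);    /* page->cp_vmpage->index */
    struct page *vmpage = page->cp_vmpage;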

Signed-off-by: John L. Hammond <jhammond@whamcloud.com>
Change-Id: I2cd408f08e6ff9f7686b591c02ea95e31ad2b2ae
Reviewed-on: https://review.whamcloud.com/47400
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/cl_object.h
lustre/llite/rw.c
lustre/llite/rw26.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_internal.h
lustre/llite/vvp_io.c
lustre/llite/vvp_object.c
lustre/llite/vvp_page.c

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 9d4769c..db21536 100644
@@ -966,6 +966,11 @@ static inline struct page *cl_page_vmpage(const struct cl_page *page)
        return page->cp_vmpage;
 }
 
+static inline pgoff_t cl_page_index(const struct cl_page *cp)
+{
+       return cl_page_vmpage(cp)->index;
+}
+
 /**
  * Check if a cl_page is in use.
  *
diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index df1be3f..66496e6 100644
@@ -1625,7 +1625,6 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
        struct ll_readahead_state *ras    = NULL;
        struct cl_2queue          *queue  = &io->ci_queue;
        struct cl_sync_io         *anchor = NULL;
-       struct vvp_page           *vpg;
        int                        rc = 0, rc2 = 0;
        bool                       uptodate;
        struct vvp_io *vio = vvp_env_io(env);
@@ -1647,7 +1646,6 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
        if (page->cp_vmpage && PagePrivate2(page->cp_vmpage))
                unlockpage = false;
 
-       vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
        uptodate = page->cp_defer_uptodate;
 
        if (ll_readahead_enabled(sbi) && !page->cp_ra_updated && ras) {
@@ -1657,7 +1655,7 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
                        flags |= LL_RAS_HIT;
                if (mmap)
                        flags |= LL_RAS_MMAP;
-               ras_update(sbi, inode, ras, vvp_index(vpg), flags, io);
+               ras_update(sbi, inode, ras, cl_page_index(page), flags, io);
        }
 
        cl_2queue_init(queue);
@@ -1679,15 +1677,15 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
                io_end_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos +
                                        io->u.ci_rw.crw_count - 1);
        } else {
-               io_start_index = vvp_index(vpg);
-               io_end_index = vvp_index(vpg);
+               io_start_index = cl_page_index(page);
+               io_end_index = cl_page_index(page);
        }
 
        if (ll_readahead_enabled(sbi) && ras && !io->ci_rand_read) {
                pgoff_t skip_index = 0;
 
-               if (ras->ras_next_readahead_idx < vvp_index(vpg))
-                       skip_index = vvp_index(vpg);
+               if (ras->ras_next_readahead_idx < cl_page_index(page))
+                       skip_index = cl_page_index(page);
                rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
                                   uptodate, file, skip_index,
                                   &ra_start_index);
@@ -1697,15 +1695,15 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
                CDEBUG(D_READA | (rc2 ? D_IOTRACE : 0),
                       DFID " %d pages read ahead at %lu, triggered by user read at %lu, stride offset %lld, stride length %lld, stride bytes %lld\n",
                       PFID(ll_inode2fid(inode)), rc2, ra_start_index,
-                      vvp_index(vpg), ras->ras_stride_offset,
+                      cl_page_index(page), ras->ras_stride_offset,
                       ras->ras_stride_length, ras->ras_stride_bytes);
 
-       } else if (vvp_index(vpg) == io_start_index &&
+       } else if (cl_page_index(page) == io_start_index &&
                   io_end_index - io_start_index > 0) {
                rc2 = ll_readpages(env, io, &queue->c2_qin, io_start_index + 1,
                                   io_end_index);
                CDEBUG(D_READA, DFID " %d pages read at %lu\n",
-                      PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+                      PFID(ll_inode2fid(inode)), rc2, cl_page_index(page));
        }
 
        if (queue->c2_qin.pl_nr > 0) {
@@ -1865,7 +1863,6 @@ int ll_readpage(struct file *file, struct page *vmpage)
                struct ll_file_data *fd = file->private_data;
                struct ll_readahead_state *ras = &fd->fd_ras;
                struct lu_env  *local_env = NULL;
-               struct vvp_page *vpg;
 
                result = -ENODATA;
 
@@ -1878,7 +1875,6 @@ int ll_readpage(struct file *file, struct page *vmpage)
                        RETURN(result);
                }
 
-               vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
                if (page->cp_defer_uptodate) {
                        enum ras_update_flags flags = LL_RAS_HIT;
 
@@ -1888,11 +1884,11 @@ int ll_readpage(struct file *file, struct page *vmpage)
                        /* For fast read, it updates read ahead state only
                         * if the page is hit in cache because non cache page
                         * case will be handled by slow read later. */
-                       ras_update(sbi, inode, ras, vvp_index(vpg), flags, io);
+                       ras_update(sbi, inode, ras, cl_page_index(page), flags, io);
                        /* avoid duplicate ras_update() call */
                        page->cp_ra_updated = 1;
 
-                       if (ll_use_fast_io(file, ras, vvp_index(vpg)))
+                       if (ll_use_fast_io(file, ras, cl_page_index(page)))
                                result = 0;
                }
 
diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index 28d6a30..a6e98a2 100644
@@ -584,8 +584,7 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
 {
        struct cl_attr *attr   = vvp_env_thread_attr(env);
        struct cl_object *obj  = io->ci_obj;
-       struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
-       loff_t          offset = cl_offset(obj, vvp_index(vpg));
+       loff_t          offset = cl_offset(obj, cl_page_index(pg));
        int             result;
        ENTRY;
 
@@ -603,7 +602,7 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
         * purposes here we can treat it like i_size.
         */
        if (attr->cat_kms <= offset) {
-               char *kaddr = kmap_atomic(vpg->vpg_page);
+               char *kaddr = kmap_atomic(pg->cp_vmpage);
 
                memset(kaddr, 0, cl_page_size(obj));
                kunmap_atomic(kaddr);
diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c
index 1ea1fde..0a2fc5b 100644
@@ -441,15 +441,14 @@ static struct page *vvp_pgcache_current(struct vvp_seq_private *priv)
 static void vvp_pgcache_page_show(const struct lu_env *env,
                                  struct seq_file *seq, struct cl_page *page)
 {
-       struct vvp_page *vpg;
-       struct page      *vmpage;
-       int              has_flags;
+       struct page *vmpage;
+       int has_flags;
 
-       vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
-       vmpage = vpg->vpg_page;
+       vmpage = page->cp_vmpage;
        seq_printf(seq, " %5i | %p %p %s %s | %p "DFID"(%p) %lu %u [",
                   0 /* gen */,
-                  vpg, page,
+                  NULL, /* was vvp_page */
+                  page,
                   "none",
                   PageWriteback(vmpage) ? "wb" : "-",
                   vmpage,
diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h
index 76085ce..acba772 100644
@@ -208,23 +208,8 @@ struct vvp_object {
 };
 
 /**
- * VVP-private page state.
+ * There is no VVP-private page state.
  */
-struct vvp_page {
-       struct cl_page_slice vpg_cl;
-       /** VM page */
-       struct page     *vpg_page;
-};
-
-static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
-{
-       return container_of(slice, struct vvp_page, vpg_cl);
-}
-
-static inline pgoff_t vvp_index(struct vvp_page *vpg)
-{
-       return vpg->vpg_page->index;
-}
 
 struct vvp_device {
        struct cl_device    vdv_cl;
@@ -266,7 +251,7 @@ struct vvp_object *cl_inode2vvp(struct inode *inode);
 
 static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
 {
-       return cl2vvp_page(slice)->vpg_page;
+       return slice->cpl_page->cp_vmpage;
 }
 
 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 5701737..eda72e8 100644
@@ -1122,15 +1122,13 @@ static bool page_list_sanity_check(struct cl_object *obj,
        pgoff_t index = CL_PAGE_EOF;
 
        cl_page_list_for_each(page, plist) {
-               struct vvp_page *vpg = cl_object_page_slice(obj, page);
-
                if (index == CL_PAGE_EOF) {
-                       index = vvp_index(vpg);
+                       index = cl_page_index(page);
                        continue;
                }
 
                ++index;
-               if (index == vvp_index(vpg))
+               if (index == cl_page_index(page))
                        continue;
 
                return false;
@@ -1526,7 +1524,6 @@ static int vvp_io_fault_start(const struct lu_env *env,
                wait_on_page_writeback(vmpage);
                if (!PageDirty(vmpage)) {
                        struct cl_page_list *plist = &vio->u.fault.ft_queue;
-                       struct vvp_page *vpg = cl_object_page_slice(obj, page);
                        int to = PAGE_SIZE;
 
                        /* vvp_page_assume() calls wait_on_page_writeback(). */
@@ -1536,7 +1533,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                        cl_page_list_add(plist, page, true);
 
                        /* size fixup */
-                       if (last_index == vvp_index(vpg))
+                       if (last_index == cl_page_index(page))
                                to = ((size - 1) & ~PAGE_MASK) + 1;
 
                        /* Do not set Dirty bit here so that in case IO is
diff --git a/lustre/llite/vvp_object.c b/lustre/llite/vvp_object.c
index ab5c68b..b19b30f 100644
@@ -234,7 +234,7 @@ static int vvp_object_init0(const struct lu_env *env,
                            const struct cl_object_conf *conf)
 {
        vob->vob_inode = conf->coc_inode;
-       cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
+       cl_object_page_init(&vob->vob_cl, sizeof(struct cl_page_slice));
        return 0;
 }
 
diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index aa494a9..31edb33 100644
@@ -131,14 +131,13 @@ static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
 {
-       struct vvp_page *vpg    = cl2vvp_page(slice);
-       struct cl_page  *pg     = slice->cpl_page;
-       struct page     *vmpage = vpg->vpg_page;
+       struct cl_page *cp = slice->cpl_page;
+       struct page *vmpage = cp->cp_vmpage;
 
        ENTRY;
-       CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+       CL_PAGE_HEADER(D_PAGE, env, cp, "completing WRITE with %d\n", ioret);
 
-       if (pg->cp_sync_io != NULL) {
+       if (cp->cp_sync_io != NULL) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
@@ -147,7 +146,7 @@ static void vvp_page_completion_write(const struct lu_env *env,
                 * Only mark the page error only when it's an async write
                 * because applications won't wait for IO to finish.
                 */
-               vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
+               vvp_vmpage_error(vvp_object_inode(cp->cp_obj), vmpage, ioret);
 
                end_page_writeback(vmpage);
        }
@@ -172,18 +171,16 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_page *page, pgoff_t index)
 {
-       struct vvp_page *vpg = cl_object_page_slice(obj, page);
-       struct page     *vmpage = page->cp_vmpage;
+       struct cl_page_slice *cpl = cl_object_page_slice(obj, page);
+       struct page *vmpage = page->cp_vmpage;
 
        CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
-       vpg->vpg_page = vmpage;
-
        if (page->cp_type == CPT_TRANSIENT) {
                /* DIO pages are referenced by userspace, we don't need to take
                 * a reference on them. (contrast with get_page() call above)
                 */
-               cl_page_slice_add(page, &vpg->vpg_cl, obj,
+               cl_page_slice_add(page, cpl, obj,
                                  &vvp_transient_page_ops);
        } else {
                get_page(vmpage);
@@ -191,8 +188,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
-               cl_page_slice_add(page, &vpg->vpg_cl, obj,
-                               &vvp_page_ops);
+               cl_page_slice_add(page, cpl, obj, &vvp_page_ops);
        }
 
        return 0;