LU-5971 llite: rename ccc_page to vvp_page
author    John L. Hammond <john.hammond@intel.com>
          Tue, 16 Dec 2014 15:01:48 +0000 (09:01 -0600)
committer Oleg Drokin <oleg.drokin@intel.com>
          Fri, 9 Jan 2015 03:10:17 +0000 (03:10 +0000)
Rename struct ccc_page to struct vvp_page and remove obsolete CCC page
methods.
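
In outline, this is a mechanical rename of the per-page VVP slice: the
cpg_-prefixed members of struct ccc_page become the vpg_-prefixed members
of struct vvp_page, and the accessors are renamed to match. A condensed
sketch, abridged from the vvp_internal.h hunk below (not the full patch):

    /* Before: ccc-private page state. */
    struct ccc_page {
            struct cl_page_slice cpg_cl;
            unsigned             cpg_defer_uptodate:1,
                                 cpg_ra_used:1,
                                 cpg_write_queued:1;
            struct list_head     cpg_pending_linkage;
            struct page         *cpg_page;   /* VM page */
    };

    static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice);
    static inline pgoff_t ccc_index(struct ccc_page *ccc);

    /* After: the same state under the VVP name. */
    struct vvp_page {
            struct cl_page_slice vpg_cl;
            unsigned             vpg_defer_uptodate:1,
                                 vpg_ra_used:1,
                                 vpg_write_queued:1;
            struct list_head     vpg_pending_linkage;
            struct page         *vpg_page;   /* VM page */
    };

    static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice);
    static inline pgoff_t vvp_index(struct vvp_page *vpg);

As the diff below shows, ccc_fail() is reimplemented as the static
vvp_page_fail(), ccc_transient_page_prep() as the static
vvp_transient_page_prep() in vvp_page.c, cl2vm_page() moves into
vvp_internal.h as a static inline, and the remaining ccc_transient_page_*
helpers are dropped in favor of the existing vvp_transient_page_* methods.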

Signed-off-by: John L. Hammond <john.hammond@intel.com>
Change-Id: Ie45b23230fdaed5c3004e73d1faf5d6a4af32447
Reviewed-on: http://review.whamcloud.com/13086
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
12 files changed:
lustre/doc/clio.txt
lustre/include/cl_object.h
lustre/llite/lcommon_cl.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/rw.c
lustre/llite/rw26.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_internal.h
lustre/llite/vvp_io.c
lustre/llite/vvp_object.c
lustre/llite/vvp_page.c

diff --git a/lustre/doc/clio.txt b/lustre/doc/clio.txt
index 3b32e4b..22cb71a 100644
@@ -375,7 +375,7 @@ VVP implements VM entry points: ll_{write,invalidate,release}page().
 For file objects, VVP slice (vvp_object) contains a pointer to an
 inode.
 
-For pages, the VVP slice (ccc_page) contains a pointer to the VM page
+For pages, the VVP slice (vvp_page) contains a pointer to the VM page
 (cfs_page_t), a `defer up to date' flag to track read-ahead hits (similar to
 the pre-CLIO client), and fields necessary for synchronous transfer (see
 below).  VVP is responsible for implementation of the interaction between
@@ -592,7 +592,7 @@ cl_page:
            cl_object_header<------------cl_page<-----------------+
                    |          ->cp_obj     |                     |
                    V                       V                     |
-    inode<----vvp_object<---------------ccc_page---->cfs_page_t  |
+    inode<----vvp_object<---------------vvp_page---->cfs_page_t  |
                    |        ->cpl_obj      |                     |
                    V                       V                     | ->cp_child
               lov_object<---------------lov_page                 |
diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 0a97d1c..37f3fc2 100644
@@ -753,7 +753,7 @@ struct cl_page {
 /**
  * Per-layer part of cl_page.
  *
- * \see ccc_page, lov_page, osc_page
+ * \see vvp_page, lov_page, osc_page
  */
 struct cl_page_slice {
         struct cl_page                  *cpl_page;
diff --git a/lustre/llite/lcommon_cl.c b/lustre/llite/lcommon_cl.c
index be405e3..01a9a90 100644
@@ -254,78 +254,6 @@ static void vvp_object_size_unlock(struct cl_object *obj)
 
 /*****************************************************************************
  *
- * Page operations.
- *
- */
-
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
-{
-        /*
-         * Cached read?
-         */
-        LBUG();
-        return 0;
-}
-
-void ccc_transient_page_verify(const struct cl_page *page)
-{
-}
-
-int ccc_transient_page_own(const struct lu_env *env,
-                                   const struct cl_page_slice *slice,
-                                   struct cl_io *unused,
-                                   int nonblock)
-{
-        ccc_transient_page_verify(slice->cpl_page);
-        return 0;
-}
-
-void ccc_transient_page_assume(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      struct cl_io *unused)
-{
-        ccc_transient_page_verify(slice->cpl_page);
-}
-
-void ccc_transient_page_unassume(const struct lu_env *env,
-                                        const struct cl_page_slice *slice,
-                                        struct cl_io *unused)
-{
-        ccc_transient_page_verify(slice->cpl_page);
-}
-
-void ccc_transient_page_disown(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      struct cl_io *unused)
-{
-        ccc_transient_page_verify(slice->cpl_page);
-}
-
-void ccc_transient_page_discard(const struct lu_env *env,
-                                       const struct cl_page_slice *slice,
-                                       struct cl_io *unused)
-{
-        struct cl_page *page = slice->cpl_page;
-
-        ccc_transient_page_verify(slice->cpl_page);
-
-        /*
-         * For transient pages, remove it from the radix tree.
-         */
-        cl_page_delete(env, page);
-}
-
-int ccc_transient_page_prep(const struct lu_env *env,
-                                   const struct cl_page_slice *slice,
-                                   struct cl_io *unused)
-{
-        ENTRY;
-        /* transient page should always be sent. */
-        RETURN(0);
-}
-
-/*****************************************************************************
- *
  * Lock operations.
  *
  */
@@ -719,22 +647,6 @@ struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
         return container_of0(slice, struct ccc_req, crq_cl);
 }
 
-struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
-        return cl2ccc_page(slice)->cpg_page;
-}
-
-/**
- * Returns a pointer to cl_page associated with \a vmpage, without acquiring
- * additional reference to the resulting page. This is an unsafe version of
- * cl_vmpage_page() that can only be used under vmpage lock.
- */
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
-{
-        KLASSERT(PageLocked(vmpage));
-        return (struct cl_page *)vmpage->private;
-}
-
 /**
  * Initialize or update CLIO structures for regular files when new
  * meta-data arrives from the server.
diff --git a/lustre/llite/llite_close.c b/lustre/llite/llite_close.c
index 39eaade..7dd5e2c 100644
 #include "llite_internal.h"
 
 /** records that a write is in flight */
-void vvp_write_pending(struct vvp_object *club, struct ccc_page *page)
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
 {
        struct ll_inode_info *lli = ll_i2info(club->vob_inode);
 
        ENTRY;
        spin_lock(&lli->lli_lock);
        lli->lli_flags |= LLIF_SOM_DIRTY;
-       if (page != NULL && list_empty(&page->cpg_pending_linkage))
-               list_add(&page->cpg_pending_linkage,
+       if (page != NULL && list_empty(&page->vpg_pending_linkage))
+               list_add(&page->vpg_pending_linkage,
                             &club->vob_pending_list);
        spin_unlock(&lli->lli_lock);
        EXIT;
 }
 
 /** records that a write has completed */
-void vvp_write_complete(struct vvp_object *club, struct ccc_page *page)
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
 {
        struct ll_inode_info *lli = ll_i2info(club->vob_inode);
        int rc = 0;
 
        ENTRY;
        spin_lock(&lli->lli_lock);
-       if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
-               list_del_init(&page->cpg_pending_linkage);
+       if (page != NULL && !list_empty(&page->vpg_pending_linkage)) {
+               list_del_init(&page->vpg_pending_linkage);
                rc = 1;
        }
        spin_unlock(&lli->lli_lock);
diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h
index ddc92a1..f54d95f 100644
@@ -986,8 +986,8 @@ struct ll_close_queue {
        atomic_t                lcq_stop;
 };
 
-void vvp_write_pending(struct vvp_object *club, struct ccc_page *page);
-void vvp_write_complete(struct vvp_object *club, struct ccc_page *page);
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
 
 /* specific achitecture can implement only part of this list */
 enum vvp_io_subtype {
diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index ba1ec84..70953fc 100644
@@ -243,7 +243,7 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
                              struct cl_object *clob, pgoff_t *max_index)
 {
        struct page *vmpage = page->cp_vmpage;
-       struct ccc_page *cp;
+       struct vvp_page *vpg;
        int              rc;
 
        ENTRY;
@@ -251,18 +251,18 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
        rc = 0;
        cl_page_assume(env, io, page);
        lu_ref_add(&page->cp_reference, "ra", current);
-       cp = cl2ccc_page(cl_object_page_slice(clob, page));
-       if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
+       vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+       if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
                CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
-                      ccc_index(cp), *max_index);
+                      vvp_index(vpg), *max_index);
                /* Disable the optimization on prefetching maximum readahead
                 * index because there is a race with lock cancellation. This
                 * optimization will be revived later.
-                * if (*max_index == 0 || ccc_index(cp) > *max_index) */
+                * if (*max_index == 0 || vvp_index(vpg) > *max_index) */
                rc = cl_page_is_under_lock(env, io, page, max_index);
                if (rc == 0) {
-                       cp->cpg_defer_uptodate = 1;
-                       cp->cpg_ra_used = 0;
+                       vpg->vpg_defer_uptodate = 1;
+                       vpg->vpg_ra_used = 0;
                        cl_page_list_add(queue, page);
                        rc = 1;
                } else {
diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index b6b4a69..b218f5e 100644
@@ -487,8 +487,8 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
 {
        struct cl_attr *attr   = ccc_env_thread_attr(env);
        struct cl_object *obj  = io->ci_obj;
-       struct ccc_page *cp    = cl_object_page_slice(obj, pg);
-       loff_t          offset = cl_offset(obj, ccc_index(cp));
+       struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
+       loff_t          offset = cl_offset(obj, vvp_index(vpg));
        int             result;
 
        cl_object_attr_lock(obj);
@@ -501,12 +501,12 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                 * purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
-                       char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
+                       char *kaddr = ll_kmap_atomic(vpg->vpg_page, KM_USER0);
 
                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
 
                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
-               } else if (cp->cpg_defer_uptodate)
-                       cp->cpg_ra_used = 1;
+               } else if (vpg->vpg_defer_uptodate)
+                       vpg->vpg_ra_used = 1;
                else
                        result = ll_page_sync_io(env, io, pg, CRT_READ);
        }
diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c
index df545df..39caedd 100644
@@ -471,18 +471,18 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
 static void vvp_pgcache_page_show(const struct lu_env *env,
                                  struct seq_file *seq, struct cl_page *page)
 {
-       struct ccc_page *cpg;
+       struct vvp_page *vpg;
        struct page      *vmpage;
        int              has_flags;
 
-       cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
-       vmpage = cpg->cpg_page;
+       vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
+       vmpage = vpg->vpg_page;
        seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
                   0 /* gen */,
-                  cpg, page,
+                  vpg, page,
                   "none",
                   "none",
-                  cpg->cpg_write_queued ? "wq" : "- ",
-                  cpg->cpg_defer_uptodate ? "du" : "- ",
+                  vpg->vpg_write_queued ? "wq" : "- ",
+                  vpg->vpg_defer_uptodate ? "du" : "- ",
                   PageWriteback(vmpage) ? "wb" : "-",
                   vmpage,
                   PFID(ll_inode2fid(vmpage->mapping->host)),
diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h
index 5b8c747..f441d91 100644
@@ -215,7 +215,7 @@ struct vvp_object {
         * A list of dirty pages pending IO in the cache. Used by
         * SOM. Protected by ll_inode_info::lli_lock.
         *
-        * \see ccc_page::cpg_pending_linkage
+        * \see vvp_page::vpg_pending_linkage
         */
        struct list_head        vob_pending_list;
 
@@ -246,36 +246,34 @@ struct vvp_object {
 };
 
 /**
- * ccc-private page state.
+ * VVP-private page state.
  */
-struct ccc_page {
-       struct cl_page_slice cpg_cl;
-       unsigned        cpg_defer_uptodate:1,
-                       cpg_ra_used:1,
-                       cpg_write_queued:1;
+struct vvp_page {
+       struct cl_page_slice vpg_cl;
+       unsigned        vpg_defer_uptodate:1,
+                       vpg_ra_used:1,
+                       vpg_write_queued:1;
        /**
         * Non-empty iff this page is already counted in
         * vvp_object::vob_pending_list. This list is only used as a flag,
         * that is, never iterated through, only checked for list_empty(), but
         * having a list is useful for debugging.
         */
-       struct list_head cpg_pending_linkage;
+       struct list_head vpg_pending_linkage;
        /** VM page */
-       struct page     *cpg_page;
+       struct page     *vpg_page;
 };
 
-static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
+static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
 {
-       return container_of(slice, struct ccc_page, cpg_cl);
+       return container_of(slice, struct vvp_page, vpg_cl);
 }
 
-static inline pgoff_t ccc_index(struct ccc_page *ccc)
+static inline pgoff_t vvp_index(struct vvp_page *vpg)
 {
-       return ccc->cpg_cl.cpl_index;
+       return vpg->vpg_cl.cpl_index;
 }
 
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
-
 struct vvp_device {
        struct cl_device    vdv_cl;
        struct super_block *vdv_sb;
@@ -306,26 +304,6 @@ void ccc_global_fini(struct lu_device_type *device_type);
 int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io,
                  const struct cl_lock_operations *lkops);
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
-void ccc_transient_page_verify(const struct cl_page *page);
-int  ccc_transient_page_own(const struct lu_env *env,
-                           const struct cl_page_slice *slice,
-                           struct cl_io *io, int nonblock);
-void ccc_transient_page_assume(const struct lu_env *env,
-                              const struct cl_page_slice *slice,
-                              struct cl_io *io);
-void ccc_transient_page_unassume(const struct lu_env *env,
-                                const struct cl_page_slice *slice,
-                                struct cl_io *io);
-void ccc_transient_page_disown(const struct lu_env *env,
-                              const struct cl_page_slice *slice,
-                              struct cl_io *io);
-void ccc_transient_page_discard(const struct lu_env *env,
-                               const struct cl_page_slice *slice,
-                               struct cl_io *io);
-int ccc_transient_page_prep(const struct lu_env *env,
-                           const struct cl_page_slice *slice,
-                           struct cl_io *io);
 void ccc_lock_delete(const struct lu_env *env,
                     const struct cl_lock_slice *slice);
 void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
@@ -386,16 +364,19 @@ static inline struct inode *vvp_object_inode(const struct cl_object *obj)
 int vvp_object_invariant(const struct cl_object *obj);
 struct vvp_object *cl_inode2vvp(struct inode *inode);
 
+static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
+{
+       return cl2vvp_page(slice)->vpg_page;
+}
+
 struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice);
 struct ccc_io *cl2ccc_io(const struct lu_env *env,
                         const struct cl_io_slice *slice);
 struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);
-struct page *cl2vm_page(const struct cl_page_slice *slice);
 
 int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
                   struct obd_capa *capa);
 
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
 int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
 void cl_inode_fini(struct inode *inode);
 int cl_local_size(struct inode *inode);
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index bc77a92..88ccaa5 100644
@@ -650,15 +650,15 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *page)
 {
-       struct ccc_page *cp;
+       struct vvp_page *vpg;
        struct page *vmpage = page->cp_vmpage;
        struct cl_object *clob = cl_io_top(io)->ci_obj;
 
        SetPageUptodate(vmpage);
        set_page_dirty(vmpage);
 
-       cp = cl2ccc_page(cl_object_page_slice(clob, page));
-       vvp_write_pending(cl2vvp(clob), cp);
+       vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2vvp(clob), vpg);
 
        cl_page_disown(env, io, page);
 
@@ -675,15 +675,15 @@ static bool page_list_sanity_check(struct cl_object *obj,
        pgoff_t index = CL_PAGE_EOF;
 
        cl_page_list_for_each(page, plist) {
-               struct ccc_page *cp = cl_object_page_slice(obj, page);
+               struct vvp_page *vpg = cl_object_page_slice(obj, page);
 
                if (index == CL_PAGE_EOF) {
-                       index = ccc_index(cp);
+                       index = vvp_index(vpg);
                        continue;
                }
 
                ++index;
-               if (index == ccc_index(cp))
+               if (index == vvp_index(vpg))
                        continue;
 
                return false;
@@ -893,13 +893,13 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
                                    struct cl_page *page)
 {
-       struct ccc_page *cp;
+       struct vvp_page *vpg;
        struct cl_object *clob = cl_io_top(io)->ci_obj;
 
        set_page_dirty(page->cp_vmpage);
 
-       cp = cl2ccc_page(cl_object_page_slice(clob, page));
-       vvp_write_pending(cl2vvp(clob), cp);
+       vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2vvp(clob), vpg);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
@@ -999,7 +999,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                wait_on_page_writeback(vmpage);
                if (!PageDirty(vmpage)) {
                        struct cl_page_list *plist = &io->ci_queue.c2_qin;
-                       struct ccc_page *cp = cl_object_page_slice(obj, page);
+                       struct vvp_page *vpg = cl_object_page_slice(obj, page);
                        int to = PAGE_SIZE;
 
                        /* vvp_page_assume() calls wait_on_page_writeback(). */
@@ -1009,7 +1009,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                        cl_page_list_add(plist, page);
 
                        /* size fixup */
-                       if (last_index == ccc_index(cp))
+                       if (last_index == vvp_index(vpg))
                                to = size & ~CFS_PAGE_MASK;
 
                        /* Do not set Dirty bit here so that in case IO is
@@ -1077,7 +1077,7 @@ static int vvp_io_read_page(const struct lu_env *env,
                             const struct cl_page_slice *slice)
 {
        struct cl_io              *io     = ios->cis_io;
-       struct ccc_page           *cp     = cl2ccc_page(slice);
+       struct vvp_page           *vpg    = cl2vvp_page(slice);
        struct cl_page            *page   = slice->cpl_page;
        struct inode              *inode  = vvp_object_inode(slice->cpl_obj);
        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
@@ -1089,23 +1089,23 @@ static int vvp_io_read_page(const struct lu_env *env,
 
        if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
            sbi->ll_ra_info.ra_max_pages > 0)
-               ras_update(sbi, inode, ras, ccc_index(cp),
-                          cp->cpg_defer_uptodate);
+               ras_update(sbi, inode, ras, vvp_index(vpg),
+                          vpg->vpg_defer_uptodate);
 
-        if (cp->cpg_defer_uptodate) {
-                cp->cpg_ra_used = 1;
-                cl_page_export(env, page, 1);
-        }
+       if (vpg->vpg_defer_uptodate) {
+               vpg->vpg_ra_used = 1;
+               cl_page_export(env, page, 1);
+       }
 
-        /*
-         * Add page into the queue even when it is marked uptodate above.
-         * this will unlock it automatically as part of cl_page_list_disown().
-         */
-        cl_2queue_add(queue, page);
+       /*
+        * Add page into the queue even when it is marked uptodate above.
+        * this will unlock it automatically as part of cl_page_list_disown().
+        */
+       cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
            sbi->ll_ra_info.ra_max_pages > 0)
                ll_readahead(env, io, &queue->c2_qin, ras,
-                            cp->cpg_defer_uptodate);
+                            vpg->vpg_defer_uptodate);
 
        RETURN(0);
 }
diff --git a/lustre/llite/vvp_object.c b/lustre/llite/vvp_object.c
index f1a97f7..55f4e26 100644
@@ -209,7 +209,7 @@ static int vvp_object_init0(const struct lu_env *env,
 {
        vob->vob_inode = conf->coc_inode;
        atomic_set(&vob->vob_transient_pages, 0);
-       cl_object_page_init(&vob->vob_cl, sizeof(struct ccc_page));
+       cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
        return 0;
 }
 
diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index dccfb12..d8ebd14 100644
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-
-#include <obd.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+
+#include <libcfs/libcfs.h>
 #include "llite_internal.h"
 #include "vvp_internal.h"
 
@@ -52,9 +58,9 @@
  *
  */
 
-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg)
 {
-       struct page *vmpage = cp->cpg_page;
+       struct page *vmpage = vpg->vpg_page;
 
        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
@@ -63,40 +69,41 @@ static void vvp_page_fini_common(struct ccc_page *cp)
 static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
 {
-       struct ccc_page *cp = cl2ccc_page(slice);
-       struct page *vmpage  = cp->cpg_page;
+       struct vvp_page *vpg     = cl2vvp_page(slice);
+       struct page     *vmpage  = vpg->vpg_page;
 
        /*
         * vmpage->private was already cleared when page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
-       vvp_page_fini_common(cp);
+       vvp_page_fini_common(vpg);
 }
 
 static int vvp_page_own(const struct lu_env *env,
-                        const struct cl_page_slice *slice, struct cl_io *io,
-                        int nonblock)
+                       const struct cl_page_slice *slice, struct cl_io *io,
+                       int nonblock)
 {
-       struct ccc_page *vpg    = cl2ccc_page(slice);
-       struct page      *vmpage = vpg->cpg_page;
+       struct vvp_page *vpg    = cl2vvp_page(slice);
+       struct page     *vmpage = vpg->vpg_page;
 
        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;
 
-                if (unlikely(PageWriteback(vmpage))) {
-                        unlock_page(vmpage);
-                        return -EAGAIN;
-                }
+               if (unlikely(PageWriteback(vmpage))) {
+                       unlock_page(vmpage);
+                       return -EAGAIN;
+               }
 
-                return 0;
-        }
+               return 0;
+       }
 
-        lock_page(vmpage);
-        wait_on_page_writeback(vmpage);
-        return 0;
+       lock_page(vmpage);
+       wait_on_page_writeback(vmpage);
+
+       return 0;
 }
 
 static void vvp_page_assume(const struct lu_env *env,
@@ -132,23 +139,23 @@ static void vvp_page_disown(const struct lu_env *env,
 }
 
 static void vvp_page_discard(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *unused)
+                            const struct cl_page_slice *slice,
+                            struct cl_io *unused)
 {
        struct page     *vmpage = cl2vm_page(slice);
-       struct ccc_page *cpg    = cl2ccc_page(slice);
+       struct vvp_page *vpg    = cl2vvp_page(slice);
 
        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
 
-       if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
+       if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
 
        ll_invalidate_page(vmpage);
 }
 
 static void vvp_page_delete(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+                           const struct cl_page_slice *slice)
 {
        struct page      *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
@@ -160,7 +167,7 @@ static void vvp_page_delete(const struct lu_env *env,
        LASSERT((struct cl_page *)vmpage->private == page);
        LASSERT(inode == vvp_object_inode(obj));
 
-       vvp_write_complete(cl2vvp(obj), cl2ccc_page(slice));
+       vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
 
        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
@@ -214,7 +221,7 @@ static int vvp_page_prep_write(const struct lu_env *env,
        LASSERT(!PageDirty(vmpage));
 
        set_page_writeback(vmpage);
-       vvp_write_pending(cl2vvp(slice->cpl_obj), cl2ccc_page(slice));
+       vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
 
        return 0;
 }
@@ -248,69 +255,70 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
 }
 
 static void vvp_page_completion_read(const struct lu_env *env,
-                                     const struct cl_page_slice *slice,
-                                     int ioret)
+                                    const struct cl_page_slice *slice,
+                                    int ioret)
 {
-        struct ccc_page *cp     = cl2ccc_page(slice);
-       struct page     *vmpage = cp->cpg_page;
+       struct vvp_page *vpg    = cl2vvp_page(slice);
+       struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);
-        ENTRY;
+       ENTRY;
 
-        LASSERT(PageLocked(vmpage));
-        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
+       LASSERT(PageLocked(vmpage));
+       CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
 
-        if (cp->cpg_defer_uptodate)
-                ll_ra_count_put(ll_i2sbi(inode), 1);
+       if (vpg->vpg_defer_uptodate)
+               ll_ra_count_put(ll_i2sbi(inode), 1);
 
-        if (ioret == 0)  {
-                if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page, 1);
-        } else
-                cp->cpg_defer_uptodate = 0;
+       if (ioret == 0)  {
+               if (!vpg->vpg_defer_uptodate)
+                       cl_page_export(env, page, 1);
+       } else {
+               vpg->vpg_defer_uptodate = 0;
+       }
 
-        if (page->cp_sync_io == NULL)
-                unlock_page(vmpage);
+       if (page->cp_sync_io == NULL)
+               unlock_page(vmpage);
 
-        EXIT;
+       EXIT;
 }
 
 static void vvp_page_completion_write(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      int ioret)
+                                     const struct cl_page_slice *slice,
+                                     int ioret)
 {
-       struct ccc_page *cp     = cl2ccc_page(slice);
+       struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct cl_page  *pg     = slice->cpl_page;
-       struct page      *vmpage = cp->cpg_page;
+       struct page     *vmpage = vpg->vpg_page;
        ENTRY;
 
-        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
-        LASSERT(PageWriteback(vmpage));
+       LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
+       LASSERT(PageWriteback(vmpage));
 
-        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+       CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
-        /*
-         * TODO: Actually it makes sense to add the page into oap pending
-         * list again and so that we don't need to take the page out from
-         * SoM write pending list, if we just meet a recoverable error,
-         * -ENOMEM, etc.
-         * To implement this, we just need to return a non zero value in
-         * ->cpo_completion method. The underlying transfer should be notified
-         * and then re-add the page into pending transfer queue.  -jay
-         */
+       /*
+        * TODO: Actually it makes sense to add the page into oap pending
+        * list again and so that we don't need to take the page out from
+        * SoM write pending list, if we just meet a recoverable error,
+        * -ENOMEM, etc.
+        * To implement this, we just need to return a non zero value in
+        * ->cpo_completion method. The underlying transfer should be notified
+        * and then re-add the page into pending transfer queue.  -jay
+        */
 
-        cp->cpg_write_queued = 0;
-       vvp_write_complete(cl2vvp(slice->cpl_obj), cp);
+       vpg->vpg_write_queued = 0;
+       vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
 
-        /*
-         * Only mark the page error only when it's an async write because
-         * applications won't wait for IO to finish.
-         */
-        if (pg->cp_sync_io == NULL)
+       /*
+        * Only mark the page error only when it's an async write because
+        * applications won't wait for IO to finish.
+        */
+       if (pg->cp_sync_io == NULL)
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
 
-        end_page_writeback(vmpage);
-        EXIT;
+       end_page_writeback(vmpage);
+       EXIT;
 }
 
 /**
@@ -341,7 +349,7 @@ static int vvp_page_make_ready(const struct lu_env *env,
                 * tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2vvp(slice->cpl_obj),
-                               cl2ccc_page(slice));
+                               cl2vvp_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page() to already
@@ -374,53 +382,76 @@ static int vvp_page_is_under_lock(const struct lu_env *env,
 
 
 static int vvp_page_print(const struct lu_env *env,
-                          const struct cl_page_slice *slice,
-                          void *cookie, lu_printer_t printer)
+                         const struct cl_page_slice *slice,
+                         void *cookie, lu_printer_t printer)
 {
-       struct ccc_page *vp = cl2ccc_page(slice);
-       struct page      *vmpage = vp->cpg_page;
-
-        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
-                   "vm@%p ",
-                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
-                   vp->cpg_write_queued, vmpage);
-        if (vmpage != NULL) {
-                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
-                           (long)vmpage->flags, page_count(vmpage),
-                           page_mapcount(vmpage), vmpage->private,
-                           page_index(vmpage),
-                           list_empty(&vmpage->lru) ? "not-" : "");
-        }
-        (*printer)(env, cookie, "\n");
-        return 0;
+       struct vvp_page *vpg    = cl2vvp_page(slice);
+       struct page     *vmpage = vpg->vpg_page;
+
+       (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
+                  "vm@%p ",
+                  vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
+                  vpg->vpg_write_queued, vmpage);
+
+       if (vmpage != NULL) {
+               (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
+                          (long)vmpage->flags, page_count(vmpage),
+                          page_mapcount(vmpage), vmpage->private,
+                          page_index(vmpage),
+                          list_empty(&vmpage->lru) ? "not-" : "");
+       }
+
+       (*printer)(env, cookie, "\n");
+
+       return 0;
+}
+
+static int vvp_page_fail(const struct lu_env *env,
+                        const struct cl_page_slice *slice)
+{
+       /*
+        * Cached read?
+        */
+       LBUG();
+
+       return 0;
 }
 
 static const struct cl_page_operations vvp_page_ops = {
 }
 
 static const struct cl_page_operations vvp_page_ops = {
-        .cpo_own           = vvp_page_own,
-        .cpo_assume        = vvp_page_assume,
-        .cpo_unassume      = vvp_page_unassume,
-        .cpo_disown        = vvp_page_disown,
-        .cpo_discard       = vvp_page_discard,
-        .cpo_delete        = vvp_page_delete,
-        .cpo_export        = vvp_page_export,
-        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
-        .cpo_fini          = vvp_page_fini,
-        .cpo_print         = vvp_page_print,
-        .cpo_is_under_lock = vvp_page_is_under_lock,
-        .io = {
-                [CRT_READ] = {
-                        .cpo_prep        = vvp_page_prep_read,
-                        .cpo_completion  = vvp_page_completion_read,
-                        .cpo_make_ready  = ccc_fail,
-                },
-                [CRT_WRITE] = {
-                        .cpo_prep        = vvp_page_prep_write,
-                        .cpo_completion  = vvp_page_completion_write,
-                        .cpo_make_ready  = vvp_page_make_ready,
-                }
-        }
+       .cpo_own           = vvp_page_own,
+       .cpo_assume        = vvp_page_assume,
+       .cpo_unassume      = vvp_page_unassume,
+       .cpo_disown        = vvp_page_disown,
+       .cpo_discard       = vvp_page_discard,
+       .cpo_delete        = vvp_page_delete,
+       .cpo_export        = vvp_page_export,
+       .cpo_is_vmlocked   = vvp_page_is_vmlocked,
+       .cpo_fini          = vvp_page_fini,
+       .cpo_print         = vvp_page_print,
+       .cpo_is_under_lock = vvp_page_is_under_lock,
+       .io = {
+               [CRT_READ] = {
+                       .cpo_prep       = vvp_page_prep_read,
+                       .cpo_completion = vvp_page_completion_read,
+                       .cpo_make_ready = vvp_page_fail,
+               },
+               [CRT_WRITE] = {
+                       .cpo_prep       = vvp_page_prep_write,
+                       .cpo_completion = vvp_page_completion_write,
+                       .cpo_make_ready = vvp_page_make_ready,
+               },
+       },
 };
 
 };
 
+static int vvp_transient_page_prep(const struct lu_env *env,
+                                  const struct cl_page_slice *slice,
+                                  struct cl_io *unused)
+{
+       ENTRY;
+       /* transient page should always be sent. */
+       RETURN(0);
+}
+
 static void vvp_transient_page_verify(const struct cl_page *page)
 {
 }
@@ -491,11 +522,11 @@ vvp_transient_page_completion(const struct lu_env *env,
 static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
 {
-       struct ccc_page *cp = cl2ccc_page(slice);
+       struct vvp_page *vpg = cl2vvp_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct vvp_object *clobj = cl2vvp(clp->cp_obj);
 
-       vvp_page_fini_common(cp);
+       vvp_page_fini_common(vpg);
        atomic_dec(&clobj->vob_transient_pages);
 }
 
@@ -511,11 +542,11 @@ static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_is_under_lock      = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
-                       .cpo_prep       = ccc_transient_page_prep,
+                       .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
-                       .cpo_prep       = ccc_transient_page_prep,
+                       .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
@@ -524,29 +555,28 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_page *page, pgoff_t index)
 {
-       struct ccc_page *cpg = cl_object_page_slice(obj, page);
+       struct vvp_page *vpg = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;
 
        CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
-       cpg->cpg_page = vmpage;
+       vpg->vpg_page = vmpage;
        page_cache_get(vmpage);
 
-       INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+       INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
-               cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
+               cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);
 
-               cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
+               cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                &vvp_transient_page_ops);
                atomic_inc(&clobj->vob_transient_pages);
        }
        return 0;
 }
-