Whamcloud - gitweb
LU-3321 clio: remove stackable cl_page completely 95/7895/17
authorJinshan Xiong <jinshan.xiong@intel.com>
Wed, 6 Nov 2013 05:51:39 +0000 (21:51 -0800)
committerOleg Drokin <oleg.drokin@intel.com>
Wed, 11 Dec 2013 07:54:00 +0000 (07:54 +0000)
From now on, cl_page becomes one to one mapping of vmpage.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Change-Id: I13156d9f10ee43c614d1e43eed6743db1574069a
Reviewed-on: http://review.whamcloud.com/7895
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
25 files changed:
libcfs/include/libcfs/posix/posix-types.h
lustre/include/cl_object.h
lustre/include/lclient.h
lustre/lclient/lcommon_cl.c
lustre/liblustre/llite_cl.c
lustre/llite/llite_internal.h
lustre/llite/rw.c
lustre/llite/rw26.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_internal.h
lustre/llite/vvp_io.c
lustre/llite/vvp_page.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_object.c
lustre/lov/lov_page.c
lustre/lov/lovsub_page.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdecho/echo_client.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_page.c

index d4a378d..446db6e 100644 (file)
@@ -115,4 +115,6 @@ typedef long long_ptr_t;
 #define __release(x) ((void)0)
 #define __cond_lock(x, c) (c)
 
+typedef unsigned long pgoff_t;
+
 #endif
index ea0597a..0aedd62 100644 (file)
@@ -321,7 +321,7 @@ struct cl_object_operations {
          *         to be used instead of newly created.
          */
        int  (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
-                               struct cl_page *page, struct page *vmpage);
+                               struct cl_page *page, pgoff_t index);
         /**
          * Initialize lock slice for this layer. Called top-to-bottom through
          * every object layer when a new cl_lock is instantiated. Layer
@@ -458,10 +458,6 @@ struct cl_object_header {
                                         co_lu.lo_linkage)
 /** @} cl_object */
 
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
 #define CL_PAGE_EOF ((pgoff_t)~0ull)
 
 /** \addtogroup cl_page cl_page
@@ -722,20 +718,14 @@ struct cl_page {
         cfs_atomic_t             cp_ref;
         /** An object this page is a part of. Immutable after creation. */
         struct cl_object        *cp_obj;
-        /** Logical page index within the object. Immutable after creation. */
-        pgoff_t                  cp_index;
         /** List of slices. Immutable after creation. */
         cfs_list_t               cp_layers;
-        /** Parent page, NULL for top-level page. Immutable after creation. */
-        struct cl_page          *cp_parent;
-        /** Lower-layer page. NULL for bottommost page. Immutable after
-         * creation. */
-        struct cl_page          *cp_child;
-        /**
-         * Page state. This field is const to avoid accidental update, it is
-         * modified only internally within cl_page.c. Protected by a VM lock.
-         */
-        const enum cl_page_state cp_state;
+       struct page             *cp_vmpage;
+       /**
+        * Page state. This field is const to avoid accidental update, it is
+        * modified only internally within cl_page.c. Protected by a VM lock.
+        */
+       const enum cl_page_state cp_state;
        /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
        cfs_list_t              cp_batch;
        /** Mutex serializing membership of a page in a batch. */
@@ -785,6 +775,7 @@ struct cl_page {
  */
 struct cl_page_slice {
         struct cl_page                  *cpl_page;
+       pgoff_t                          cpl_index;
         /**
          * Object slice corresponding to this page slice. Immutable after
          * creation.
@@ -840,11 +831,6 @@ struct cl_page_operations {
          */
 
         /**
-         * \return the underlying VM page. Optional.
-         */
-       struct page *(*cpo_vmpage)(const struct lu_env *env,
-                                  const struct cl_page_slice *slice);
-        /**
          * Called when \a io acquires this page into the exclusive
          * ownership. When this method returns, it is guaranteed that the is
          * not owned by other io, and no transfer is going on against
@@ -1093,6 +1079,12 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
 #define cl_page_in_use(pg)       __page_in_use(pg, 1)
 #define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
 
+static inline struct page *cl_page_vmpage(struct cl_page *page)
+{
+       LASSERT(page->cp_vmpage != NULL);
+       return page->cp_vmpage;
+}
+
 /** @} cl_page */
 
 /** \addtogroup cl_lock cl_lock
@@ -2722,7 +2714,7 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
 static inline void cl_object_page_init(struct cl_object *clob, int size)
 {
        clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
-       cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+       cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
 }
 
 static inline void *cl_object_page_slice(struct cl_object *clob,
@@ -2769,8 +2761,6 @@ void            cl_page_print       (const struct lu_env *env, void *cookie,
 void            cl_page_header_print(const struct lu_env *env, void *cookie,
                                      lu_printer_t printer,
                                      const struct cl_page *pg);
-struct page     *cl_page_vmpage      (const struct lu_env *env,
-                                     struct cl_page *page);
 struct cl_page *cl_vmpage_page      (struct page *vmpage, struct cl_object *obj);
 struct cl_page *cl_page_top         (struct cl_page *page);
 
@@ -2866,17 +2856,6 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
                                 struct cl_object *obj, pgoff_t index,
                                 struct cl_lock *except, int pending,
                                 int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
-                                             struct cl_object *obj,
-                                             struct cl_page *page,
-                                             struct cl_lock *except,
-                                             int pending, int canceld)
-{
-       LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
-       return cl_lock_at_pgoff(env, obj, page->cp_index, except,
-                               pending, canceld);
-}
-
 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                        const struct lu_device_type *dtype);
 
index fe59be2..b21778a 100644 (file)
@@ -246,6 +246,11 @@ static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
         return container_of(slice, struct ccc_page, cpg_cl);
 }
 
+static inline pgoff_t ccc_index(struct ccc_page *ccc)
+{
+       return ccc->cpg_cl.cpl_index;
+}
+
 struct cl_page    *ccc_vmpage_page_transient(struct page *vmpage);
 
 struct ccc_device {
@@ -308,8 +313,6 @@ int ccc_object_glimpse(const struct lu_env *env,
                        const struct cl_object *obj, struct ost_lvb *lvb);
 int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
                  const struct cl_object_conf *conf);
-struct page *ccc_page_vmpage(const struct lu_env *env,
-                            const struct cl_page_slice *slice);
 int ccc_page_is_under_lock(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io);
 int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
index 1eb6b95..979d3de 100644 (file)
@@ -352,6 +352,8 @@ struct lu_object *ccc_object_alloc(const struct lu_env *env,
                 obj = ccc2lu(vob);
                 hdr = &vob->cob_header;
                 cl_object_header_init(hdr);
+               hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
                 lu_object_init(obj, &hdr->coh_lu, dev);
                 lu_object_add_top(&hdr->coh_lu, obj);
 
@@ -479,12 +481,6 @@ static void ccc_object_size_unlock(struct cl_object *obj)
  *
  */
 
-struct page *ccc_page_vmpage(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
-{
-        return cl2vm_page(slice);
-}
-
 int ccc_page_is_under_lock(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
@@ -502,8 +498,8 @@ int ccc_page_is_under_lock(const struct lu_env *env,
                 if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
                         result = -EBUSY;
                 else {
-                        desc->cld_start = page->cp_index;
-                        desc->cld_end   = page->cp_index;
+                       desc->cld_start = ccc_index(cl2ccc_page(slice));
+                       desc->cld_end   = ccc_index(cl2ccc_page(slice));
                         desc->cld_obj   = page->cp_obj;
                         desc->cld_mode  = CLM_READ;
                         result = cl_queue_match(&io->ci_lockset.cls_done,
index 5c7ce91..722113d 100644 (file)
@@ -58,7 +58,7 @@ static int   slp_type_init     (struct lu_device_type *t);
 static void  slp_type_fini     (struct lu_device_type *t);
 
 static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
-                        struct cl_page *page, struct page *vmpage);
+                        struct cl_page *page, pgoff_t index);
 static int   slp_attr_get     (const struct lu_env *env, struct cl_object *obj,
                                struct cl_attr *attr);
 
@@ -224,13 +224,13 @@ void slp_global_fini(void)
  */
 
 static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
-                       struct cl_page *page, struct page *vmpage)
+                       struct cl_page *page, pgoff_t index)
 {
-        struct ccc_page *cpg = cl_object_page_slice(obj, page);
+       struct ccc_page *cpg = cl_object_page_slice(obj, page);
 
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+       CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
-       cpg->cpg_page = vmpage;
+       cpg->cpg_page = page->cp_vmpage;
 
        if (page->cp_type == CPT_CACHEABLE) {
                LBUG();
@@ -242,7 +242,7 @@ static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
                clobj->cob_transient_pages++;
        }
 
-        return 0;
+       return 0;
 }
 
 static int slp_io_init(const struct lu_env *env, struct cl_object *obj,
@@ -359,7 +359,6 @@ static const struct cl_page_operations slp_transient_page_ops = {
         .cpo_unassume      = ccc_transient_page_unassume,
         .cpo_disown        = ccc_transient_page_disown,
         .cpo_discard       = ccc_transient_page_discard,
-        .cpo_vmpage        = ccc_page_vmpage,
         .cpo_is_vmlocked   = slp_page_is_vmlocked,
         .cpo_fini          = slp_transient_page_fini,
         .cpo_is_under_lock = ccc_page_is_under_lock,
index d9087e1..92c1de3 100644 (file)
@@ -1115,15 +1115,19 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
 
 static inline void ll_invalidate_page(struct page *vmpage)
 {
-        struct address_space *mapping = vmpage->mapping;
-        loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+       struct address_space *mapping = vmpage->mapping;
+       loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
 
-        LASSERT(PageLocked(vmpage));
-        if (mapping == NULL)
-                return;
+       LASSERT(PageLocked(vmpage));
+       if (mapping == NULL)
+               return;
 
+       /*
+        * truncate_complete_page() calls
+        * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
+        */
        ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
-        truncate_complete_page(mapping, vmpage);
+       truncate_complete_page(mapping, vmpage);
 }
 
 #define    ll_s2sbi(sb)        (s2lsi(sb)->lsi_llsbi)
index bff9798..be4c55c 100644 (file)
@@ -325,8 +325,9 @@ struct ll_ra_read *ll_ra_read_get(struct file *f)
 
 static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
                              struct cl_page_list *queue, struct cl_page *page,
-                             struct page *vmpage)
+                             struct cl_object *clob)
 {
+       struct page *vmpage = page->cp_vmpage;
        struct ccc_page *cp;
        int              rc;
 
@@ -335,7 +336,7 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
        rc = 0;
        cl_page_assume(env, io, page);
        lu_ref_add(&page->cp_reference, "ra", current);
-       cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
        if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
                rc = cl_page_is_under_lock(env, io, page);
                if (rc == -EBUSY) {
@@ -392,7 +393,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
                                             vmpage, CPT_CACHEABLE);
                         if (!IS_ERR(page)) {
                                 rc = cl_read_ahead_page(env, io, queue,
-                                                        page, vmpage);
+                                                       page, clob);
                                 if (rc == -ENOLCK) {
                                         which = RA_STAT_FAILED_MATCH;
                                         msg   = "lock match failed";
index e68d1c4..9365c74 100644 (file)
@@ -176,28 +176,6 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
        return result;
 }
 
-static int ll_set_page_dirty(struct page *vmpage)
-{
-#if 0
-        struct cl_page    *page = vvp_vmpage_page_transient(vmpage);
-        struct vvp_object *obj  = cl_inode2vvp(vmpage->mapping->host);
-        struct vvp_page   *cpg;
-
-        /*
-         * XXX should page method be called here?
-         */
-        LASSERT(&obj->co_cl == page->cp_obj);
-        cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
-        /*
-         * XXX cannot do much here, because page is possibly not locked:
-         * sys_munmap()->...
-         *     ->unmap_page_range()->zap_pte_range()->set_page_dirty().
-         */
-        vvp_write_pending(obj, cpg);
-#endif
-        RETURN(__set_page_dirty_nobuffers(vmpage));
-}
-
 #define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL
 
 static inline int ll_get_user_pages(int rw, unsigned long user_addr,
@@ -291,7 +269,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                 /* check the page type: if the page is a host page, then do
                  * write directly */
                 if (clp->cp_type == CPT_CACHEABLE) {
-                       struct page *vmpage = cl_page_vmpage(env, clp);
+                       struct page *vmpage = cl_page_vmpage(clp);
                        struct page *src_page;
                        struct page *dst_page;
                         void       *src;
@@ -512,19 +490,16 @@ out:
 static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
 {
-       struct cl_object *obj  = io->ci_obj;
        struct cl_attr *attr   = ccc_env_thread_attr(env);
-       loff_t          offset = cl_offset(obj, pg->cp_index);
+       struct cl_object *obj  = io->ci_obj;
+       struct ccc_page *cp    = cl_object_page_slice(obj, pg);
+       loff_t          offset = cl_offset(obj, ccc_index(cp));
        int             result;
 
        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
-               struct ccc_page *cp;
-
-               cp = cl2ccc_page(cl_page_at(pg, &vvp_device_type));
-
                /*
                 * If are writing to a new page, no need to read old data.
                 * The extent locking will have updated the KMS, and for our
@@ -719,7 +694,7 @@ struct address_space_operations ll_aops = {
         .direct_IO      = ll_direct_IO_26,
         .writepage      = ll_writepage,
        .writepages     = ll_writepages,
-        .set_page_dirty = ll_set_page_dirty,
+        .set_page_dirty = __set_page_dirty_nobuffers,
         .write_begin    = ll_write_begin,
         .write_end      = ll_write_end,
         .invalidatepage = ll_invalidatepage,
@@ -735,7 +710,7 @@ struct address_space_operations_ext ll_aops = {
        .orig_aops.direct_IO            = ll_direct_IO_26,
        .orig_aops.writepage            = ll_writepage,
        .orig_aops.writepages           = ll_writepages,
-       .orig_aops.set_page_dirty       = ll_set_page_dirty,
+       .orig_aops.set_page_dirty       = __set_page_dirty_nobuffers,
        .orig_aops.invalidatepage       = ll_invalidatepage,
        .orig_aops.releasepage          = ll_releasepage,
 #ifdef CONFIG_MIGRATION
index 8b9e09a..2fdb613 100644 (file)
@@ -397,7 +397,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
 } while(0)
 
 static void vvp_pgcache_page_show(const struct lu_env *env,
-                                  struct seq_file *seq, struct cl_page *page)
+                                 struct seq_file *seq, struct cl_page *page)
 {
        struct ccc_page *cpg;
        struct page      *vmpage;
@@ -416,14 +416,14 @@ static void vvp_pgcache_page_show(const struct lu_env *env,
                   PFID(ll_inode2fid(vmpage->mapping->host)),
                   vmpage->mapping->host, vmpage->index,
                   page_count(vmpage));
-        has_flags = 0;
-        seq_page_flag(seq, vmpage, locked, has_flags);
-        seq_page_flag(seq, vmpage, error, has_flags);
-        seq_page_flag(seq, vmpage, referenced, has_flags);
-        seq_page_flag(seq, vmpage, uptodate, has_flags);
-        seq_page_flag(seq, vmpage, dirty, has_flags);
-        seq_page_flag(seq, vmpage, writeback, has_flags);
-        seq_printf(seq, "%s]\n", has_flags ? "" : "-");
+       has_flags = 0;
+       seq_page_flag(seq, vmpage, locked, has_flags);
+       seq_page_flag(seq, vmpage, error, has_flags);
+       seq_page_flag(seq, vmpage, referenced, has_flags);
+       seq_page_flag(seq, vmpage, uptodate, has_flags);
+       seq_page_flag(seq, vmpage, dirty, has_flags);
+       seq_page_flag(seq, vmpage, writeback, has_flags);
+       seq_printf(seq, "%s]\n", has_flags ? "" : "-");
 }
 
 static int vvp_pgcache_show(struct seq_file *f, void *v)
index 63172d3..4b907b1 100644 (file)
@@ -53,7 +53,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
 int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io);
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
-                 struct cl_page *page, struct page *vmpage);
+                 struct cl_page *page, pgoff_t index);
 struct lu_object *vvp_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *hdr,
                                   struct lu_device *dev);
index 380a7e5..7522a48 100644 (file)
@@ -652,7 +652,7 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 
                        cl_page_clip(env, page, 0, PAGE_SIZE);
 
-                       SetPageUptodate(cl_page_vmpage(env, page));
+                       SetPageUptodate(cl_page_vmpage(page));
                        cl_page_disown(env, io, page);
 
                        /* held in ll_cl_init() */
@@ -667,17 +667,15 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *page)
 {
-       const struct cl_page_slice *slice;
        struct ccc_page *cp;
-       struct page *vmpage;
-
-       slice = cl_page_at(page, &vvp_device_type);
-       cp = cl2ccc_page(slice);
-       vmpage = cp->cpg_page;
+       struct page *vmpage = page->cp_vmpage;
+       struct cl_object *clob = cl_io_top(io)->ci_obj;
 
        SetPageUptodate(vmpage);
        set_page_dirty(vmpage);
-       vvp_write_pending(cl2ccc(slice->cpl_obj), cp);
+
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2ccc(clob), cp);
 
        cl_page_disown(env, io, page);
 
@@ -687,19 +685,22 @@ static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
 }
 
 /* make sure the page list is contiguous */
-static bool page_list_sanity_check(struct cl_page_list *plist)
+static bool page_list_sanity_check(struct cl_object *obj,
+                                  struct cl_page_list *plist)
 {
        struct cl_page *page;
        pgoff_t index = CL_PAGE_EOF;
 
        cl_page_list_for_each(page, plist) {
+               struct ccc_page *cp = cl_object_page_slice(obj, page);
+
                if (index == CL_PAGE_EOF) {
-                       index = page->cp_index;
+                       index = ccc_index(cp);
                        continue;
                }
 
                ++index;
-               if (index == page->cp_index)
+               if (index == ccc_index(cp))
                        continue;
 
                return false;
@@ -726,7 +727,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
                npages, cio->u.write.cui_from, cio->u.write.cui_to);
 
-       LASSERT(page_list_sanity_check(queue));
+       LASSERT(page_list_sanity_check(obj, queue));
 
        /* submit IO with async write */
        rc = cl_io_commit_async(env, io, queue,
@@ -751,7 +752,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
                /* the first page must have been written. */
                cio->u.write.cui_from = 0;
        }
-       LASSERT(page_list_sanity_check(queue));
+       LASSERT(page_list_sanity_check(obj, queue));
        LASSERT(ergo(rc == 0, queue->pl_nr == 0));
 
        /* out of quota, try sync write */
@@ -774,7 +775,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
                page = cl_page_list_first(queue);
                cl_page_list_del(env, queue, page);
 
-               if (!PageDirty(cl_page_vmpage(env, page)))
+               if (!PageDirty(cl_page_vmpage(page)))
                        cl_page_discard(env, io, page);
 
                cl_page_disown(env, io, page);
@@ -886,16 +887,13 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
                                    struct cl_page *page)
 {
-       const struct cl_page_slice *slice;
        struct ccc_page *cp;
-       struct page *vmpage;
+       struct cl_object *clob = cl_io_top(io)->ci_obj;
 
-       slice = cl_page_at(page, &vvp_device_type);
-       cp = cl2ccc_page(slice);
-       vmpage = cp->cpg_page;
+       set_page_dirty(page->cp_vmpage);
 
-       set_page_dirty(vmpage);
-       vvp_write_pending(cl2ccc(slice->cpl_obj), cp);
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2ccc(clob), cp);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
@@ -995,6 +993,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                wait_on_page_writeback(vmpage);
                if (!PageDirty(vmpage)) {
                        struct cl_page_list *plist = &io->ci_queue.c2_qin;
+                       struct ccc_page *cp = cl_object_page_slice(obj, page);
                        int to = PAGE_SIZE;
 
                        /* vvp_page_assume() calls wait_on_page_writeback(). */
@@ -1004,7 +1003,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                        cl_page_list_add(plist, page);
 
                        /* size fixup */
-                       if (last_index == page->cp_index)
+                       if (last_index == ccc_index(cp))
                                to = size & ~CFS_PAGE_MASK;
 
                        /* Do not set Dirty bit here so that in case IO is
@@ -1090,8 +1089,8 @@ static int vvp_io_read_page(const struct lu_env *env,
 
         if (sbi->ll_ra_info.ra_max_pages_per_file &&
             sbi->ll_ra_info.ra_max_pages)
-                ras_update(sbi, inode, ras, page->cp_index,
-                           cp->cpg_defer_uptodate);
+               ras_update(sbi, inode, ras, ccc_index(cp),
+                          cp->cpg_defer_uptodate);
 
         /* Sanity check whether the page is protected by a lock. */
         rc = cl_page_is_under_lock(env, io, page);
index 76bed3c..2b54a5e 100644 (file)
@@ -139,27 +139,16 @@ static void vvp_page_discard(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
 {
-       struct page          *vmpage  = cl2vm_page(slice);
-       struct address_space *mapping;
-       struct ccc_page      *cpg     = cl2ccc_page(slice);
-       __u64 offset;
+       struct page     *vmpage = cl2vm_page(slice);
+       struct ccc_page *cpg    = cl2ccc_page(slice);
 
        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
 
-       mapping = vmpage->mapping;
-
        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
-               ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
-
-       offset = vmpage->index << PAGE_SHIFT;
-       ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
+               ll_ra_stats_inc(vmpage->mapping, RA_STAT_DISCARDED);
 
-       /*
-        * truncate_complete_page() calls
-        * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
-        */
-       truncate_complete_page(mapping, vmpage);
+       ll_invalidate_page(vmpage);
 }
 
 static void vvp_page_delete(const struct lu_env *env,
@@ -267,8 +256,8 @@ static void vvp_page_completion_read(const struct lu_env *env,
                                      int ioret)
 {
         struct ccc_page *cp     = cl2ccc_page(slice);
-       struct page      *vmpage = cp->cpg_page;
-        struct cl_page  *page   = cl_page_top(slice->cpl_page);
+       struct page     *vmpage = cp->cpg_page;
+       struct cl_page  *page   = slice->cpl_page;
         struct inode    *inode  = ccc_object_inode(page->cp_obj);
         ENTRY;
 
@@ -398,7 +387,6 @@ static const struct cl_page_operations vvp_page_ops = {
         .cpo_assume        = vvp_page_assume,
         .cpo_unassume      = vvp_page_unassume,
         .cpo_disown        = vvp_page_disown,
-        .cpo_vmpage        = ccc_page_vmpage,
         .cpo_discard       = vvp_page_discard,
         .cpo_delete        = vvp_page_delete,
         .cpo_export        = vvp_page_export,
@@ -508,7 +496,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
         .cpo_unassume      = vvp_transient_page_unassume,
         .cpo_disown        = vvp_transient_page_disown,
         .cpo_discard       = vvp_transient_page_discard,
-        .cpo_vmpage        = ccc_page_vmpage,
         .cpo_fini          = vvp_transient_page_fini,
         .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
         .cpo_print         = vvp_page_print,
@@ -526,12 +513,14 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 };
 
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
-               struct cl_page *page, struct page *vmpage)
+               struct cl_page *page, pgoff_t index)
 {
        struct ccc_page *cpg = cl_object_page_slice(obj, page);
+       struct page     *vmpage = page->cp_vmpage;
 
        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
+       cpg->cpg_cl.cpl_index = index;
        cpg->cpg_page = vmpage;
        page_cache_get(vmpage);
 
index e37504f..566246f 100644 (file)
@@ -620,16 +620,13 @@ int   lov_sublock_modify  (const struct lu_env *env, struct lov_lock *lov,
 
 
 int   lov_page_init       (const struct lu_env *env, struct cl_object *ob,
-                          struct cl_page *page, struct page *vmpage);
+                          struct cl_page *page, pgoff_t index);
 int   lovsub_page_init    (const struct lu_env *env, struct cl_object *ob,
-                          struct cl_page *page, struct page *vmpage);
-
-int   lov_page_init_empty (const struct lu_env *env,
-                           struct cl_object *obj,
-                          struct cl_page *page, struct page *vmpage);
-int   lov_page_init_raid0 (const struct lu_env *env,
-                           struct cl_object *obj,
-                          struct cl_page *page, struct page *vmpage);
+                          struct cl_page *page, pgoff_t index);
+int   lov_page_init_empty (const struct lu_env *env, struct cl_object *obj,
+                          struct cl_page *page, pgoff_t index);
+int   lov_page_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+                          struct cl_page *page, pgoff_t index);
 struct lu_object *lov_object_alloc   (const struct lu_env *env,
                                       const struct lu_object_header *hdr,
                                       struct lu_device *dev);
@@ -804,11 +801,6 @@ static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
         return container_of0(slice, struct lovsub_req, lsrq_cl);
 }
 
-static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
-{
-        return slice->cpl_page->cp_child;
-}
-
 static inline struct lov_io *cl2lov_io(const struct lu_env *env,
                                 const struct cl_io_slice *ios)
 {
index b6608d8..4253e21 100644 (file)
@@ -248,14 +248,16 @@ void lov_sub_put(struct lov_io_sub *sub)
 
 static int lov_page_stripe(const struct cl_page *page)
 {
-        struct lovsub_object *subobj;
+       struct lovsub_object *subobj;
+       const struct cl_page_slice *slice;
+       ENTRY;
 
-        ENTRY;
-        subobj = lu2lovsub(
-                lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
-                                 &lovsub_device_type));
-        LASSERT(subobj != NULL);
-        RETURN(subobj->lso_index);
+       slice = cl_page_at(page, &lovsub_device_type);
+       LASSERT(slice != NULL);
+       LASSERT(slice->cpl_obj != NULL);
+
+       subobj = cl2lovsub(slice->cpl_obj);
+       RETURN(subobj->lso_index);
 }
 
 struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
index 8e1cd43..a33848b 100644 (file)
@@ -68,7 +68,7 @@ struct lov_layout_operations {
         int  (*llo_print)(const struct lu_env *env, void *cookie,
                           lu_printer_t p, const struct lu_object *o);
         int  (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
-                               struct cl_page *page, struct page *vmpage);
+                             struct cl_page *page, pgoff_t index);
         int  (*llo_lock_init)(const struct lu_env *env,
                               struct cl_object *obj, struct cl_lock *lock,
                               const struct cl_io *io);
@@ -193,6 +193,18 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
        return result;
 }
 
+static int lov_page_slice_fixup(struct lov_object *lov,
+                               struct cl_object *stripe)
+{
+       struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
+       struct cl_object *o;
+
+       cl_object_for_each(o, stripe)
+               o->co_slice_off += hdr->coh_page_bufsize;
+
+       return cl_object_header(stripe)->coh_page_bufsize;
+}
+
 static int lov_init_raid0(const struct lu_env *env,
                           struct lov_device *dev, struct lov_object *lov,
                           const struct cl_object_conf *conf,
@@ -219,12 +231,14 @@ static int lov_init_raid0(const struct lu_env *env,
        LASSERT(lov->lo_lsm == NULL);
        lov->lo_lsm = lsm_addref(lsm);
        r0->lo_nr  = lsm->lsm_stripe_count;
-        LASSERT(r0->lo_nr <= lov_targets_nr(dev));
+       LASSERT(r0->lo_nr <= lov_targets_nr(dev));
 
-        OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
-        if (r0->lo_sub != NULL) {
-                result = 0;
-                subconf->coc_inode = conf->coc_inode;
+       OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
+       if (r0->lo_sub != NULL) {
+               int psz = 0;
+
+               result = 0;
+               subconf->coc_inode = conf->coc_inode;
                spin_lock_init(&r0->lo_sub_lock);
                 /*
                  * Create stripe cl_objects.
@@ -234,32 +248,41 @@ static int lov_init_raid0(const struct lu_env *env,
                         struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
                         int ost_idx = oinfo->loi_ost_idx;
 
-                        result = ostid_to_fid(ofid, &oinfo->loi_oi,
+                       result = ostid_to_fid(ofid, &oinfo->loi_oi,
                                              oinfo->loi_ost_idx);
                        if (result != 0)
                                GOTO(out, result);
 
-                        subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
-                        subconf->u.coc_oinfo = oinfo;
-                        LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
+                       subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
+                       subconf->u.coc_oinfo = oinfo;
+                       LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
                        /* In the function below, .hs_keycmp resolves to
                         * lu_obj_hop_keycmp() */
                        /* coverity[overrun-buffer-val] */
-                        stripe = lov_sub_find(env, subdev, ofid, subconf);
-                        if (!IS_ERR(stripe)) {
-                                result = lov_init_sub(env, lov, stripe, r0, i);
+                       stripe = lov_sub_find(env, subdev, ofid, subconf);
+                       if (!IS_ERR(stripe)) {
+                               result = lov_init_sub(env, lov, stripe, r0, i);
                                if (result == -EAGAIN) { /* try again */
                                        --i;
                                        result = 0;
+                                       continue;
                                }
-                        } else {
-                                result = PTR_ERR(stripe);
+                       } else {
+                               result = PTR_ERR(stripe);
+                       }
+
+                       if (result == 0) {
+                               int sz = lov_page_slice_fixup(lov, stripe);
+                               LASSERT(ergo(psz > 0, psz == sz));
+                               psz = sz;
                        }
                 }
-        } else
-                result = -ENOMEM;
+               if (result == 0)
+                       cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
+       } else
+               result = -ENOMEM;
 out:
-        RETURN(result);
+       RETURN(result);
 }
 
 static int lov_init_released(const struct lu_env *env,
@@ -832,10 +855,10 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
 }
 
 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
-                 struct cl_page *page, struct page *vmpage)
+                 struct cl_page *page, pgoff_t index)
 {
-        return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
-                                   llo_page_init, env, obj, page, vmpage);
+       return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
+                                   index);
 }
 
 /**
index 0ee6b61..e5b3221 100644 (file)
  *
  */
 
-static int lov_page_invariant(const struct cl_page_slice *slice)
-{
-        const struct cl_page  *page = slice->cpl_page;
-        const struct cl_page  *sub  = lov_sub_page(slice);
-
-        return ergo(sub != NULL,
-                    page->cp_child == sub &&
-                    sub->cp_parent == page &&
-                    page->cp_state == sub->cp_state);
-}
-
-static void lov_page_fini(const struct lu_env *env,
-                          struct cl_page_slice *slice)
-{
-        struct cl_page  *sub = lov_sub_page(slice);
-
-        LINVRNT(lov_page_invariant(slice));
-        ENTRY;
-
-        if (sub != NULL) {
-                LASSERT(sub->cp_state == CPS_FREEING);
-                lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
-                sub->cp_parent = NULL;
-                slice->cpl_page->cp_child = NULL;
-                cl_page_put(env, sub);
-        }
-        EXIT;
-}
-
-static int lov_page_own(const struct lu_env *env,
-                        const struct cl_page_slice *slice, struct cl_io *io,
-                        int nonblock)
-{
-        struct lov_io     *lio = lov_env_io(env);
-        struct lov_io_sub *sub;
-
-        LINVRNT(lov_page_invariant(slice));
-        LINVRNT(!cl2lov_page(slice)->lps_invalid);
-        ENTRY;
-
-        sub = lov_page_subio(env, lio, slice);
-        if (!IS_ERR(sub)) {
-                lov_sub_page(slice)->cp_owner = sub->sub_io;
-                lov_sub_put(sub);
-        } else
-                LBUG(); /* Arrgh */
-        RETURN(0);
-}
-
-static void lov_page_assume(const struct lu_env *env,
-                            const struct cl_page_slice *slice, struct cl_io *io)
-{
-        lov_page_own(env, slice, io, 0);
-}
-
 static int lov_page_print(const struct lu_env *env,
                           const struct cl_page_slice *slice,
                           void *cookie, lu_printer_t printer)
@@ -118,42 +63,32 @@ static int lov_page_print(const struct lu_env *env,
 }
 
 static const struct cl_page_operations lov_page_ops = {
-       .cpo_fini       = lov_page_fini,
-       .cpo_own        = lov_page_own,
-       .cpo_assume     = lov_page_assume,
-       .cpo_print      = lov_page_print
+       .cpo_print = lov_page_print
 };
 
-static void lov_empty_page_fini(const struct lu_env *env,
-                                struct cl_page_slice *slice)
-{
-        LASSERT(slice->cpl_page->cp_child == NULL);
-}
-
 int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
-                       struct cl_page *page, struct page *vmpage)
+                       struct cl_page *page, pgoff_t index)
 {
-        struct lov_object *loo = cl2lov(obj);
-        struct lov_layout_raid0 *r0 = lov_r0(loo);
-        struct lov_io     *lio = lov_env_io(env);
-        struct cl_page    *subpage;
-        struct cl_object  *subobj;
-        struct lov_io_sub *sub;
-        struct lov_page   *lpg = cl_object_page_slice(obj, page);
-        loff_t             offset;
-        obd_off            suboff;
-        int                stripe;
-        int                rc;
-        ENTRY;
-
-        offset = cl_offset(obj, page->cp_index);
+       struct lov_object *loo = cl2lov(obj);
+       struct lov_layout_raid0 *r0 = lov_r0(loo);
+       struct lov_io     *lio = lov_env_io(env);
+       struct cl_object  *subobj;
+       struct cl_object  *o;
+       struct lov_io_sub *sub;
+       struct lov_page   *lpg = cl_object_page_slice(obj, page);
+       loff_t             offset;
+       obd_off            suboff;
+       int                stripe;
+       int                rc;
+       ENTRY;
+
+       offset = cl_offset(obj, index);
        stripe = lov_stripe_number(loo->lo_lsm, offset);
        LASSERT(stripe < r0->lo_nr);
        rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
                               &suboff);
        LASSERT(rc == 0);
 
-       lpg->lps_invalid = 1;
        cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
 
        sub = lov_sub_get(env, lio, stripe);
@@ -161,36 +96,45 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
                RETURN(PTR_ERR(sub));
 
        subobj = lovsub2cl(r0->lo_sub[stripe]);
-       subpage = cl_page_alloc(sub->sub_env, subobj, cl_index(subobj, suboff),
-                               vmpage, page->cp_type);
-       if (!IS_ERR(subpage)) {
-               subpage->cp_parent = page;
-               page->cp_child = subpage;
-               lpg->lps_invalid = 0;
-       } else
-               rc = PTR_ERR(subpage);
+       cfs_list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+                               co_lu.lo_linkage) {
+               if (o->co_ops->coo_page_init != NULL) {
+                       rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
+                                                     cl_index(subobj, suboff));
+                       if (rc != 0)
+                               break;
+               }
+       }
        lov_sub_put(sub);
        RETURN(rc);
 }
 
+static int lov_page_empty_print(const struct lu_env *env,
+                               const struct cl_page_slice *slice,
+                               void *cookie, lu_printer_t printer)
+{
+        struct lov_page *lp = cl2lov_page(slice);
+
+        return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p, empty.\n", lp);
+}
+
 static const struct cl_page_operations lov_empty_page_ops = {
-        .cpo_fini   = lov_empty_page_fini,
-        .cpo_print  = lov_page_print
+       .cpo_print = lov_page_empty_print
 };
 
 int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
-                       struct cl_page *page, struct page *vmpage)
+                       struct cl_page *page, pgoff_t index)
 {
-        struct lov_page *lpg = cl_object_page_slice(obj, page);
+       struct lov_page *lpg = cl_object_page_slice(obj, page);
        void *addr;
-        ENTRY;
+       ENTRY;
 
        cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
-       addr = kmap(vmpage);
+       addr = kmap(page->cp_vmpage);
        memset(addr, 0, cl_page_size(obj));
-       kunmap(vmpage);
+       kunmap(page->cp_vmpage);
        cl_page_export(env, page, 1);
-        RETURN(0);
+       RETURN(0);
 }
 
 
index d14ce67..ee1eadf 100644 (file)
@@ -62,13 +62,13 @@ static const struct cl_page_operations lovsub_page_ops = {
 };
 
 int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
-                       struct cl_page *page, struct page *unused)
+                    struct cl_page *page, pgoff_t ind)
 {
-        struct lovsub_page *lsb = cl_object_page_slice(obj, page);
-        ENTRY;
+       struct lovsub_page *lsb = cl_object_page_slice(obj, page);
+       ENTRY;
 
        cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
-        RETURN(0);
+       RETURN(0);
 }
 
 /** @} lov */
index 751ddbb..0f1cb2c 100644 (file)
@@ -724,41 +724,6 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
 }
 
 /**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
-        int     result = 1;
-        loff_t  start;
-        loff_t  end;
-        pgoff_t idx;
-
-        idx = page->cp_index;
-        switch (io->ci_type) {
-        case CIT_READ:
-        case CIT_WRITE:
-                /*
-                 * check that [start, end) and [pos, pos + count) extents
-                 * overlap.
-                 */
-                if (!cl_io_is_append(io)) {
-                        const struct cl_io_rw_common *crw = &(io->u.ci_rw);
-                        start = cl_offset(page->cp_obj, idx);
-                        end   = cl_offset(page->cp_obj, idx + 1);
-                        result = crw->crw_pos < end &&
-                                 start < crw->crw_pos + crw->crw_count;
-                }
-                break;
-        case CIT_FAULT:
-                result = io->u.ci_fault.ft_index == idx;
-                break;
-        default:
-                LBUG();
-        }
-        return result;
-}
-
-/**
  * Called by read io, when page has to be read from the server.
  *
  * \see cl_io_operations::cio_read_page()
@@ -773,7 +738,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
         LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
         LINVRNT(cl_page_is_owned(page, io));
         LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
-        LINVRNT(cl_page_in_io(page, io));
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
@@ -926,7 +890,6 @@ int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
         cl_page_list_for_each(page, queue) {
                 int rc;
 
-                LINVRNT(cl_page_in_io(page, io));
                 rc = cl_page_cancel(env, page);
                 result = result ?: rc;
         }
@@ -1205,7 +1168,6 @@ int cl_page_list_own(const struct lu_env *env,
 {
        struct cl_page *page;
        struct cl_page *temp;
-       pgoff_t index = 0;
        int result;
 
        LINVRNT(plist->pl_owner == current);
@@ -1213,8 +1175,6 @@ int cl_page_list_own(const struct lu_env *env,
        ENTRY;
        result = 0;
        cl_page_list_for_each_safe(page, temp, plist) {
-               LASSERT(index <= page->cp_index);
-               index = page->cp_index;
                if (cl_page_own(env, io, page) == 0)
                        result = result ?: page->cp_error;
                else
@@ -1342,7 +1302,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
 /**
  * Returns top-level io.
  *
- * \see cl_object_top(), cl_page_top().
+ * \see cl_object_top()
  */
 struct cl_io *cl_io_top(struct cl_io *io)
 {
@@ -1412,26 +1372,22 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
                        struct cl_page *page)
 {
-        struct cl_device     *dev;
-        struct cl_page_slice *slice;
-        int result;
+       struct cl_device     *dev;
+       struct cl_page_slice *slice;
+       int result;
 
-        ENTRY;
-        result = 0;
-        page = cl_page_top(page);
-        do {
-                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-                        dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
-                        if (dev->cd_ops->cdo_req_init != NULL) {
-                                result = dev->cd_ops->cdo_req_init(env,
-                                                                   dev, req);
-                                if (result != 0)
-                                        break;
-                        }
-                }
-                page = page->cp_child;
-        } while (page != NULL && result == 0);
-        RETURN(result);
+       ENTRY;
+       result = 0;
+       cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+               dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+               if (dev->cd_ops->cdo_req_init != NULL) {
+                       result = dev->cd_ops->cdo_req_init(env,
+                                       dev, req);
+                       if (result != 0)
+                               break;
+               }
+       }
+       RETURN(result);
 }
 
 /**
@@ -1498,12 +1454,11 @@ EXPORT_SYMBOL(cl_req_alloc);
 void cl_req_page_add(const struct lu_env *env,
                      struct cl_req *req, struct cl_page *page)
 {
-        struct cl_object  *obj;
-        struct cl_req_obj *rqo;
-        int i;
+       struct cl_object  *obj;
+       struct cl_req_obj *rqo;
+       int i;
 
-        ENTRY;
-        page = cl_page_top(page);
+       ENTRY;
 
         LASSERT(cfs_list_empty(&page->cp_flight));
         LASSERT(page->cp_req == NULL);
@@ -1521,11 +1476,11 @@ void cl_req_page_add(const struct lu_env *env,
                         cl_object_get(obj);
                        lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
                                             "cl_req", req);
-                        break;
-                }
-        }
-        LASSERT(i < req->crq_nrobjs);
-        EXIT;
+                       break;
+               }
+       }
+       LASSERT(i < req->crq_nrobjs);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_req_page_add);
 
@@ -1534,18 +1489,17 @@ EXPORT_SYMBOL(cl_req_page_add);
  */
 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_req *req = page->cp_req;
+       struct cl_req *req = page->cp_req;
 
-        ENTRY;
-        page = cl_page_top(page);
+       ENTRY;
 
-        LASSERT(!cfs_list_empty(&page->cp_flight));
-        LASSERT(req->crq_nrpages > 0);
+       LASSERT(!cfs_list_empty(&page->cp_flight));
+       LASSERT(req->crq_nrpages > 0);
 
-        cfs_list_del_init(&page->cp_flight);
-        --req->crq_nrpages;
-        page->cp_req = NULL;
-        EXIT;
+       cfs_list_del_init(&page->cp_flight);
+       --req->crq_nrpages;
+       page->cp_req = NULL;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_req_page_done);
 
index 78bc76c..1984836 100644 (file)
@@ -75,19 +75,19 @@ extern __u32 lu_session_tags_default;
  */
 int cl_object_header_init(struct cl_object_header *h)
 {
-        int result;
+       int result;
 
-        ENTRY;
-        result = lu_object_header_init(&h->coh_lu);
-        if (result == 0) {
+       ENTRY;
+       result = lu_object_header_init(&h->coh_lu);
+       if (result == 0) {
                spin_lock_init(&h->coh_lock_guard);
                spin_lock_init(&h->coh_attr_guard);
                lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
                lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
-                CFS_INIT_LIST_HEAD(&h->coh_locks);
-               h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
-        }
-        RETURN(result);
+               CFS_INIT_LIST_HEAD(&h->coh_locks);
+               h->coh_page_bufsize = 0;
+       }
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_object_header_init);
 
@@ -149,7 +149,7 @@ EXPORT_SYMBOL(cl_object_get);
 /**
  * Returns the top-object for a given \a o.
  *
- * \see cl_page_top(), cl_io_top()
+ * \see cl_io_top()
  */
 struct cl_object *cl_object_top(struct cl_object *o)
 {
index dc4037b..a42d12f 100644 (file)
@@ -95,18 +95,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
 #endif
 
 /**
- * Internal version of cl_page_top, it should be called if the page is
- * known to be not freed, says with page referenced, or radix tree lock held,
- * or page owned.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
-{
-        while (page->cp_parent != NULL)
-                page = page->cp_parent;
-        return page;
-}
-
-/**
  * Internal version of cl_page_get().
  *
  * This function can be used to obtain initial reference to previously
@@ -132,30 +120,25 @@ static const struct cl_page_slice *
 cl_page_at_trusted(const struct cl_page *page,
                    const struct lu_device_type *dtype)
 {
-        const struct cl_page_slice *slice;
-        ENTRY;
+       const struct cl_page_slice *slice;
+       ENTRY;
 
-        page = cl_page_top_trusted((struct cl_page *)page);
-        do {
-                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-                        if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
-                                RETURN(slice);
-                }
-                page = page->cp_child;
-        } while (page != NULL);
-        RETURN(NULL);
+       cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+               if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
+                       RETURN(slice);
+       }
+       RETURN(NULL);
 }
 
 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object *obj  = page->cp_obj;
+       struct cl_object *obj  = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
-        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
-        PASSERT(env, page, page->cp_owner == NULL);
-        PASSERT(env, page, page->cp_req == NULL);
-        PASSERT(env, page, page->cp_parent == NULL);
-        PASSERT(env, page, page->cp_state == CPS_FREEING);
+       PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+       PASSERT(env, page, page->cp_owner == NULL);
+       PASSERT(env, page, page->cp_req == NULL);
+       PASSERT(env, page, page->cp_state == CPS_FREEING);
 
        ENTRY;
        while (!cfs_list_empty(&page->cp_layers)) {
@@ -164,15 +147,16 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
-               slice->cpl_ops->cpo_fini(env, slice);
+               if (unlikely(slice->cpl_ops->cpo_fini != NULL))
+                       slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
-        cl_object_put(env, obj);
-        lu_ref_fini(&page->cp_reference);
-        OBD_FREE(page, pagesize);
-        EXIT;
+       cl_object_put(env, obj);
+       lu_ref_fini(&page->cp_reference);
+       OBD_FREE(page, pagesize);
+       EXIT;
 }
 
 /**
@@ -203,7 +187,7 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
-               page->cp_index = ind;
+               page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
@@ -215,8 +199,8 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
-                               result = o->co_ops->coo_page_init(env, o,
-                                                                 page, vmpage);
+                               result = o->co_ops->coo_page_init(env, o, page,
+                                                                 ind);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
@@ -295,28 +279,12 @@ EXPORT_SYMBOL(cl_page_find);
 
 static inline int cl_page_invariant(const struct cl_page *pg)
 {
-        struct cl_page          *parent;
-        struct cl_page          *child;
-        struct cl_io            *owner;
+       /*
+        * Page invariant is protected by a VM lock.
+        */
+       LINVRNT(cl_page_is_vmlocked(NULL, pg));
 
-        /*
-         * Page invariant is protected by a VM lock.
-         */
-        LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
-        parent = pg->cp_parent;
-        child  = pg->cp_child;
-        owner  = pg->cp_owner;
-
-        return cl_page_in_use_noref(pg) &&
-                ergo(parent != NULL, parent->cp_child == pg) &&
-                ergo(child != NULL, child->cp_parent == pg) &&
-                ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
-                ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
-                ergo(owner != NULL && parent != NULL,
-                     parent->cp_owner == pg->cp_owner->ci_parent) &&
-                ergo(owner != NULL && child != NULL,
-                     child->cp_owner->ci_parent == owner);
+       return cl_page_in_use_noref(pg);
 }
 
 static void cl_page_state_set0(const struct lu_env *env,
@@ -370,16 +338,13 @@ static void cl_page_state_set0(const struct lu_env *env,
         old = page->cp_state;
         PASSERT(env, page, allowed_transitions[old][state]);
         CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
-        for (; page != NULL; page = page->cp_child) {
-                PASSERT(env, page, page->cp_state == old);
-                PASSERT(env, page,
-                        equi(state == CPS_OWNED, page->cp_owner != NULL));
-
-               CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
-               CS_PAGESTATE_INC(page->cp_obj, state);
-                cl_page_state_set_trust(page, state);
-        }
-        EXIT;
+       PASSERT(env, page, page->cp_state == old);
+       PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));
+
+       CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
+       CS_PAGESTATE_INC(page->cp_obj, state);
+       cl_page_state_set_trust(page, state);
+       EXIT;
 }
 
 static void cl_page_state_set(const struct lu_env *env,
@@ -415,8 +380,6 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
-
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                        cfs_atomic_read(&page->cp_ref));
@@ -439,75 +402,30 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
 EXPORT_SYMBOL(cl_page_put);
 
 /**
- * Returns a VM page associated with a given cl_page.
- */
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
-        const struct cl_page_slice *slice;
-
-        /*
-         * Find uppermost layer with ->cpo_vmpage() method, and return its
-         * result.
-         */
-        page = cl_page_top(page);
-        do {
-                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-                        if (slice->cpl_ops->cpo_vmpage != NULL)
-                                RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
-                }
-                page = page->cp_child;
-        } while (page != NULL);
-        LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
  * Returns a cl_page associated with a VM page, and given cl_object.
  */
 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
 {
-       struct cl_page *top;
        struct cl_page *page;
 
-        ENTRY;
-        KLASSERT(PageLocked(vmpage));
+       ENTRY;
+       KLASSERT(PageLocked(vmpage));
 
-        /*
-         * NOTE: absence of races and liveness of data are guaranteed by page
-         *       lock on a "vmpage". That works because object destruction has
-         *       bottom-to-top pass.
-         */
+       /*
+        * NOTE: absence of races and liveness of data are guaranteed by page
+        *       lock on a "vmpage". That works because object destruction has
+        *       bottom-to-top pass.
+        */
 
-        /*
-         * This loop assumes that ->private points to the top-most page. This
-         * can be rectified easily.
-         */
-        top = (struct cl_page *)vmpage->private;
-       if (top == NULL)
-               RETURN(NULL);
-
-       for (page = top; page != NULL; page = page->cp_child) {
-               if (cl_object_same(page->cp_obj, obj)) {
-                       cl_page_get_trust(page);
-                       break;
-               }
+       page = (struct cl_page *)vmpage->private;
+       if (page != NULL) {
+               cl_page_get_trust(page);
+               LASSERT(page->cp_type == CPT_CACHEABLE);
        }
-       LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
        RETURN(page);
 }
 EXPORT_SYMBOL(cl_vmpage_page);
 
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
-        return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                        const struct lu_device_type *dtype)
 {
@@ -519,78 +437,58 @@ EXPORT_SYMBOL(cl_page_at);
 
 #define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
 ({                                                                      \
-        const struct lu_env        *__env  = (_env);                    \
-        struct cl_page             *__page = (_page);                   \
-        const struct cl_page_slice *__scan;                             \
-        int                         __result;                           \
-        ptrdiff_t                   __op   = (_op);                     \
-        int                       (*__method)_proto;                    \
-                                                                        \
-        __result = 0;                                                   \
-        __page = cl_page_top(__page);                                   \
-        do {                                                            \
-                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
-                                        cpl_linkage) {                  \
-                        __method = *(void **)((char *)__scan->cpl_ops + \
-                                              __op);                    \
-                        if (__method != NULL) {                         \
-                                __result = (*__method)(__env, __scan,   \
-                                                       ## __VA_ARGS__); \
-                                if (__result != 0)                      \
-                                        break;                          \
-                        }                                               \
-                }                                                       \
-                __page = __page->cp_child;                              \
-        } while (__page != NULL && __result == 0);                      \
-        if (__result > 0)                                               \
-                __result = 0;                                           \
-        __result;                                                       \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       int                         __result;                           \
+       ptrdiff_t                   __op   = (_op);                     \
+       int                       (*__method)_proto;                    \
+                                                                       \
+       __result = 0;                                                   \
+       cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {     \
+               __method = *(void **)((char *)__scan->cpl_ops + __op);         \
+               if (__method != NULL) {                                        \
+                       __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+                       if (__result != 0)                              \
+                               break;                                  \
+               }                                                       \
+       }                                                               \
+       if (__result > 0)                                               \
+               __result = 0;                                           \
+       __result;                                                       \
 })
 
-#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
-do {                                                                    \
-        const struct lu_env        *__env  = (_env);                    \
-        struct cl_page             *__page = (_page);                   \
-        const struct cl_page_slice *__scan;                             \
-        ptrdiff_t                   __op   = (_op);                     \
-        void                      (*__method)_proto;                    \
-                                                                        \
-        __page = cl_page_top(__page);                                   \
-        do {                                                            \
-                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
-                                        cpl_linkage) {                  \
-                        __method = *(void **)((char *)__scan->cpl_ops + \
-                                              __op);                    \
-                        if (__method != NULL)                           \
-                                (*__method)(__env, __scan,              \
-                                            ## __VA_ARGS__);            \
-                }                                                       \
-                __page = __page->cp_child;                              \
-        } while (__page != NULL);                                       \
+#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                  \
+do {                                                                   \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       ptrdiff_t                   __op   = (_op);                     \
+       void                      (*__method)_proto;                    \
+                                                                       \
+       cfs_list_for_each_entry(__scan, &__page->cp_layers,             \
+                               cpl_linkage) {                          \
+               __method = *(void **)((char *)__scan->cpl_ops + __op);  \
+               if (__method != NULL)                                   \
+                       (*__method)(__env, __scan, ## __VA_ARGS__);     \
+       }                                                               \
 } while (0)
 
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
-do {                                                                        \
-        const struct lu_env        *__env  = (_env);                        \
-        struct cl_page             *__page = (_page);                       \
-        const struct cl_page_slice *__scan;                                 \
-        ptrdiff_t                   __op   = (_op);                         \
-        void                      (*__method)_proto;                        \
-                                                                            \
-        /* get to the bottom page. */                                       \
-        while (__page->cp_child != NULL)                                    \
-                __page = __page->cp_child;                                  \
-        do {                                                                \
-                cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
-                                                cpl_linkage) {              \
-                        __method = *(void **)((char *)__scan->cpl_ops +     \
-                                              __op);                        \
-                        if (__method != NULL)                               \
-                                (*__method)(__env, __scan,                  \
-                                            ## __VA_ARGS__);                \
-                }                                                           \
-                __page = __page->cp_parent;                                 \
-        } while (__page != NULL);                                           \
+#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)          \
+do {                                                                   \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       ptrdiff_t                   __op   = (_op);                     \
+       void                      (*__method)_proto;                    \
+                                                                       \
+       /* iterate slices in reverse (bottom-to-top layer) order. */    \
+       cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers,     \
+                                       cpl_linkage) {                  \
+               __method = *(void **)((char *)__scan->cpl_ops + __op);  \
+               if (__method != NULL)                                   \
+                       (*__method)(__env, __scan, ## __VA_ARGS__);     \
+       }                                                               \
 } while (0)
 
 static int cl_page_invoke(const struct lu_env *env,
@@ -619,26 +517,22 @@ static void cl_page_invoid(const struct lu_env *env,
 
 static void cl_page_owner_clear(struct cl_page *page)
 {
-        ENTRY;
-        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
-                if (page->cp_owner != NULL) {
-                        LASSERT(page->cp_owner->ci_owned_nr > 0);
-                        page->cp_owner->ci_owned_nr--;
-                        page->cp_owner = NULL;
-                        page->cp_task = NULL;
-                }
-        }
-        EXIT;
+       ENTRY;
+       if (page->cp_owner != NULL) {
+               LASSERT(page->cp_owner->ci_owned_nr > 0);
+               page->cp_owner->ci_owned_nr--;
+               page->cp_owner = NULL;
+               page->cp_task = NULL;
+       }
+       EXIT;
 }
 
 static void cl_page_owner_set(struct cl_page *page)
 {
-        ENTRY;
-        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
-                LASSERT(page->cp_owner != NULL);
-                page->cp_owner->ci_owned_nr++;
-        }
-        EXIT;
+       ENTRY;
+       LASSERT(page->cp_owner != NULL);
+       page->cp_owner->ci_owned_nr++;
+       EXIT;
 }
 
 void cl_page_disown0(const struct lu_env *env,
@@ -671,9 +565,10 @@ void cl_page_disown0(const struct lu_env *env,
  */
 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
 {
-        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
-        ENTRY;
-        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
+       struct cl_io *top = cl_io_top((struct cl_io *)io);
+       LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
+       ENTRY;
+       RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
 }
 EXPORT_SYMBOL(cl_page_is_owned);
 
@@ -705,7 +600,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
         PINVRNT(env, pg, !cl_page_is_owned(pg, io));
 
         ENTRY;
-        pg = cl_page_top(pg);
         io = cl_io_top(io);
 
         if (pg->cp_state == CPS_FREEING) {
@@ -719,7 +613,7 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                 if (result == 0) {
                         PASSERT(env, pg, pg->cp_owner == NULL);
                         PASSERT(env, pg, pg->cp_req == NULL);
-                        pg->cp_owner = io;
+                       pg->cp_owner = cl_io_top(io);
                         pg->cp_task  = current;
                         cl_page_owner_set(pg);
                         if (pg->cp_state != CPS_FREEING) {
@@ -771,19 +665,18 @@ EXPORT_SYMBOL(cl_page_own_try);
 void cl_page_assume(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
 {
-        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
+       PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
 
-        ENTRY;
-        pg = cl_page_top(pg);
-        io = cl_io_top(io);
-
-        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
-        PASSERT(env, pg, pg->cp_owner == NULL);
-        pg->cp_owner = io;
-        pg->cp_task = current;
-        cl_page_owner_set(pg);
-        cl_page_state_set(env, pg, CPS_OWNED);
-        EXIT;
+       ENTRY;
+       io = cl_io_top(io);
+
+       cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
+       PASSERT(env, pg, pg->cp_owner == NULL);
+       pg->cp_owner = cl_io_top(io);
+       pg->cp_task = current;
+       cl_page_owner_set(pg);
+       cl_page_state_set(env, pg, CPS_OWNED);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_assume);
 
@@ -805,7 +698,6 @@ void cl_page_unassume(const struct lu_env *env,
         PINVRNT(env, pg, cl_page_invariant(pg));
 
         ENTRY;
-        pg = cl_page_top(pg);
         io = cl_io_top(io);
         cl_page_owner_clear(pg);
         cl_page_state_set(env, pg, CPS_CACHED);
@@ -831,14 +723,13 @@ EXPORT_SYMBOL(cl_page_unassume);
 void cl_page_disown(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
 {
-        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
-                        pg->cp_state == CPS_FREEING);
+       PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
+               pg->cp_state == CPS_FREEING);
 
-        ENTRY;
-        pg = cl_page_top(pg);
-        io = cl_io_top(io);
-        cl_page_disown0(env, io, pg);
-        EXIT;
+       ENTRY;
+       io = cl_io_top(io);
+       cl_page_disown0(env, io, pg);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_disown);
 
@@ -855,10 +746,10 @@ EXPORT_SYMBOL(cl_page_disown);
 void cl_page_discard(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
 {
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
-        PINVRNT(env, pg, cl_page_invariant(pg));
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
 
-        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
+       cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
 }
 EXPORT_SYMBOL(cl_page_discard);
 
@@ -871,7 +762,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
 {
         ENTRY;
 
-        PASSERT(env, pg, pg == cl_page_top(pg));
         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
 
         /*
@@ -906,7 +796,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
  * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
  * drain after some time, at which point page will be recycled.
  *
- * \pre  pg == cl_page_top(pg)
  * \pre  VM page is locked
  * \post pg->cp_state == CPS_FREEING
  *
@@ -949,7 +838,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
         const struct cl_page_slice *slice;
 
         ENTRY;
-        pg = cl_page_top_trusted((struct cl_page *)pg);
         slice = container_of(pg->cp_layers.next,
                              const struct cl_page_slice, cpl_linkage);
         PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
@@ -1005,15 +893,15 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
          */
        if (crt >= CRT_NR)
                return -EINVAL;
-        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
-        if (result == 0)
-                cl_page_io_start(env, pg, crt);
-
-        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
-                      equi(result == 0,
-                           PageWriteback(cl_page_vmpage(env, pg)))));
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
-        return result;
+       result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
+       if (result == 0)
+               cl_page_io_start(env, pg, crt);
+
+       KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
+                     equi(result == 0,
+                          PageWriteback(cl_page_vmpage(pg)))));
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+       return result;
 }
 EXPORT_SYMBOL(cl_page_prep);
 
@@ -1178,12 +1066,11 @@ EXPORT_SYMBOL(cl_page_clip);
 void cl_page_header_print(const struct lu_env *env, void *cookie,
                           lu_printer_t printer, const struct cl_page *pg)
 {
-        (*printer)(env, cookie,
-                   "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
-                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
-                   pg->cp_index, pg->cp_parent, pg->cp_child,
-                   pg->cp_state, pg->cp_error, pg->cp_type,
-                   pg->cp_owner, pg->cp_req, pg->cp_flags);
+       (*printer)(env, cookie,
+                  "page@%p[%d %p %d %d %d %p %p %#x]\n",
+                  pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
+                  pg->cp_state, pg->cp_error, pg->cp_type,
+                  pg->cp_owner, pg->cp_req, pg->cp_flags);
 }
 EXPORT_SYMBOL(cl_page_header_print);
 
@@ -1193,16 +1080,12 @@ EXPORT_SYMBOL(cl_page_header_print);
 void cl_page_print(const struct lu_env *env, void *cookie,
                    lu_printer_t printer, const struct cl_page *pg)
 {
-        struct cl_page *scan;
-
-        for (scan = cl_page_top((struct cl_page *)pg);
-             scan != NULL; scan = scan->cp_child)
-                cl_page_header_print(env, cookie, printer, scan);
-        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
-                       (const struct lu_env *env,
-                        const struct cl_page_slice *slice,
-                        void *cookie, lu_printer_t p), cookie, printer);
-        (*printer)(env, cookie, "end page@%p\n", pg);
+       cl_page_header_print(env, cookie, printer, pg);
+       CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
+                      (const struct lu_env *env,
+                       const struct cl_page_slice *slice,
+                       void *cookie, lu_printer_t p), cookie, printer);
+       (*printer)(env, cookie, "end page@%p\n", pg);
 }
 EXPORT_SYMBOL(cl_page_print);
 
index de8b69c..06ed2d5 100644 (file)
@@ -88,9 +88,8 @@ struct echo_object_conf {
 };
 
 struct echo_page {
-        struct cl_page_slice   ep_cl;
+       struct cl_page_slice    ep_cl;
        struct mutex            ep_lock;
-       struct page            *ep_vmpage;
 };
 
 struct echo_lock {
@@ -243,12 +242,6 @@ static struct lu_kmem_descr echo_caches[] = {
  *
  * @{
  */
-static struct page *echo_page_vmpage(const struct lu_env *env,
-                                    const struct cl_page_slice *slice)
-{
-        return cl2echo_page(slice)->ep_vmpage;
-}
-
 static int echo_page_own(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *io, int nonblock)
@@ -301,7 +294,7 @@ static void echo_page_fini(const struct lu_env *env,
        ENTRY;
 
        cfs_atomic_dec(&eco->eo_npages);
-       page_cache_release(cl2echo_page(slice)->ep_vmpage);
+       page_cache_release(slice->cpl_page->cp_vmpage);
        EXIT;
 }
 
@@ -316,18 +309,18 @@ static int echo_page_print(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            void *cookie, lu_printer_t printer)
 {
-        struct echo_page *ep = cl2echo_page(slice);
+       struct echo_page *ep = cl2echo_page(slice);
 
-        (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
-                  ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
-        return 0;
+       (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
+                  ep, mutex_is_locked(&ep->ep_lock),
+                  slice->cpl_page->cp_vmpage);
+       return 0;
 }
 
 static const struct cl_page_operations echo_page_ops = {
         .cpo_own           = echo_page_own,
         .cpo_disown        = echo_page_disown,
         .cpo_discard       = echo_page_discard,
-        .cpo_vmpage        = echo_page_vmpage,
         .cpo_fini          = echo_page_fini,
         .cpo_print         = echo_page_print,
         .cpo_is_vmlocked   = echo_page_is_vmlocked,
@@ -390,18 +383,17 @@ static struct cl_lock_operations echo_lock_ops = {
  * @{
  */
 static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
-                       struct cl_page *page, struct page *vmpage)
+                         struct cl_page *page, pgoff_t index)
 {
-        struct echo_page *ep = cl_object_page_slice(obj, page);
+       struct echo_page *ep = cl_object_page_slice(obj, page);
        struct echo_object *eco = cl2echo_obj(obj);
-        ENTRY;
+       ENTRY;
 
-       ep->ep_vmpage = vmpage;
-       page_cache_get(vmpage);
+       page_cache_get(page->cp_vmpage);
        mutex_init(&ep->ep_lock);
        cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
        cfs_atomic_inc(&eco->eo_npages);
-        RETURN(0);
+       RETURN(0);
 }
 
 static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
@@ -602,6 +594,8 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
 
                 obj = &echo_obj2cl(eco)->co_lu;
                 cl_object_header_init(hdr);
+               hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
                 lu_object_init(obj, &hdr->coh_lu, dev);
                 lu_object_add_top(&hdr->coh_lu, obj);
 
index 5c25dfd..e6b111b 100644 (file)
@@ -241,7 +241,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
 
        page_count = 0;
        cfs_list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
-               pgoff_t index = oap2cl_page(oap)->cp_index;
+               pgoff_t index = osc_index(oap2osc(oap));
                ++page_count;
                if (index > ext->oe_end || index < ext->oe_start)
                        GOTO(out, rc = 110);
@@ -947,18 +947,18 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
        /* discard all pages with index greater then trunc_index */
        cfs_list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
                                     oap_pending_item) {
-               struct cl_page  *sub  = oap2cl_page(oap);
-               struct cl_page  *page = cl_page_top(sub);
+               pgoff_t index = osc_index(oap2osc(oap));
+               struct cl_page  *page = oap2cl_page(oap);
 
                LASSERT(cfs_list_empty(&oap->oap_rpc_item));
 
                /* only discard the pages with their index greater than
                 * trunc_index, and ... */
-               if (sub->cp_index < trunc_index ||
-                   (sub->cp_index == trunc_index && partial)) {
+               if (index < trunc_index ||
+                   (index == trunc_index && partial)) {
                        /* accounting how many pages remaining in the chunk
                         * so that we can calculate grants correctly. */
-                       if (sub->cp_index >> ppc_bits == trunc_chunk)
+                       if (index >> ppc_bits == trunc_chunk)
                                ++pages_in_chunk;
                        continue;
                }
@@ -1208,7 +1208,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
                          int cmd)
 {
        struct osc_page *opg  = oap2osc_page(oap);
-       struct cl_page  *page = cl_page_top(oap2cl_page(oap));
+       struct cl_page  *page = oap2cl_page(oap);
        int result;
 
        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
@@ -1224,7 +1224,7 @@ static int osc_refresh_count(const struct lu_env *env,
                             struct osc_async_page *oap, int cmd)
 {
        struct osc_page  *opg = oap2osc_page(oap);
-       struct cl_page   *page = oap2cl_page(oap);
+       pgoff_t index = osc_index(oap2osc(oap));
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
 
@@ -1242,10 +1242,10 @@ static int osc_refresh_count(const struct lu_env *env,
        if (result < 0)
                return result;
        kms = attr->cat_kms;
-       if (cl_offset(obj, page->cp_index) >= kms)
+       if (cl_offset(obj, index) >= kms)
                /* catch race with truncate */
                return 0;
-       else if (cl_offset(obj, page->cp_index + 1) > kms)
+       else if (cl_offset(obj, index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % PAGE_CACHE_SIZE;
        else
@@ -1256,7 +1256,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
                          int cmd, int rc)
 {
        struct osc_page   *opg  = oap2osc_page(oap);
-       struct cl_page    *page = cl_page_top(oap2cl_page(oap));
+       struct cl_page    *page = oap2cl_page(oap);
        struct osc_object *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type   crt;
        int srvlock;
@@ -2380,7 +2380,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
        OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
                     oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
 
-       index = oap2cl_page(oap)->cp_index;
+       index = osc_index(oap2osc(oap));
 
        /* Add this page into extent by the following steps:
         * 1. if there exists an active extent for this IO, mostly this page
@@ -2490,20 +2490,20 @@ int osc_teardown_async_page(const struct lu_env *env,
        LASSERT(oap->oap_magic == OAP_MAGIC);
 
        CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
-              oap, ops, oap2cl_page(oap)->cp_index);
+              oap, ops, osc_index(oap2osc(oap)));
 
        osc_object_lock(obj);
        if (!cfs_list_empty(&oap->oap_rpc_item)) {
                CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
                rc = -EBUSY;
        } else if (!cfs_list_empty(&oap->oap_pending_item)) {
-               ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
+               ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
                /* only truncated pages are allowed to be taken out.
                 * See osc_extent_truncate() and osc_cache_truncate_start()
                 * for details. */
                if (ext != NULL && ext->oe_state != OES_TRUNC) {
                        OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
-                                       oap2cl_page(oap)->cp_index);
+                                       osc_index(oap2osc(oap)));
                        rc = -EBUSY;
                }
        }
@@ -2526,7 +2526,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
        struct osc_extent *ext   = NULL;
        struct osc_object *obj   = cl2osc(ops->ops_cl.cpl_obj);
        struct cl_page    *cp    = ops->ops_cl.cpl_page;
-       pgoff_t            index = cp->cp_index;
+       pgoff_t            index = osc_index(ops);
        struct osc_async_page *oap = &ops->ops_oap;
        bool unplug = false;
        int rc = 0;
@@ -2542,8 +2542,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
        switch (ext->oe_state) {
        case OES_RPC:
        case OES_LOCK_DONE:
-               CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
-                             "flush an in-rpc page?\n");
+               CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
                LASSERT(0);
                break;
        case OES_LOCKING:
@@ -2566,7 +2565,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
                break;
        }
 
-       rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
+       rc = cl_page_prep(env, io, cp, CRT_WRITE);
        if (rc)
                GOTO(out, rc);
 
@@ -2611,7 +2610,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
        struct osc_extent     *ext;
        struct osc_extent     *found = NULL;
        cfs_list_t            *plist;
-       pgoff_t index = oap2cl_page(oap)->cp_index;
+       pgoff_t index = osc_index(ops);
        int     rc = -EBUSY;
        int     cmd;
        ENTRY;
@@ -2674,11 +2673,11 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
        ENTRY;
 
        cfs_list_for_each_entry(oap, list, oap_pending_item) {
-               struct cl_page *cp = oap2cl_page(oap);
-               if (cp->cp_index > end)
-                       end = cp->cp_index;
-               if (cp->cp_index < start)
-                       start = cp->cp_index;
+               pgoff_t index = osc_index(oap2osc(oap));
+               if (index > end)
+                       end = index;
+               if (index < start)
+                       start = index;
                ++page_count;
                mppr <<= (page_count > mppr);
        }
@@ -3089,7 +3088,7 @@ int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
                                break;
                        }
 
-                       page = cl_page_top(ops->ops_cl.cpl_page);
+                       page = ops->ops_cl.cpl_page;
                        LASSERT(page->cp_type == CPT_CACHEABLE);
                        if (page->cp_state == CPS_FREEING)
                                continue;
@@ -3117,7 +3116,7 @@ int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
                        if (res == CLP_GANG_OKAY)
                                res = (*cb)(env, io, ops, cbdata);
 
-                       page = cl_page_top(ops->ops_cl.cpl_page);
+                       page = ops->ops_cl.cpl_page;
                        lu_ref_del(&page->cp_reference, "gang_lookup", current);
                        cl_page_put(env, page);
                }
@@ -3150,7 +3149,7 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
        index = osc_index(ops);
        if (index >= info->oti_fn_index) {
                struct cl_lock *tmp;
-               struct cl_page *page = cl_page_top(ops->ops_cl.cpl_page);
+               struct cl_page *page = ops->ops_cl.cpl_page;
 
                /* refresh non-overlapped index */
                tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
@@ -3182,7 +3181,7 @@ static int discard_cb(const struct lu_env *env, struct cl_io *io,
 {
        struct osc_thread_info *info = osc_env_info(env);
        struct cl_lock *lock = cbdata;
-       struct cl_page *page = cl_page_top(ops->ops_cl.cpl_page);
+       struct cl_page *page = ops->ops_cl.cpl_page;
 
        LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
 
@@ -3190,7 +3189,7 @@ static int discard_cb(const struct lu_env *env, struct cl_io *io,
        info->oti_next_index = osc_index(ops) + 1;
        if (cl_page_own(env, io, page) == 0) {
                KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
-                             !PageDirty(cl_page_vmpage(env, page))));
+                             !PageDirty(cl_page_vmpage(page))));
 
                /* discard the page */
                cl_page_discard(env, io, page);
index 406cafe..b8d0e3c 100644 (file)
@@ -418,7 +418,7 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
                                    const struct lu_object_header *hdr,
                                    struct lu_device *dev);
 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
-                 struct cl_page *page, struct page *vmpage);
+                 struct cl_page *page, pgoff_t ind);
 
 void osc_index2policy  (ldlm_policy_data_t *policy, const struct cl_object *obj,
                         pgoff_t start, pgoff_t end);
@@ -555,6 +555,11 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
        return container_of0(oap, struct osc_page, ops_oap);
 }
 
+static inline pgoff_t osc_index(struct osc_page *opg)
+{
+       return opg->ops_cl.cpl_index;
+}
+
 static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
 {
        return oap2osc(oap)->ops_cl.cpl_page;
@@ -565,11 +570,6 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
        return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
 }
 
-static inline pgoff_t osc_index(struct osc_page *opg)
-{
-       return opg->ops_cl.cpl_page->cp_index;
-}
-
 static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
 {
         LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
index a9bad93..3ded607 100644 (file)
@@ -67,14 +67,18 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
         return oio;
 }
 
-static struct osc_page *osc_cl_page_osc(struct cl_page *page)
+static struct osc_page *osc_cl_page_osc(struct cl_page *page,
+                                       struct osc_object *osc)
 {
-        const struct cl_page_slice *slice;
+       const struct cl_page_slice *slice;
 
-        slice = cl_page_at(page, &osc_device_type);
-        LASSERT(slice != NULL);
+       if (osc != NULL)
+               slice = cl_object_page_slice(&osc->oo_cl, page);
+       else
+               slice = cl_page_at(page, &osc_device_type);
+       LASSERT(slice != NULL);
 
-        return cl2osc_page(slice);
+       return cl2osc_page(slice);
 }
 
 
@@ -137,8 +141,8 @@ static int osc_io_submit(const struct lu_env *env,
                 io = page->cp_owner;
                 LASSERT(io != NULL);
 
-                opg = osc_cl_page_osc(page);
-                oap = &opg->ops_oap;
+               opg = osc_cl_page_osc(page, osc);
+               oap = &opg->ops_oap;
                LASSERT(osc == oap->oap_obj);
 
                if (!cfs_list_empty(&oap->oap_pending_item) ||
@@ -261,15 +265,11 @@ static int osc_io_commit_async(const struct lu_env *env,
                }
        }
 
-       /*
-        * NOTE: here @page is a top-level page. This is done to avoid
-        * creation of sub-page-list.
-        */
        while (qin->pl_nr > 0) {
                struct osc_async_page *oap;
 
                page = cl_page_list_first(qin);
-               opg = osc_cl_page_osc(page);
+               opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;
 
                if (!cfs_list_empty(&oap->oap_rpc_item)) {
@@ -286,8 +286,7 @@ static int osc_io_commit_async(const struct lu_env *env,
                                break;
                }
 
-               osc_page_touch_at(env, osc2cl(osc),
-                                 opg->ops_cl.cpl_page->cp_index,
+               osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
                                  page == last_page ? to : PAGE_SIZE);
 
                cl_page_list_del(env, qin, page);
@@ -409,13 +408,9 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
                                start, current->comm);
 
 #ifdef __linux__
-       {
-               struct page *vmpage = cl_page_vmpage(env, page);
-               if (PageLocked(vmpage))
-                       CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
-                              ops, page->cp_index,
-                              (oap->oap_cmd & OBD_BRW_RWMASK));
-       }
+       if (PageLocked(page->cp_vmpage))
+               CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
+                      ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
 #endif
 
        return CLP_GANG_OKAY;
@@ -814,18 +809,21 @@ static void osc_req_attr_set(const struct lu_env *env,
                oa->o_valid |= OBD_MD_FLID;
        }
        if (flags & OBD_MD_FLHANDLE) {
-                clerq = slice->crs_req;
-                LASSERT(!cfs_list_empty(&clerq->crq_pages));
-                apage = container_of(clerq->crq_pages.next,
-                                     struct cl_page, cp_flight);
-                opg = osc_cl_page_osc(apage);
-                apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
-                lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
-                if (lock == NULL) {
-                        struct cl_object_header *head;
-                        struct cl_lock          *scan;
-
-                        head = cl_object_header(apage->cp_obj);
+               struct cl_object *subobj;
+
+               clerq = slice->crs_req;
+               LASSERT(!cfs_list_empty(&clerq->crq_pages));
+               apage = container_of(clerq->crq_pages.next,
+                                    struct cl_page, cp_flight);
+               opg = osc_cl_page_osc(apage, NULL);
+               subobj = opg->ops_cl.cpl_obj;
+               lock = cl_lock_at_pgoff(env, subobj, osc_index(opg),
+                                       NULL, 1, 1);
+               if (lock == NULL) {
+                       struct cl_object_header *head;
+                       struct cl_lock          *scan;
+
+                       head = cl_object_header(subobj);
                         cfs_list_for_each_entry(scan, &head->coh_locks,
                                                 cll_linkage)
                                 CL_LOCK_DEBUG(D_ERROR, env, scan,
index a4dc541..97ad0d0 100644 (file)
@@ -82,15 +82,15 @@ static int osc_page_is_dlocked(const struct lu_env *env,
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);
 
-        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
-        if (pending)
-                flags |= LDLM_FL_CBPENDING;
-
-        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
-        osc_lock_build_res(env, obj, resname);
-        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
-        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
-                              dlmmode, &flags, NULL, lockh, unref);
+       flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
+       if (pending)
+               flags |= LDLM_FL_CBPENDING;
+
+       dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
+       osc_lock_build_res(env, obj, resname);
+       osc_index2policy(policy, page->cp_obj, osc_index(opg), osc_index(opg));
+       return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
+                             dlmmode, &flags, NULL, lockh, unref);
 }
 
 /**
@@ -125,8 +125,8 @@ static int osc_page_protected(const struct lu_env *env,
                 hdr = cl_object_header(opg->ops_cl.cpl_obj);
                 descr = &osc_env_info(env)->oti_descr;
                 descr->cld_mode = mode;
-                descr->cld_start = page->cp_index;
-                descr->cld_end   = page->cp_index;
+               descr->cld_start = osc_index(opg);
+               descr->cld_end   = osc_index(opg);
                spin_lock(&hdr->coh_lock_guard);
                 cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                         /*
@@ -163,25 +163,20 @@ static int osc_page_protected(const struct lu_env *env,
  * Page operations.
  *
  */
-static void osc_page_fini(const struct lu_env *env,
-                          struct cl_page_slice *slice)
-{
-}
-
 static void osc_page_transfer_get(struct osc_page *opg, const char *label)
 {
-        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+       struct cl_page *page = opg->ops_cl.cpl_page;
 
-        LASSERT(!opg->ops_transfer_pinned);
-        cl_page_get(page);
-        lu_ref_add_atomic(&page->cp_reference, label, page);
-        opg->ops_transfer_pinned = 1;
+       LASSERT(!opg->ops_transfer_pinned);
+       cl_page_get(page);
+       lu_ref_add_atomic(&page->cp_reference, label, page);
+       opg->ops_transfer_pinned = 1;
 }
 
 static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
 {
-       struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+       struct cl_page *page = opg->ops_cl.cpl_page;
 
        if (opg->ops_transfer_pinned) {
                opg->ops_transfer_pinned = 0;
@@ -241,17 +236,18 @@ static int osc_page_is_under_lock(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
 {
-        struct cl_lock *lock;
-        int             result = -ENODATA;
+       struct osc_page *opg = cl2osc_page(slice);
+       struct cl_lock *lock;
+       int             result = -ENODATA;
 
-        ENTRY;
-        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
-                               NULL, 1, 0);
-        if (lock != NULL) {
+       ENTRY;
+       lock = cl_lock_at_pgoff(env, slice->cpl_obj, osc_index(opg),
+                       NULL, 1, 0);
+       if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        }
-        RETURN(result);
+       RETURN(result);
 }
 
 static const char *osc_list(cfs_list_t *head)
@@ -276,13 +272,13 @@ static int osc_page_print(const struct lu_env *env,
         struct osc_object     *obj = cl2osc(slice->cpl_obj);
         struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
 
-        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
+       return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
                          "1< %#x %d %u %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s | %d %s %s>\n",
-                          opg,
+                         opg, osc_index(opg),
                           /* 1 */
                           oap->oap_magic, oap->oap_cmd,
                           oap->oap_interrupted,
@@ -330,11 +326,11 @@ static void osc_page_delete(const struct lu_env *env,
         CDEBUG(D_TRACE, "%p\n", opg);
         osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(env, obj, opg);
-        if (rc) {
-                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
-                              "Trying to teardown failed: %d\n", rc);
-                LASSERT(0);
-        }
+       if (rc) {
+               CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
+                             "Trying to teardown failed: %d\n", rc);
+               LASSERT(0);
+       }
 
        spin_lock(&obj->oo_seatbelt);
        if (opg->ops_submitter != NULL) {
@@ -405,7 +401,6 @@ static int osc_page_flush(const struct lu_env *env,
 }
 
 static const struct cl_page_operations osc_page_ops = {
-       .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
@@ -415,7 +410,7 @@ static const struct cl_page_operations osc_page_ops = {
 };
 
 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
-                 struct cl_page *page, struct page *vmpage)
+                 struct cl_page *page, pgoff_t index)
 {
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg = cl_object_page_slice(obj, page);
@@ -423,9 +418,10 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 
        opg->ops_from = 0;
        opg->ops_to   = PAGE_CACHE_SIZE;
+       opg->ops_cl.cpl_index = index;
 
-       result = osc_prep_async_page(osc, opg, vmpage,
-                                       cl_offset(obj, page->cp_index));
+       result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+                                    cl_offset(obj, index));
        if (result == 0) {
                struct osc_io *oio = osc_env_io(env);
                opg->ops_srvlock = osc_io_srvlock(oio);
@@ -449,8 +445,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
                result = osc_lru_reserve(env, osc, opg);
                if (result == 0) {
                        spin_lock(&osc->oo_tree_lock);
-                       result = radix_tree_insert(&osc->oo_tree,
-                                                  page->cp_index, opg);
+                       result = radix_tree_insert(&osc->oo_tree, index, opg);
                        if (result == 0)
                                ++osc->oo_npages;
                        spin_unlock(&osc->oo_tree_lock);
@@ -720,7 +715,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 
                opg = cfs_list_entry(cli->cl_lru_list.next, struct osc_page,
                                     ops_lru);
-               page = cl_page_top(opg->ops_cl.cpl_page);
+               page = opg->ops_cl.cpl_page;
                if (cl_page_in_use_noref(page)) {
                        cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;