LU-3963 obdclass: convert to linux list api
[fs/lustre-release.git] lustre/obdclass/cl_page.c
index 926035e..6e70d46 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -28,6 +26,8 @@
 /*
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Client Lustre Page.
  *
  *   Author: Nikita Danilov <nikita.danilov@sun.com>
+ *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 
 #include <libcfs/libcfs.h>
 #include <obd_class.h>
 #include <cl_object.h>
 #include "cl_internal.h"
 
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
-                            int radix);
-
-static cfs_mem_cache_t      *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
-        {
-                .ckd_cache = &cl_page_kmem,
-                .ckd_name  = "cl_page_kmem",
-                .ckd_size  = sizeof (struct cl_page)
-        },
-        {
-                .ckd_cache = NULL
-        }
-};
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
 
 #ifdef LIBCFS_DEBUG
 # define PASSERT(env, page, expr)                                       \
@@ -80,7 +64,7 @@ static struct lu_kmem_descr cl_page_caches[] = {
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
 #endif /* !LIBCFS_DEBUG */
 
-#ifdef INVARIANT_CHECK
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 # define PINVRNT(env, page, expr)                                       \
   do {                                                                    \
           if (unlikely(!(expr))) {                                      \
@@ -88,40 +72,42 @@ static struct lu_kmem_descr cl_page_caches[] = {
                   LINVRNT(0);                                           \
           }                                                             \
   } while (0)
-#else /* !INVARIANT_CHECK */
+#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 # define PINVRNT(env, page, exp) \
-        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
-#endif /* !INVARIANT_CHECK */
-
-/**
- * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
-{
-        while (page->cp_parent != NULL)
-                page = page->cp_parent;
-        return page;
-}
+        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
+#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
+
+/* Disable page statistics by default due to the huge performance penalty. */
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_PAGE_INC(o, item) \
+       atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGE_DEC(o, item) \
+       atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGESTATE_INC(o, state) \
+       atomic_inc(&cl_object_site(o)->cs_pages_state[state])
+#define CS_PAGESTATE_DEC(o, state) \
+       atomic_dec(&cl_object_site(o)->cs_pages_state[state])
+#else
+#define CS_PAGE_INC(o, item)
+#define CS_PAGE_DEC(o, item)
+#define CS_PAGESTATE_INC(o, state)
+#define CS_PAGESTATE_DEC(o, state)
+#endif
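
The macros above compile to empty statements unless CONFIG_DEBUG_PAGESTATE_TRACKING is set, so the per-site accounting costs nothing in a default build. A minimal usage sketch (the function name is hypothetical; only the macros come from this patch):

static void example_account_new_page(struct cl_object *obj)
{
	/* bump the per-site page counter and the per-state counter;
	 * both expand to nothing when state tracking is disabled */
	CS_PAGE_INC(obj, total);
	CS_PAGESTATE_INC(obj, CPS_CACHED);
}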
 
 /**
  * Internal version of cl_page_get().
  *
  * This function can be used to obtain initial reference to previously
  * unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by locking page radix-tree
- * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
+ * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
  * associated with \a page.
  *
  * Use with care! Not exported.
  */
 static void cl_page_get_trust(struct cl_page *page)
 {
-        /*
-         * Checkless version for trusted users.
-         */
-        if (cfs_atomic_inc_return(&page->cp_ref) == 1)
-                cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+       LASSERT(atomic_read(&page->cp_ref) > 0);
+       atomic_inc(&page->cp_ref);
 }
 
 /**
@@ -134,182 +120,43 @@ static const struct cl_page_slice *
 cl_page_at_trusted(const struct cl_page *page,
                    const struct lu_device_type *dtype)
 {
-        const struct cl_page_slice *slice;
-
-#ifdef INVARIANT_CHECK
-        struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
-        if (!cfs_atomic_read(&page->cp_ref))
-                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
-#endif
-        ENTRY;
-
-        page = cl_page_top_trusted((struct cl_page *)page);
-        do {
-                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-                        if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
-                                RETURN(slice);
-                }
-                page = page->cp_child;
-        } while (page != NULL);
-        RETURN(NULL);
+       const struct cl_page_slice *slice;
+       ENTRY;
+
+       list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+               if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
+                       RETURN(slice);
+       }
+       RETURN(NULL);
 }
 
-/**
- * Returns a page with given index in the given object, or NULL if no page is
- * found. Acquires a reference on \a page.
- *
- * Locking: called under cl_object_header::coh_page_guard spin-lock.
- */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
-{
-        struct cl_page *page;
-
-        LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
-
-        page = radix_tree_lookup(&hdr->coh_tree, index);
-        if (page != NULL) {
-                cl_page_get_trust(page);
-        }
-        return page;
-}
-EXPORT_SYMBOL(cl_page_lookup);
-
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * \param resched If not NULL, then we give up before hogging CPU for too
- * long and set *resched = 1, in that case caller should implement a retry
- * logic.
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- */
-void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
-                         struct cl_io *io, pgoff_t start, pgoff_t end,
-                         struct cl_page_list *queue, int nonblock,
-                         int *resched)
-{
-        struct cl_object_header *hdr;
-        struct cl_page          *page;
-        struct cl_page         **pvec;
-        const struct cl_page_slice  *slice;
-        const struct lu_device_type *dtype;
-        pgoff_t                  idx;
-        unsigned int             nr;
-        unsigned int             i;
-        unsigned int             j;
-        int                    (*page_own)(const struct lu_env *env,
-                                           struct cl_io *io,
-                                           struct cl_page *pg);
-        ENTRY;
-
-        if (resched != NULL)
-                *resched = 0;
-        page_own = nonblock ? cl_page_own_try : cl_page_own;
-
-        idx = start;
-        hdr = cl_object_header(obj);
-        pvec = cl_env_info(env)->clt_pvec;
-        dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
-        cfs_spin_lock(&hdr->coh_page_guard);
-        while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
-                                            idx, CLT_PVEC_SIZE)) > 0) {
-                idx = pvec[nr - 1]->cp_index + 1;
-                for (i = 0, j = 0; i < nr; ++i) {
-                        page = pvec[i];
-                        pvec[i] = NULL;
-                        if (page->cp_index > end)
-                                break;
-                        if (page->cp_state == CPS_FREEING)
-                                continue;
-                        if (page->cp_type == CPT_TRANSIENT) {
-                                /* God, we found a transient page!*/
-                                continue;
-                        }
-
-                        slice = cl_page_at_trusted(page, dtype);
-                        /*
-                         * Pages for lsm-less file has no underneath sub-page
-                         * for osc, in case of ...
-                         */
-                        PASSERT(env, page, slice != NULL);
-
-                        page = slice->cpl_page;
-                        /*
-                         * Can safely call cl_page_get_trust() under
-                         * radix-tree spin-lock.
-                         *
-                         * XXX not true, because @page is from object another
-                         * than @hdr and protected by different tree lock.
-                         */
-                        cl_page_get_trust(page);
-                        lu_ref_add_atomic(&page->cp_reference,
-                                          "page_list", cfs_current());
-                        pvec[j++] = page;
-                }
-
-                /*
-                 * Here a delicate locking dance is performed. Current thread
-                 * holds a reference to a page, but has to own it before it
-                 * can be placed into queue. Owning implies waiting, so
-                 * radix-tree lock is to be released. After a wait one has to
-                 * check that pages weren't truncated (cl_page_own() returns
-                 * error in the latter case).
-                 */
-                cfs_spin_unlock(&hdr->coh_page_guard);
-                for (i = 0; i < j; ++i) {
-                        page = pvec[i];
-                        if (page_own(env, io, page) == 0)
-                                cl_page_list_add(queue, page);
-                        lu_ref_del(&page->cp_reference,
-                                   "page_list", cfs_current());
-                        cl_page_put(env, page);
-                }
-                cfs_spin_lock(&hdr->coh_page_guard);
-                if (nr < CLT_PVEC_SIZE)
-                        break;
-                if (resched != NULL && cfs_need_resched()) {
-                        *resched = 1;
-                        break;
-                }
-        }
-        cfs_spin_unlock(&hdr->coh_page_guard);
-        EXIT;
-}
-EXPORT_SYMBOL(cl_page_gang_lookup);
-
 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object *obj  = page->cp_obj;
-        struct cl_site   *site = cl_object_site(obj);
-
-        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
-        PASSERT(env, page, page->cp_owner == NULL);
-        PASSERT(env, page, page->cp_req == NULL);
-        PASSERT(env, page, page->cp_parent == NULL);
-        PASSERT(env, page, page->cp_state == CPS_FREEING);
-
-        ENTRY;
-        cfs_might_sleep();
-        while (!cfs_list_empty(&page->cp_layers)) {
-                struct cl_page_slice *slice;
-
-                slice = cfs_list_entry(page->cp_layers.next,
-                                       struct cl_page_slice, cpl_linkage);
-                cfs_list_del_init(page->cp_layers.next);
-                slice->cpl_ops->cpo_fini(env, slice);
-        }
-        cfs_atomic_dec(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
-        cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
-#endif
-        lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
-        cl_object_put(env, obj);
-        lu_ref_fini(&page->cp_reference);
-        OBD_SLAB_FREE_PTR(page, cl_page_kmem);
-        EXIT;
+       struct cl_object *obj  = page->cp_obj;
+       int pagesize = cl_object_header(obj)->coh_page_bufsize;
+
+       PASSERT(env, page, list_empty(&page->cp_batch));
+       PASSERT(env, page, page->cp_owner == NULL);
+       PASSERT(env, page, page->cp_req == NULL);
+       PASSERT(env, page, page->cp_state == CPS_FREEING);
+
+       ENTRY;
+       while (!list_empty(&page->cp_layers)) {
+               struct cl_page_slice *slice;
+
+               slice = list_entry(page->cp_layers.next,
+                                  struct cl_page_slice, cpl_linkage);
+               list_del_init(page->cp_layers.next);
+               if (unlikely(slice->cpl_ops->cpo_fini != NULL))
+                       slice->cpl_ops->cpo_fini(env, slice);
+       }
+       CS_PAGE_DEC(obj, total);
+       CS_PAGESTATE_DEC(obj, page->cp_state);
+       lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
+       cl_object_put(env, obj);
+       lu_ref_fini(&page->cp_reference);
+       OBD_FREE(page, pagesize);
+       EXIT;
 }
 
 /**
@@ -323,63 +170,55 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
         *(enum cl_page_state *)&page->cp_state = state;
 }
 
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
-                         pgoff_t ind, struct page *vmpage,
-                         enum cl_page_type type, struct cl_page **out)
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+               struct cl_object *o, pgoff_t ind, struct page *vmpage,
+               enum cl_page_type type)
 {
-        struct cl_page          *page;
-        struct cl_page          *err  = NULL;
-        struct lu_object_header *head;
-        struct cl_site          *site = cl_object_site(o);
-        int                      result;
-
-        ENTRY;
-        result = +1;
-        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
-        if (page != NULL) {
-                cfs_atomic_set(&page->cp_ref, 1);
-                page->cp_obj = o;
-                cl_object_get(o);
-                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
-                                                     "cl_page", page);
-                page->cp_index = ind;
-                cl_page_state_set_trust(page, CPS_CACHED);
-                page->cp_type = type;
-                CFS_INIT_LIST_HEAD(&page->cp_layers);
-                CFS_INIT_LIST_HEAD(&page->cp_batch);
-                CFS_INIT_LIST_HEAD(&page->cp_flight);
-                cfs_mutex_init(&page->cp_mutex);
-                lu_ref_init(&page->cp_reference);
-                head = o->co_lu.lo_header;
-                cfs_list_for_each_entry(o, &head->loh_layers,
-                                        co_lu.lo_linkage) {
-                        if (o->co_ops->coo_page_init != NULL) {
-                                err = o->co_ops->coo_page_init(env, o,
-                                                               page, vmpage);
-                                if (err != NULL) {
-                                        cl_page_state_set_trust(page,
-                                                                CPS_FREEING);
-                                        cl_page_free(env, page);
-                                        page = err;
-                                        break;
-                                }
-                        }
-                }
-                if (err == NULL) {
-                        cfs_atomic_inc(&site->cs_pages.cs_busy);
-                        cfs_atomic_inc(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
-                        cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
-#endif
-                        cfs_atomic_inc(&site->cs_pages.cs_created);
-                        result = 0;
-                }
-        } else
-                page = ERR_PTR(-ENOMEM);
-        *out = page;
-        RETURN(result);
+       struct cl_page          *page;
+       struct lu_object_header *head;
+
+       ENTRY;
+       OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+                       GFP_NOFS);
+       if (page != NULL) {
+               int result = 0;
+               atomic_set(&page->cp_ref, 1);
+               page->cp_obj = o;
+               cl_object_get(o);
+               lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
+                                    page);
+               page->cp_vmpage = vmpage;
+               cl_page_state_set_trust(page, CPS_CACHED);
+               page->cp_type = type;
+               INIT_LIST_HEAD(&page->cp_layers);
+               INIT_LIST_HEAD(&page->cp_batch);
+               INIT_LIST_HEAD(&page->cp_flight);
+               lu_ref_init(&page->cp_reference);
+               head = o->co_lu.lo_header;
+               list_for_each_entry(o, &head->loh_layers,
+                                   co_lu.lo_linkage) {
+                       if (o->co_ops->coo_page_init != NULL) {
+                               result = o->co_ops->coo_page_init(env, o, page,
+                                                                 ind);
+                               if (result != 0) {
+                                       cl_page_delete0(env, page);
+                                       cl_page_free(env, page);
+                                       page = ERR_PTR(result);
+                                       break;
+                               }
+                       }
+               }
+               if (result == 0) {
+                       CS_PAGE_INC(o, total);
+                       CS_PAGE_INC(o, create);
+                       CS_PAGESTATE_DEC(o, CPS_CACHED);
+               }
+       } else {
+               page = ERR_PTR(-ENOMEM);
+       }
+       RETURN(page);
 }
+EXPORT_SYMBOL(cl_page_alloc);
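
cl_page_alloc() now walks the object's layers and calls each coo_page_init() hook with the page index rather than the VM page. A hedged sketch of such a hook under the new convention (the layer and function names are illustrative; only the signature is taken from the call above):

static int example_page_init(const struct lu_env *env, struct cl_object *obj,
			     struct cl_page *page, pgoff_t index)
{
	/* a real layer would allocate its cl_page_slice here and attach it
	 * to page->cp_layers; a negative errno makes cl_page_alloc() tear
	 * the page down and return ERR_PTR() */
	return 0;
}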
 
 /**
  * Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -392,30 +231,29 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
  *
  * \see cl_object_find(), cl_lock_find()
  */
-static struct cl_page *cl_page_find0(const struct lu_env *env,
-                                     struct cl_object *o,
-                                     pgoff_t idx, struct page *vmpage,
-                                     enum cl_page_type type,
-                                     struct cl_page *parent)
+struct cl_page *cl_page_find(const struct lu_env *env,
+                            struct cl_object *o,
+                            pgoff_t idx, struct page *vmpage,
+                            enum cl_page_type type)
 {
-        struct cl_page          *page;
-        struct cl_page          *ghost = NULL;
-        struct cl_object_header *hdr;
-        struct cl_site          *site = cl_object_site(o);
-        int err;
+       struct cl_page          *page = NULL;
+       struct cl_object_header *hdr;
 
-        LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
-        cfs_might_sleep();
+       LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
+       might_sleep();
 
-        ENTRY;
+       ENTRY;
 
-        hdr = cl_object_header(o);
-        cfs_atomic_inc(&site->cs_pages.cs_lookup);
+       hdr = cl_object_header(o);
+       CS_PAGE_INC(o, lookup);
 
         CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
+               /* vmpage lock is used to protect the child/parent
+                * relationship */
+               KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
                  *
@@ -426,146 +264,32 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                  *       reference on it.
                  */
                 page = cl_vmpage_page(vmpage, o);
-                PINVRNT(env, page,
-                        ergo(page != NULL,
-                             cl_page_vmpage(env, page) == vmpage &&
-                             (void *)radix_tree_lookup(&hdr->coh_tree,
-                                                       idx) == page));
-        } else {
-                cfs_spin_lock(&hdr->coh_page_guard);
-                page = cl_page_lookup(hdr, idx);
-                cfs_spin_unlock(&hdr->coh_page_guard);
-        }
-        if (page != NULL) {
-                cfs_atomic_inc(&site->cs_pages.cs_hit);
-                RETURN(page);
+               if (page != NULL) {
+                       CS_PAGE_INC(o, hit);
+                       RETURN(page);
+               }
         }
 
         /* allocate and initialize cl_page */
-        err = cl_page_alloc(env, o, idx, vmpage, type, &page);
-        if (err != 0)
-                RETURN(page);
-        /*
-         * XXX optimization: use radix_tree_preload() here, and change tree
-         * gfp mask to GFP_KERNEL in cl_object_header_init().
-         */
-        cfs_spin_lock(&hdr->coh_page_guard);
-        err = radix_tree_insert(&hdr->coh_tree, idx, page);
-        if (err != 0) {
-                ghost = page;
-                /*
-                 * Noted by Jay: a lock on \a vmpage protects cl_page_find()
-                 * from this race, but
-                 *
-                 *     0. it's better to have cl_page interface "locally
-                 *     consistent" so that its correctness can be reasoned
-                 *     about without appealing to the (obscure world of) VM
-                 *     locking.
-                 *
-                 *     1. handling this race allows ->coh_tree to remain
-                 *     consistent even when VM locking is somehow busted,
-                 *     which is very useful during diagnosing and debugging.
-                 */
-                page = ERR_PTR(err);
-                if (err == -EEXIST) {
-                        /*
-                         * XXX in case of a lookup for CPT_TRANSIENT page,
-                         * nothing protects a CPT_CACHEABLE page from being
-                         * concurrently moved into CPS_FREEING state.
-                         */
-                        page = cl_page_lookup(hdr, idx);
-                        PASSERT(env, page, page != NULL);
-                        if (page->cp_type == CPT_TRANSIENT &&
-                            type == CPT_CACHEABLE) {
-                                /* XXX: We should make sure that inode sem
-                                 * keeps being held in the lifetime of
-                                 * transient pages, so it is impossible to
-                                 * have conflicting transient pages.
-                                 */
-                                cfs_spin_unlock(&hdr->coh_page_guard);
-                                cl_page_put(env, page);
-                                cfs_spin_lock(&hdr->coh_page_guard);
-                                page = ERR_PTR(-EBUSY);
-                        }
-                }
-        } else {
-                if (parent) {
-                        LASSERT(page->cp_parent == NULL);
-                        page->cp_parent = parent;
-                        parent->cp_child = page;
-                }
-                hdr->coh_pages++;
-        }
-        cfs_spin_unlock(&hdr->coh_page_guard);
-
-        if (unlikely(ghost != NULL)) {
-                cfs_atomic_dec(&site->cs_pages.cs_busy);
-                cl_page_delete0(env, ghost, 0);
-                cl_page_free(env, ghost);
-        }
-        RETURN(page);
-}
-
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
-                             pgoff_t idx, struct page *vmpage,
-                             enum cl_page_type type)
-{
-        return cl_page_find0(env, o, idx, vmpage, type, NULL);
+        page = cl_page_alloc(env, o, idx, vmpage, type);
+       RETURN(page);
 }
 EXPORT_SYMBOL(cl_page_find);
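
With the radix tree gone, cl_page_find() is a thin wrapper: for CPT_CACHEABLE pages it consults vmpage->private under the VM page lock, otherwise it allocates a fresh page. A minimal caller sketch, assuming the usual locking (the helper is hypothetical):

static struct cl_page *example_find_page(const struct lu_env *env,
					 struct cl_object *obj,
					 struct page *vmpage, pgoff_t index)
{
	struct cl_page *page;

	lock_page(vmpage);	/* required for CPT_CACHEABLE lookups */
	page = cl_page_find(env, obj, index, vmpage, CPT_CACHEABLE);
	unlock_page(vmpage);

	/* the result is either a referenced cl_page or ERR_PTR() */
	return page;
}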
 
-
-struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
-                                 pgoff_t idx, struct page *vmpage,
-                                 struct cl_page *parent)
-{
-        return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
-}
-EXPORT_SYMBOL(cl_page_find_sub);
-
 static inline int cl_page_invariant(const struct cl_page *pg)
 {
-        struct cl_object_header *header;
-        struct cl_page          *parent;
-        struct cl_page          *child;
-        struct cl_io            *owner;
+       /*
+        * Page invariant is protected by a VM lock.
+        */
+       LINVRNT(cl_page_is_vmlocked(NULL, pg));
 
-        /*
-         * Page invariant is protected by a VM lock.
-         */
-        LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
-        header = cl_object_header(pg->cp_obj);
-        parent = pg->cp_parent;
-        child  = pg->cp_child;
-        owner  = pg->cp_owner;
-
-        return cfs_atomic_read(&pg->cp_ref) > 0 &&
-                ergo(parent != NULL, parent->cp_child == pg) &&
-                ergo(child != NULL, child->cp_parent == pg) &&
-                ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
-                ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
-                ergo(owner != NULL && parent != NULL,
-                     parent->cp_owner == pg->cp_owner->ci_parent) &&
-                ergo(owner != NULL && child != NULL,
-                     child->cp_owner->ci_parent == owner) &&
-                /*
-                 * Either page is early in initialization (has neither child
-                 * nor parent yet), or it is in the object radix tree.
-                 */
-                ergo(pg->cp_state < CPS_FREEING,
-                     (void *)radix_tree_lookup(&header->coh_tree,
-                                               pg->cp_index) == pg ||
-                     (child == NULL && parent == NULL));
+       return cl_page_in_use_noref(pg);
 }
 
 static void cl_page_state_set0(const struct lu_env *env,
                                struct cl_page *page, enum cl_page_state state)
 {
         enum cl_page_state old;
-#ifdef LUSTRE_PAGESTATE_TRACKING
-        struct cl_site *site = cl_object_site(page->cp_obj);
-#endif
 
         /*
          * Matrix of allowed state transitions [old][new], for sanity
@@ -613,24 +337,18 @@ static void cl_page_state_set0(const struct lu_env *env,
         old = page->cp_state;
         PASSERT(env, page, allowed_transitions[old][state]);
         CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
-        for (; page != NULL; page = page->cp_child) {
-                PASSERT(env, page, page->cp_state == old);
-                PASSERT(env, page,
-                        equi(state == CPS_OWNED, page->cp_owner != NULL));
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
-                cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
-                cfs_atomic_inc(&site->cs_pages_state[state]);
-#endif
-                cl_page_state_set_trust(page, state);
-        }
-        EXIT;
+       PASSERT(env, page, page->cp_state == old);
+       PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));
+
+       CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
+       CS_PAGESTATE_INC(page->cp_obj, state);
+       cl_page_state_set_trust(page, state);
+       EXIT;
 }
 
 static void cl_page_state_set(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
 {
-        PINVRNT(env, page, cl_page_invariant(page));
         cl_page_state_set0(env, page, state);
 }
 
@@ -644,10 +362,9 @@ static void cl_page_state_set(const struct lu_env *env,
  */
 void cl_page_get(struct cl_page *page)
 {
-        ENTRY;
-        LASSERT(page->cp_state != CPS_FREEING);
-        cl_page_get_trust(page);
-        EXIT;
+       ENTRY;
+       cl_page_get_trust(page);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_get);
 
@@ -662,118 +379,52 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object_header *hdr;
-        struct cl_site *site = cl_object_site(page->cp_obj);
-
-        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
-
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
-                       cfs_atomic_read(&page->cp_ref));
-
-        hdr = cl_object_header(cl_object_top(page->cp_obj));
-        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
-                cfs_atomic_dec(&site->cs_pages.cs_busy);
-                /* We're going to access the page w/o a reference, but it's
-                 * ok because we have grabbed the lock coh_page_guard, which
-                 * means nobody is able to free this page behind us.
-                 */
-                if (page->cp_state == CPS_FREEING) {
-                        /* We drop the page reference and check the page state
-                         * inside the coh_page_guard. So that if it gets here,
-                         * it is the REALLY last reference to this page.
-                         */
-                        cfs_spin_unlock(&hdr->coh_page_guard);
-
-                        LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
-                        PASSERT(env, page, page->cp_owner == NULL);
-                        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
-                        /*
-                         * Page is no longer reachable by other threads. Tear
-                         * it down.
-                         */
-                        cl_page_free(env, page);
-
-                        EXIT;
-                        return;
-                }
-                cfs_spin_unlock(&hdr->coh_page_guard);
-        }
-
-        EXIT;
+                      atomic_read(&page->cp_ref));
+
+       if (atomic_dec_and_test(&page->cp_ref)) {
+               LASSERT(page->cp_state == CPS_FREEING);
+
+               LASSERT(atomic_read(&page->cp_ref) == 0);
+               PASSERT(env, page, page->cp_owner == NULL);
+               PASSERT(env, page, list_empty(&page->cp_batch));
+               /*
+                * Page is no longer reachable by other threads. Tear
+                * it down.
+                */
+               cl_page_free(env, page);
+       }
+
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_put);
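
Reference counting is now a plain atomic counter: the final cl_page_put() frees the page directly, with no page guard to take, but only after cl_page_delete() has moved it to CPS_FREEING. A sketch of the expected pairing (the helper is hypothetical):

static void example_use_page(const struct lu_env *env, struct cl_page *page)
{
	cl_page_get(page);
	/* ... operate on the page ... */
	cl_page_put(env, page);	/* frees the page only if this was the last
				 * reference and the page is CPS_FREEING */
}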
 
 /**
- * Returns a VM page associated with a given cl_page.
- */
-cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
-        const struct cl_page_slice *slice;
-
-        /*
-         * Find uppermost layer with ->cpo_vmpage() method, and return its
-         * result.
-         */
-        page = cl_page_top(page);
-        do {
-                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-                        if (slice->cpl_ops->cpo_vmpage != NULL)
-                                RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
-                }
-                page = page->cp_child;
-        } while (page != NULL);
-        LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
  * Returns a cl_page associated with a VM page, and given cl_object.
  */
-struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
 {
-        struct cl_page *page;
-        struct cl_object_header *hdr;
-
-        ENTRY;
-        KLASSERT(PageLocked(vmpage));
-
-        /*
-         * NOTE: absence of races and liveness of data are guaranteed by page
-         *       lock on a "vmpage". That works because object destruction has
-         *       bottom-to-top pass.
-         */
-
-        /*
-         * This loop assumes that ->private points to the top-most page. This
-         * can be rectified easily.
-         */
-        hdr = cl_object_header(cl_object_top(obj));
-        cfs_spin_lock(&hdr->coh_page_guard);
-        for (page = (void *)vmpage->private;
-             page != NULL; page = page->cp_child) {
-                if (cl_object_same(page->cp_obj, obj)) {
-                        cl_page_get_trust(page);
-                        break;
-                }
-        }
-        cfs_spin_unlock(&hdr->coh_page_guard);
-        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
-        RETURN(page);
+       struct cl_page *page;
+
+       ENTRY;
+       KLASSERT(PageLocked(vmpage));
+
+       /*
+        * NOTE: absence of races and liveness of data are guaranteed by page
+        *       lock on a "vmpage". That works because object destruction has
+        *       bottom-to-top pass.
+        */
+
+       page = (struct cl_page *)vmpage->private;
+       if (page != NULL) {
+               cl_page_get_trust(page);
+               LASSERT(page->cp_type == CPT_CACHEABLE);
+       }
+       RETURN(page);
 }
 EXPORT_SYMBOL(cl_vmpage_page);
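
Since a VM page now maps to exactly one cl_page, cl_vmpage_page() is just a guarded read of vmpage->private that takes a reference. A usage sketch under the assumed VM page lock (helper name is hypothetical):

static void example_peek(const struct lu_env *env, struct page *vmpage,
			 struct cl_object *obj)
{
	struct cl_page *page;

	/* PageLocked(vmpage) is assumed to be held by the caller */
	page = cl_vmpage_page(vmpage, obj);
	if (page != NULL) {
		/* ... inspect the cached page ... */
		cl_page_put(env, page);	/* drop the reference taken above */
	}
}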
 
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
-        return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                        const struct lu_device_type *dtype)
 {
@@ -783,80 +434,83 @@ EXPORT_SYMBOL(cl_page_at);
 
 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
 
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
-({                                                                      \
-        const struct lu_env        *__env  = (_env);                    \
-        struct cl_page             *__page = (_page);                   \
-        const struct cl_page_slice *__scan;                             \
-        int                         __result;                           \
-        ptrdiff_t                   __op   = (_op);                     \
-        int                       (*__method)_proto;                    \
-                                                                        \
-        __result = 0;                                                   \
-        __page = cl_page_top(__page);                                   \
-        do {                                                            \
-                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
-                                        cpl_linkage) {                  \
-                        __method = *(void **)((char *)__scan->cpl_ops + \
-                                              __op);                    \
-                        if (__method != NULL) {                         \
-                                __result = (*__method)(__env, __scan,   \
-                                                       ## __VA_ARGS__); \
-                                if (__result != 0)                      \
-                                        break;                          \
-                        }                                               \
-                }                                                       \
-                __page = __page->cp_child;                              \
-        } while (__page != NULL && __result == 0);                      \
-        if (__result > 0)                                               \
-                __result = 0;                                           \
-        __result;                                                       \
+#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                  \
+({                                                                     \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       int                         __result;                           \
+       ptrdiff_t                   __op   = (_op);                     \
+       int                        (*__method)_proto;                   \
+                                                                       \
+       __result = 0;                                                   \
+       list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
+               __method = *(void **)((char *)__scan->cpl_ops +  __op); \
+               if (__method != NULL) {                                 \
+                       __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+                       if (__result != 0)                              \
+                               break;                                  \
+               }                                                       \
+       }                                                               \
+       if (__result > 0)                                               \
+               __result = 0;                                           \
+       __result;                                                       \
 })
 
-#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
-do {                                                                    \
-        const struct lu_env        *__env  = (_env);                    \
-        struct cl_page             *__page = (_page);                   \
-        const struct cl_page_slice *__scan;                             \
-        ptrdiff_t                   __op   = (_op);                     \
-        void                      (*__method)_proto;                    \
-                                                                        \
-        __page = cl_page_top(__page);                                   \
-        do {                                                            \
-                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
-                                        cpl_linkage) {                  \
-                        __method = *(void **)((char *)__scan->cpl_ops + \
-                                              __op);                    \
-                        if (__method != NULL)                           \
-                                (*__method)(__env, __scan,              \
-                                            ## __VA_ARGS__);            \
-                }                                                       \
-                __page = __page->cp_child;                              \
-        } while (__page != NULL);                                       \
+#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)          \
+({                                                                     \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       int                         __result;                           \
+       ptrdiff_t                   __op   = (_op);                     \
+       int                       (*__method)_proto;                    \
+                                                                       \
+       __result = 0;                                                   \
+       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
+                                   cpl_linkage) {                      \
+               __method = *(void **)((char *)__scan->cpl_ops +  __op); \
+               if (__method != NULL) {                                 \
+                       __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+                       if (__result != 0)                              \
+                               break;                                  \
+               }                                                       \
+       }                                                               \
+       if (__result > 0)                                               \
+               __result = 0;                                           \
+       __result;                                                       \
+})
+
+#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                  \
+do {                                                                   \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       ptrdiff_t                   __op   = (_op);                     \
+       void                      (*__method)_proto;                    \
+                                                                       \
+       list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) {  \
+               __method = *(void **)((char *)__scan->cpl_ops +  __op); \
+               if (__method != NULL)                                   \
+                       (*__method)(__env, __scan, ## __VA_ARGS__);     \
+       }                                                               \
 } while (0)
 
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
-do {                                                                        \
-        const struct lu_env        *__env  = (_env);                        \
-        struct cl_page             *__page = (_page);                       \
-        const struct cl_page_slice *__scan;                                 \
-        ptrdiff_t                   __op   = (_op);                         \
-        void                      (*__method)_proto;                        \
-                                                                            \
-        /* get to the bottom page. */                                       \
-        while (__page->cp_child != NULL)                                    \
-                __page = __page->cp_child;                                  \
-        do {                                                                \
-                cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
-                                                cpl_linkage) {              \
-                        __method = *(void **)((char *)__scan->cpl_ops +     \
-                                              __op);                        \
-                        if (__method != NULL)                               \
-                                (*__method)(__env, __scan,                  \
-                                            ## __VA_ARGS__);                \
-                }                                                           \
-                __page = __page->cp_parent;                                 \
-        } while (__page != NULL);                                           \
+#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)          \
+do {                                                                   \
+       const struct lu_env        *__env  = (_env);                    \
+       struct cl_page             *__page = (_page);                   \
+       const struct cl_page_slice *__scan;                             \
+       ptrdiff_t                   __op   = (_op);                     \
+       void                      (*__method)_proto;                    \
+                                                                       \
+       /* get to the bottom page. */                                   \
+       list_for_each_entry_reverse(__scan, &__page->cp_layers,         \
+                                   cpl_linkage) {                      \
+               __method = *(void **)((char *)__scan->cpl_ops + __op);  \
+               if (__method != NULL)                                   \
+                       (*__method)(__env, __scan, ## __VA_ARGS__);     \
+       }                                                               \
 } while (0)
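
These iterator macros locate a layer's method by its byte offset within struct cl_page_operations, so a layer only fills in the hooks it needs. A hypothetical operations table wired into the reverse-order delete path (names are illustrative only; the cpo_delete signature is the one used below):

static void example_page_delete(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	/* layer-specific teardown; invoked bottom-to-top by
	 * CL_PAGE_INVOID_REVERSE() from cl_page_delete0() */
}

static const struct cl_page_operations example_page_ops = {
	.cpo_delete = example_page_delete,
};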
 
 static int cl_page_invoke(const struct lu_env *env,
@@ -885,26 +539,21 @@ static void cl_page_invoid(const struct lu_env *env,
 
 static void cl_page_owner_clear(struct cl_page *page)
 {
-        ENTRY;
-        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
-                if (page->cp_owner != NULL) {
-                        LASSERT(page->cp_owner->ci_owned_nr > 0);
-                        page->cp_owner->ci_owned_nr--;
-                        page->cp_owner = NULL;
-                        page->cp_task = NULL;
-                }
-        }
-        EXIT;
+       ENTRY;
+       if (page->cp_owner != NULL) {
+               LASSERT(page->cp_owner->ci_owned_nr > 0);
+               page->cp_owner->ci_owned_nr--;
+               page->cp_owner = NULL;
+       }
+       EXIT;
 }
 
 static void cl_page_owner_set(struct cl_page *page)
 {
-        ENTRY;
-        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
-                LASSERT(page->cp_owner != NULL);
-                page->cp_owner->ci_owned_nr++;
-        }
-        EXIT;
+       ENTRY;
+       LASSERT(page->cp_owner != NULL);
+       page->cp_owner->ci_owned_nr++;
+       EXIT;
 }
 
 void cl_page_disown0(const struct lu_env *env,
@@ -915,7 +564,7 @@ void cl_page_disown0(const struct lu_env *env,
         ENTRY;
         state = pg->cp_state;
         PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
-        PINVRNT(env, pg, cl_page_invariant(pg));
+        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
         cl_page_owner_clear(pg);
 
         if (state == CPS_OWNED)
@@ -937,9 +586,10 @@ void cl_page_disown0(const struct lu_env *env,
  */
 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
 {
-        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
-        ENTRY;
-        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
+       struct cl_io *top = cl_io_top((struct cl_io *)io);
+       LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
+       ENTRY;
+       RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
 }
 EXPORT_SYMBOL(cl_page_is_owned);
 
@@ -971,11 +621,10 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
         PINVRNT(env, pg, !cl_page_is_owned(pg, io));
 
         ENTRY;
-        pg = cl_page_top(pg);
         io = cl_io_top(io);
 
         if (pg->cp_state == CPS_FREEING) {
-                result = -EAGAIN;
+                result = -ENOENT;
         } else {
                 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                         (const struct lu_env *,
@@ -985,14 +634,13 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                 if (result == 0) {
                         PASSERT(env, pg, pg->cp_owner == NULL);
                         PASSERT(env, pg, pg->cp_req == NULL);
-                        pg->cp_owner = io;
-                        pg->cp_task  = current;
+                       pg->cp_owner = cl_io_top(io);
                         cl_page_owner_set(pg);
                         if (pg->cp_state != CPS_FREEING) {
                                 cl_page_state_set(env, pg, CPS_OWNED);
                         } else {
                                 cl_page_disown0(env, io, pg);
-                                result = -EAGAIN;
+                                result = -ENOENT;
                         }
                 }
         }
@@ -1037,21 +685,17 @@ EXPORT_SYMBOL(cl_page_own_try);
 void cl_page_assume(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
 {
-        PASSERT(env, pg, pg->cp_state < CPS_OWNED);
-        PASSERT(env, pg, pg->cp_owner == NULL);
-        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
-        PINVRNT(env, pg, cl_page_invariant(pg));
+       PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
 
-        ENTRY;
-        pg = cl_page_top(pg);
-        io = cl_io_top(io);
+       ENTRY;
+       io = cl_io_top(io);
 
-        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
-        pg->cp_owner = io;
-        pg->cp_task = current;
-        cl_page_owner_set(pg);
-        cl_page_state_set(env, pg, CPS_OWNED);
-        EXIT;
+       cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
+       PASSERT(env, pg, pg->cp_owner == NULL);
+       pg->cp_owner = cl_io_top(io);
+       cl_page_owner_set(pg);
+       cl_page_state_set(env, pg, CPS_OWNED);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_assume);
 
@@ -1073,7 +717,6 @@ void cl_page_unassume(const struct lu_env *env,
         PINVRNT(env, pg, cl_page_invariant(pg));
 
         ENTRY;
-        pg = cl_page_top(pg);
         io = cl_io_top(io);
         cl_page_owner_clear(pg);
         cl_page_state_set(env, pg, CPS_CACHED);
@@ -1099,13 +742,13 @@ EXPORT_SYMBOL(cl_page_unassume);
 void cl_page_disown(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
 {
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
+               pg->cp_state == CPS_FREEING);
 
-        ENTRY;
-        pg = cl_page_top(pg);
-        io = cl_io_top(io);
-        cl_page_disown0(env, io, pg);
-        EXIT;
+       ENTRY;
+       io = cl_io_top(io);
+       cl_page_disown0(env, io, pg);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_disown);
 
@@ -1122,10 +765,10 @@ EXPORT_SYMBOL(cl_page_disown);
 void cl_page_discard(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
 {
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
-        PINVRNT(env, pg, cl_page_invariant(pg));
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
 
-        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
+       cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
 }
 EXPORT_SYMBOL(cl_page_discard);
 
@@ -1134,13 +777,10 @@ EXPORT_SYMBOL(cl_page_discard);
  * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
  * path. Doesn't check page invariant.
  */
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
-                            int radix)
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
 {
-        struct cl_page *tmp = pg;
         ENTRY;
 
-        PASSERT(env, pg, pg == cl_page_top(pg));
         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
 
         /*
@@ -1148,38 +788,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
          */
         cl_page_owner_clear(pg);
 
-        /* 
-         * unexport the page firstly before freeing it so that
-         * the page content is considered to be invalid.
-         * We have to do this because a CPS_FREEING cl_page may
-         * be NOT under the protection of a cl_lock.
-         * Afterwards, if this page is found by other threads, then this
-         * page will be forced to reread.
-         */
-        cl_page_export(env, pg, 0);
         cl_page_state_set0(env, pg, CPS_FREEING);
 
-        if (!radix)
-                /*
-                 * !radix means that @pg is not yet in the radix tree, skip
-                 * removing it.
-                 */
-                tmp = pg->cp_child;
-        for (; tmp != NULL; tmp = tmp->cp_child) {
-                void                    *value;
-                struct cl_object_header *hdr;
-
-                hdr = cl_object_header(tmp->cp_obj);
-                cfs_spin_lock(&hdr->coh_page_guard);
-                value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
-                PASSERT(env, tmp, value == tmp);
-                PASSERT(env, tmp, hdr->coh_pages > 0);
-                hdr->coh_pages--;
-                cfs_spin_unlock(&hdr->coh_page_guard);
-        }
-
-        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
+        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
                        (const struct lu_env *, const struct cl_page_slice *));
+
         EXIT;
 }
 
@@ -1202,7 +815,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
  * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
  * drain after some time, at which point page will be recycled.
  *
- * \pre  pg == cl_page_top(pg)
  * \pre  VM page is locked
  * \post pg->cp_state == CPS_FREEING
  *
@@ -1210,33 +822,14 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
  */
 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
 {
-        PINVRNT(env, pg, cl_page_invariant(pg));
-        ENTRY;
-        cl_page_delete0(env, pg, 1);
-        EXIT;
+       PINVRNT(env, pg, cl_page_invariant(pg));
+       ENTRY;
+       cl_page_delete0(env, pg);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_delete);
 
 /**
- * Unmaps page from user virtual memory.
- *
- * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to unmap page from user space
- * virtual memory.
- *
- * \see cl_page_operations::cpo_unmap()
- */
-int cl_page_unmap(const struct lu_env *env,
-                  struct cl_io *io, struct cl_page *pg)
-{
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
-        PINVRNT(env, pg, cl_page_invariant(pg));
-
-        return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
-}
-EXPORT_SYMBOL(cl_page_unmap);
-
-/**
  * Marks page up-to-date.
  *
  * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
@@ -1264,7 +857,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
         const struct cl_page_slice *slice;
 
         ENTRY;
-        pg = cl_page_top_trusted((struct cl_page *)pg);
         slice = container_of(pg->cp_layers.next,
                              const struct cl_page_slice, cpl_linkage);
         PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
@@ -1318,15 +910,17 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
          * PG_writeback without risking other layers deciding to skip this
          * page.
          */
-        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
-        if (result == 0)
-                cl_page_io_start(env, pg, crt);
-
-        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
-                      equi(result == 0,
-                           PageWriteback(cl_page_vmpage(env, pg)))));
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
-        return result;
+       if (crt >= CRT_NR)
+               return -EINVAL;
+       result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
+       if (result == 0)
+               cl_page_io_start(env, pg, crt);
+
+       KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
+                     equi(result == 0,
+                          PageWriteback(cl_page_vmpage(pg)))));
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+       return result;
 }
 EXPORT_SYMBOL(cl_page_prep);
 
@@ -1354,31 +948,30 @@ void cl_page_completion(const struct lu_env *env,
         /* cl_page::cp_req already cleared by the caller (osc_completion()) */
         PASSERT(env, pg, pg->cp_req == NULL);
         PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
-        PINVRNT(env, pg, cl_page_invariant(pg));
 
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
-        if (crt == CRT_READ && ioret == 0) {
-                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
-                pg->cp_flags |= CPF_READ_COMPLETED;
-        }
-
         cl_page_state_set(env, pg, CPS_CACHED);
+       if (crt >= CRT_NR)
+               return;
         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                                (const struct lu_env *,
                                 const struct cl_page_slice *, int), ioret);
         if (anchor) {
+                LASSERT(cl_page_is_vmlocked(env, pg));
                 LASSERT(pg->cp_sync_io == anchor);
                 pg->cp_sync_io = NULL;
+       }
+       /*
+        * As page->cp_obj is pinned by a reference from page->cp_req, it is
+        * safe to call cl_page_put() without risking object destruction in a
+        * non-blocking context.
+        */
+       cl_page_put(env, pg);
+
+       if (anchor)
                 cl_sync_io_note(anchor, ioret);
-        }
 
-        /* Don't assert the page writeback bit here because the lustre file
-         * may be as a backend of swap space. in this case, the page writeback
-         * is set by VM, and obvious we shouldn't clear it at all. Fortunately
-         * this type of pages are all TRANSIENT pages. */
-        KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
-                      !PageWriteback(cl_page_vmpage(env, pg))));
         EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
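
A similarly hedged sketch of the other end of the pipeline: a transfer-engine completion handler returning the page to the cl_page state machine. Per the added code above, cl_page_completion() now drops a page reference and signals any sync-I/O anchor itself.

static void engine_page_done(const struct lu_env *env, struct cl_page *page,
			     enum cl_req_type crt, int ioret)
{
	/* the engine is assumed to have cleared page->cp_req already, as
	 * asserted at the top of cl_page_completion(); after this call the
	 * page reference has been dropped, so do not touch "page" again */
	cl_page_completion(env, page, crt, ioret);
}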
@@ -1400,6 +993,8 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
         PINVRNT(env, pg, crt < CRT_NR);
 
         ENTRY;
+       if (crt >= CRT_NR)
+               RETURN(-EINVAL);
         result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                 (const struct lu_env *,
                                  const struct cl_page_slice *));
@@ -1413,37 +1008,29 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
 EXPORT_SYMBOL(cl_page_make_ready);
 
 /**
- * Notify layers that high level io decided to place this page into a cache
- * for future transfer.
- *
- * The layer implementing transfer engine (osc) has to register this page in
- * its queues.
+ * Called when a page is being written back at the kernel's request.
  *
  * \pre  cl_page_is_owned(pg, io)
- * \post ergo(result == 0,
- *            pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT)
+ * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
  *
- * \see cl_page_operations::cpo_cache_add()
+ * \see cl_page_operations::cpo_flush()
  */
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
-                      struct cl_page *pg, enum cl_req_type crt)
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+                 struct cl_page *pg)
 {
-        int result;
+       int result;
 
-        PINVRNT(env, pg, crt < CRT_NR);
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
-        PINVRNT(env, pg, cl_page_invariant(pg));
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
 
-        ENTRY;
-        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
-        if (result == 0) {
-                cl_page_owner_clear(pg);
-                cl_page_state_set(env, pg, CPS_CACHED);
-        }
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
-        RETURN(result);
+       ENTRY;
+
+       result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
+
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
+       RETURN(result);
 }
-EXPORT_SYMBOL(cl_page_cache_add);
+EXPORT_SYMBOL(cl_page_flush);
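
A hedged sketch of the write-back path described above: the VM asks for a page to be cleaned, the client looks up and owns the matching cl_page (elided here), and cl_page_flush() hands it to the layers.

static int writeback_one_page(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	int rc;

	rc = cl_page_flush(env, io, page);
	/* per the \post above, rc == 0 implies the page is in CPS_PAGEOUT */
	return rc;
}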
 
 /**
 * Checks whether the page is protected by any extent lock in at least the required
@@ -1453,73 +1040,23 @@ EXPORT_SYMBOL(cl_page_cache_add);
  * \see cl_page_operations::cpo_is_under_lock()
  */
 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page *page)
+                          struct cl_page *page, pgoff_t *max_index)
 {
-        int rc;
+       int rc;
 
-        PINVRNT(env, page, cl_page_invariant(page));
+       PINVRNT(env, page, cl_page_invariant(page));
 
-        ENTRY;
-        rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
-                            (const struct lu_env *,
-                             const struct cl_page_slice *, struct cl_io *),
-                            io);
-        PASSERT(env, page, rc != 0);
-        RETURN(rc);
+       ENTRY;
+       rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
+                                   (const struct lu_env *,
+                                    const struct cl_page_slice *,
+                                    struct cl_io *, pgoff_t *),
+                                   io, max_index);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(cl_page_is_under_lock);
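
A conservative sketch of the new out-parameter: the caller passes a pgoff_t and, when a covering lock is found, the reporting layer may raise it so that queries for the following pages of a scan can be skipped; the return-code convention itself is defined by cpo_is_under_lock().

static int query_lock_coverage(const struct lu_env *env, struct cl_io *io,
			       struct cl_page *page)
{
	pgoff_t max_index = 0;
	int rc;

	rc = cl_page_is_under_lock(env, io, page, &max_index);
	/* max_index is only meaningful when a covering lock was reported;
	 * a scanning caller would cache it to short-circuit later pages */
	return rc;
}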
 
 /**
- * Purges all cached pages belonging to the object \a obj.
- */
-int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
-{
-        struct cl_thread_info   *info;
-        struct cl_object        *obj = cl_object_top(clobj);
-        struct cl_io            *io;
-        struct cl_page_list     *plist;
-        int                      resched;
-        int                      result;
-
-        ENTRY;
-        info  = cl_env_info(env);
-        plist = &info->clt_list;
-        io    = &info->clt_io;
-
-        /*
-         * initialize the io. This is ugly since we never do IO in this
-         * function, we just make cl_page_list functions happy. -jay
-         */
-        io->ci_obj = obj;
-        result = cl_io_init(env, io, CIT_MISC, obj);
-        if (result != 0) {
-                cl_io_fini(env, io);
-                RETURN(io->ci_result);
-        }
-
-        do {
-                cl_page_list_init(plist);
-                cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist, 0,
-                                    &resched);
-                /*
-                 * Since we're purging the pages of an object, we don't care
-                 * the possible outcomes of the following functions.
-                 */
-                cl_page_list_unmap(env, io, plist);
-                cl_page_list_discard(env, io, plist);
-                cl_page_list_disown(env, io, plist);
-                cl_page_list_fini(env, plist);
-
-                if (resched)
-                        cfs_cond_resched();
-        } while (resched);
-
-        cl_io_fini(env, io);
-        RETURN(result);
-}
-EXPORT_SYMBOL(cl_pages_prune);
-
-/**
  * Tells transfer engine that only part of a page is to be transmitted.
  *
  * \see cl_page_operations::cpo_clip()
@@ -1543,12 +1080,11 @@ EXPORT_SYMBOL(cl_page_clip);
 void cl_page_header_print(const struct lu_env *env, void *cookie,
                           lu_printer_t printer, const struct cl_page *pg)
 {
-        (*printer)(env, cookie,
-                   "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
-                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
-                   pg->cp_index, pg->cp_parent, pg->cp_child,
-                   pg->cp_state, pg->cp_error, pg->cp_type,
-                   pg->cp_owner, pg->cp_req, pg->cp_flags);
+       (*printer)(env, cookie,
+                  "page@%p[%d %p %d %d %d %p %p]\n",
+                  pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+                  pg->cp_state, pg->cp_error, pg->cp_type,
+                  pg->cp_owner, pg->cp_req);
 }
 EXPORT_SYMBOL(cl_page_header_print);
 
@@ -1558,16 +1094,12 @@ EXPORT_SYMBOL(cl_page_header_print);
 void cl_page_print(const struct lu_env *env, void *cookie,
                    lu_printer_t printer, const struct cl_page *pg)
 {
-        struct cl_page *scan;
-
-        for (scan = cl_page_top((struct cl_page *)pg);
-             scan != NULL; scan = scan->cp_child)
-                cl_page_header_print(env, cookie, printer, scan);
-        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
-                       (const struct lu_env *env,
-                        const struct cl_page_slice *slice,
-                        void *cookie, lu_printer_t p), cookie, printer);
-        (*printer)(env, cookie, "end page@%p\n", pg);
+       cl_page_header_print(env, cookie, printer, pg);
+       CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
+                      (const struct lu_env *env,
+                       const struct cl_page_slice *slice,
+                       void *cookie, lu_printer_t p), cookie, printer);
+       (*printer)(env, cookie, "end page@%p\n", pg);
 }
 EXPORT_SYMBOL(cl_page_print);
 
@@ -1587,10 +1119,7 @@ EXPORT_SYMBOL(cl_page_cancel);
  */
 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
 {
-        /*
-         * XXX for now.
-         */
-        return (loff_t)idx << CFS_PAGE_SHIFT;
+       return (loff_t)idx << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1599,16 +1128,13 @@ EXPORT_SYMBOL(cl_offset);
  */
 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 {
-        /*
-         * XXX for now.
-         */
-        return offset >> CFS_PAGE_SHIFT;
+       return offset >> PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-        return 1 << CFS_PAGE_SHIFT;
+       return 1 << PAGE_CACHE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
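
A worked example of the three helpers above, assuming PAGE_CACHE_SHIFT == 12 (4 KiB pages); other page sizes scale the numbers accordingly.

static void page_math_example(const struct cl_object *obj)
{
	loff_t  off = cl_offset(obj, 3);           /* 3 << 12 == 12288 */
	pgoff_t idx = cl_index(obj, 12288 + 100);  /* back to index 3  */
	int     sz  = cl_page_size(obj);           /* 1 << 12 == 4096  */

	/* round trip: cl_index(obj, cl_offset(obj, i)) == i for any i */
	(void)off; (void)idx; (void)sz;
}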
 
@@ -1622,24 +1148,24 @@ EXPORT_SYMBOL(cl_page_size);
  * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
  */
 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
-                       struct cl_object *obj,
-                       const struct cl_page_operations *ops)
+                      struct cl_object *obj, pgoff_t index,
+                      const struct cl_page_operations *ops)
 {
-        ENTRY;
-        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
-        slice->cpl_obj  = obj;
-        slice->cpl_ops  = ops;
-        slice->cpl_page = page;
-        EXIT;
+       ENTRY;
+       list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+       slice->cpl_obj  = obj;
+       slice->cpl_index = index;
+       slice->cpl_ops  = ops;
+       slice->cpl_page = page;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_slice_add);
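
Finally, a hedged sketch of how a layer attaches its slice during page initialization with the new index argument; struct foo_page, foo_page_of() and foo_page_ops are hypothetical, but the pattern mirrors what the vvp/lov/osc layers do.

struct foo_page {
	struct cl_page_slice fp_cl;	/* slice embedded in the layer page */
	/* layer-private fields would follow */
};

static const struct cl_page_operations foo_page_ops;

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
			 struct cl_page *page, pgoff_t index)
{
	struct foo_page *fp = foo_page_of(page);	/* hypothetical lookup */

	cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
	return 0;
}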
 
 int  cl_page_init(void)
 {
-        return lu_kmem_init(cl_page_caches);
+        return 0;
 }
 
 void cl_page_fini(void)
 {
-        lu_kmem_fini(cl_page_caches);
 }