LU-2722 clio: directIO thread races with completion thread
[fs/lustre-release.git] / lustre/obdclass/cl_page.c
index 5dcf62d..29a570c 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -39,9 +39,6 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 
 #include <libcfs/libcfs.h>
 #include <obd_class.h>
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                             int radix);
 
-static cfs_mem_cache_t      *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
-        {
-                .ckd_cache = &cl_page_kmem,
-                .ckd_name  = "cl_page_kmem",
-                .ckd_size  = sizeof (struct cl_page)
-        },
-        {
-                .ckd_cache = NULL
-        }
-};
-
 #ifdef LIBCFS_DEBUG
 # define PASSERT(env, page, expr)                                       \
   do {                                                                    \
@@ -93,13 +77,30 @@ static struct lu_kmem_descr cl_page_caches[] = {
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
 #endif /* !INVARIANT_CHECK */
 
+/* Disable page statistics by default due to the huge performance penalty. */
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_PAGE_INC(o, item) \
+       cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGE_DEC(o, item) \
+       cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGESTATE_INC(o, state) \
+       cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
+#define CS_PAGESTATE_DEC(o, state) \
+       cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
+#else
+#define CS_PAGE_INC(o, item)
+#define CS_PAGE_DEC(o, item)
+#define CS_PAGESTATE_INC(o, state)
+#define CS_PAGESTATE_DEC(o, state)
+#endif
+
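
The four macros above compile to nothing unless CONFIG_DEBUG_PAGESTATE_TRACKING is defined, so the stats calls sprinkled through this file cost nothing in production builds. A minimal standalone sketch of the same compile-time toggle pattern; every name here (DEMO_PAGESTATS, demo_counter) is hypothetical, not part of Lustre:

#include <stdio.h>

#ifdef DEMO_PAGESTATS                   /* hypothetical config switch */
static int demo_counter;
# define DEMO_INC() (demo_counter++)
# define DEMO_DEC() (demo_counter--)
#else                                   /* production build: no-ops */
# define DEMO_INC()
# define DEMO_DEC()
#endif

int main(void)
{
        DEMO_INC();     /* vanishes entirely unless -DDEMO_PAGESTATS */
        DEMO_INC();
        DEMO_DEC();
#ifdef DEMO_PAGESTATS
        printf("counter = %d\n", demo_counter);
#endif
        return 0;
}
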
 /**
- * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
+ * Internal version of cl_page_top(); it may only be called when the page
+ * is known not to be freed: say, with a page reference held, with the
+ * radix tree lock held, or with the page owned.
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
-        LASSERT(cl_is_page(page));
         while (page->cp_parent != NULL)
                 page = page->cp_parent;
         return page;
@@ -118,12 +119,8 @@ static struct cl_page *cl_page_top_trusted(struct cl_page *page)
  */
 static void cl_page_get_trust(struct cl_page *page)
 {
-        LASSERT(cl_is_page(page));
-        /*
-         * Checkless version for trusted users.
-         */
-        if (atomic_inc_return(&page->cp_ref) == 1)
-                atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+        LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+        cfs_atomic_inc(&page->cp_ref);
 }
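
cl_page_get_trust() is the check-free reference for callers that already know the page cannot disappear; the assertion merely documents that precondition. A self-contained sketch of the same idea using C11 atomics (the names demo_page and demo_get_trust are hypothetical; the kernel code uses cfs_atomic_read()/cfs_atomic_inc()):

#include <assert.h>
#include <stdatomic.h>

struct demo_page { atomic_int ref; };

/* Caller must already hold a reference, or a lock that pins the page
 * (e.g. the radix-tree guard), so ref can never be observed at zero. */
static void demo_get_trust(struct demo_page *p)
{
        assert(atomic_load(&p->ref) > 0);
        atomic_fetch_add(&p->ref, 1);
}
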
 
 /**
@@ -137,18 +134,11 @@ cl_page_at_trusted(const struct cl_page *page,
                    const struct lu_device_type *dtype)
 {
         const struct cl_page_slice *slice;
-
-#ifdef INVARIANT_CHECK
-        struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
-        if (!atomic_read(&page->cp_ref))
-                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
-#endif
         ENTRY;
 
         page = cl_page_top_trusted((struct cl_page *)page);
         do {
-                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                         if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                                 RETURN(slice);
                 }
@@ -170,23 +160,27 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
         LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
 
         page = radix_tree_lookup(&hdr->coh_tree, index);
-        if (page != NULL) {
-                LASSERT(cl_is_page(page));
+        if (page != NULL)
                 cl_page_get_trust(page);
-        }
         return page;
 }
 EXPORT_SYMBOL(cl_page_lookup);
 
 /**
- * Returns a list of pages by a given [start, end] of @obj.
+ * Invokes \a cb on each page in the given [start, end] range of \a obj.
+ *
+ * \return CLP_GANG_OKAY when the whole range has been scanned, or
+ * CLP_GANG_RESCHED when the scan gave up the CPU before finishing; in
+ * the latter case the caller should reschedule and retry.
  *
  * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
  * crucial in the face of [offset, EOF] locks.
+ *
+ * At least one page is passed to \a cb unless the range contains no pages.
  */
-void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
-                         struct cl_io *io, pgoff_t start, pgoff_t end,
-                         struct cl_page_list *queue)
+int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
+                        struct cl_io *io, pgoff_t start, pgoff_t end,
+                        cl_page_gang_cb_t cb, void *cbdata)
 {
         struct cl_object_header *hdr;
         struct cl_page          *page;
@@ -197,28 +191,30 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
         unsigned int             nr;
         unsigned int             i;
         unsigned int             j;
+        int                      res = CLP_GANG_OKAY;
+        int                      tree_lock = 1;
         ENTRY;
 
         idx = start;
         hdr = cl_object_header(obj);
         pvec = cl_env_info(env)->clt_pvec;
         dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
-        spin_lock(&hdr->coh_page_guard);
+       spin_lock(&hdr->coh_page_guard);
         while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                             idx, CLT_PVEC_SIZE)) > 0) {
+                int end_of_region = 0;
                 idx = pvec[nr - 1]->cp_index + 1;
                 for (i = 0, j = 0; i < nr; ++i) {
                         page = pvec[i];
-                        PASSERT(env, page, cl_is_page(page));
                         pvec[i] = NULL;
-                        if (page->cp_index > end)
+
+                        LASSERT(page->cp_type == CPT_CACHEABLE);
+                        if (page->cp_index > end) {
+                                end_of_region = 1;
                                 break;
+                        }
                         if (page->cp_state == CPS_FREEING)
                                 continue;
-                        if (page->cp_type == CPT_TRANSIENT) {
-                                /* God, we found a transient page!*/
-                                continue;
-                        }
 
                         slice = cl_page_at_trusted(page, dtype);
                         /*
@@ -226,6 +222,7 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                          * for osc, in case of ...
                          */
                         PASSERT(env, page, slice != NULL);
+
                         page = slice->cpl_page;
                         /*
                          * Can safely call cl_page_get_trust() under
@@ -236,7 +233,7 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                          */
                         cl_page_get_trust(page);
                         lu_ref_add_atomic(&page->cp_reference,
-                                          "page_list", cfs_current());
+                                          "gang_lookup", cfs_current());
                         pvec[j++] = page;
                 }
 
@@ -248,52 +245,61 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                  * check that pages weren't truncated (cl_page_own() returns
                  * error in the latter case).
                  */
-                spin_unlock(&hdr->coh_page_guard);
+               spin_unlock(&hdr->coh_page_guard);
+                tree_lock = 0;
+
                 for (i = 0; i < j; ++i) {
                         page = pvec[i];
-                        if (cl_page_own(env, io, page) == 0)
-                                cl_page_list_add(queue, page);
+                        if (res == CLP_GANG_OKAY)
+                                res = (*cb)(env, io, page, cbdata);
                         lu_ref_del(&page->cp_reference,
-                                   "page_list", cfs_current());
+                                   "gang_lookup", cfs_current());
                         cl_page_put(env, page);
                 }
-                spin_lock(&hdr->coh_page_guard);
-                if (nr < CLT_PVEC_SIZE)
+                if (nr < CLT_PVEC_SIZE || end_of_region)
                         break;
-        }
-        spin_unlock(&hdr->coh_page_guard);
-        EXIT;
+
+                if (res == CLP_GANG_OKAY && cfs_need_resched())
+                        res = CLP_GANG_RESCHED;
+                if (res != CLP_GANG_OKAY)
+                        break;
+
+               spin_lock(&hdr->coh_page_guard);
+               tree_lock = 1;
+       }
+       if (tree_lock)
+               spin_unlock(&hdr->coh_page_guard);
+       RETURN(res);
 }
 EXPORT_SYMBOL(cl_page_gang_lookup);
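
The CLP_GANG_* protocol replaces the old page-list output: the callback acts on each page found, and the function returns CLP_GANG_RESCHED when it voluntarily dropped out to avoid hogging the CPU. A hedged caller-side sketch of the retry loop; demo_cb and demo_scan are illustrative names (compare the real user, cl_pages_prune(), later in this patch):

static int demo_cb(const struct lu_env *env, struct cl_io *io,
                   struct cl_page *page, void *cbdata)
{
        /* act on one referenced page here */
        return CLP_GANG_OKAY;           /* keep scanning */
}

static void demo_scan(const struct lu_env *env, struct cl_object *obj,
                      struct cl_io *io)
{
        int rc;

        do {
                rc = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
                                         demo_cb, NULL);
                if (rc == CLP_GANG_RESCHED)
                        cfs_cond_resched();     /* yield, then retry */
        } while (rc != CLP_GANG_OKAY);
}
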
 
 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 {
         struct cl_object *obj  = page->cp_obj;
-        struct cl_site   *site = cl_object_site(obj);
+       int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
-        PASSERT(env, page, cl_is_page(page));
-        PASSERT(env, page, list_empty(&page->cp_batch));
+        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
         PASSERT(env, page, page->cp_owner == NULL);
         PASSERT(env, page, page->cp_req == NULL);
         PASSERT(env, page, page->cp_parent == NULL);
         PASSERT(env, page, page->cp_state == CPS_FREEING);
 
         ENTRY;
-        might_sleep();
-        while (!list_empty(&page->cp_layers)) {
+        cfs_might_sleep();
+        while (!cfs_list_empty(&page->cp_layers)) {
                 struct cl_page_slice *slice;
 
-                slice = list_entry(page->cp_layers.next, struct cl_page_slice,
-                                   cpl_linkage);
-                list_del_init(page->cp_layers.next);
+                slice = cfs_list_entry(page->cp_layers.next,
+                                       struct cl_page_slice, cpl_linkage);
+                cfs_list_del_init(page->cp_layers.next);
                 slice->cpl_ops->cpo_fini(env, slice);
         }
-        atomic_dec(&site->cs_pages.cs_total);
-        atomic_dec(&site->cs_pages_state[page->cp_state]);
+       CS_PAGE_DEC(obj, total);
+       CS_PAGESTATE_DEC(obj, page->cp_state);
         lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
         cl_object_put(env, obj);
         lu_ref_fini(&page->cp_reference);
-        OBD_SLAB_FREE_PTR(page, cl_page_kmem);
+        OBD_FREE(page, pagesize);
         EXIT;
 }
 
@@ -308,58 +314,55 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
         *(enum cl_page_state *)&page->cp_state = state;
 }
 
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
-                         pgoff_t ind, struct page *vmpage,
-                         enum cl_page_type type, struct cl_page **out)
+static struct cl_page *cl_page_alloc(const struct lu_env *env,
+               struct cl_object *o, pgoff_t ind, struct page *vmpage,
+               enum cl_page_type type)
 {
-        struct cl_page          *page;
-        struct cl_page          *err  = NULL;
-        struct lu_object_header *head;
-        struct cl_site          *site = cl_object_site(o);
-        int                      result;
-
-        ENTRY;
-        result = +1;
-        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
-        if (page != NULL) {
-                atomic_set(&page->cp_ref, 1);
-                page->cp_obj = o;
-                cl_object_get(o);
-                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
-                                                     "cl_page", page);
-                page->cp_index = ind;
-                cl_page_state_set_trust(page, CPS_CACHED);
-                page->cp_type = type;
-                CFS_INIT_LIST_HEAD(&page->cp_layers);
-                CFS_INIT_LIST_HEAD(&page->cp_batch);
-                CFS_INIT_LIST_HEAD(&page->cp_flight);
-                mutex_init(&page->cp_mutex);
-                lu_ref_init(&page->cp_reference);
-                head = o->co_lu.lo_header;
-                list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
-                        if (o->co_ops->coo_page_init != NULL) {
-                                err = o->co_ops->coo_page_init(env, o,
-                                                               page, vmpage);
-                                if (err != NULL) {
-                                        cl_page_state_set_trust(page,
-                                                                CPS_FREEING);
-                                        cl_page_free(env, page);
-                                        page = err;
-                                        break;
-                                }
-                        }
-                }
-                if (err == NULL) {
-                        atomic_inc(&site->cs_pages.cs_busy);
-                        atomic_inc(&site->cs_pages.cs_total);
-                        atomic_inc(&site->cs_pages_state[CPS_CACHED]);
-                        atomic_inc(&site->cs_pages.cs_created);
-                        result = 0;
-                }
-        } else
-                page = ERR_PTR(-ENOMEM);
-        *out = page;
-        RETURN(result);
+       struct cl_page          *page;
+       struct lu_object_header *head;
+
+       ENTRY;
+       OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+                       CFS_ALLOC_IO);
+       if (page != NULL) {
+               int result = 0;
+               cfs_atomic_set(&page->cp_ref, 1);
+               if (type == CPT_CACHEABLE) /* for radix tree */
+                       cfs_atomic_inc(&page->cp_ref);
+               page->cp_obj = o;
+               cl_object_get(o);
+               page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page",page);
+               page->cp_index = ind;
+               cl_page_state_set_trust(page, CPS_CACHED);
+               page->cp_type = type;
+               CFS_INIT_LIST_HEAD(&page->cp_layers);
+               CFS_INIT_LIST_HEAD(&page->cp_batch);
+               CFS_INIT_LIST_HEAD(&page->cp_flight);
+               mutex_init(&page->cp_mutex);
+               lu_ref_init(&page->cp_reference);
+               head = o->co_lu.lo_header;
+               cfs_list_for_each_entry(o, &head->loh_layers,
+                                       co_lu.lo_linkage) {
+                       if (o->co_ops->coo_page_init != NULL) {
+                               result = o->co_ops->coo_page_init(env, o,
+                                                                 page, vmpage);
+                               if (result != 0) {
+                                       cl_page_delete0(env, page, 0);
+                                       cl_page_free(env, page);
+                                       page = ERR_PTR(result);
+                                       break;
+                               }
+                       }
+               }
+               if (result == 0) {
+                       CS_PAGE_INC(o, total);
+                       CS_PAGE_INC(o, create);
+                       CS_PAGESTATE_INC(o, CPS_CACHED);
+               }
+       } else {
+               page = ERR_PTR(-ENOMEM);
+       }
+       RETURN(page);
 }
 
 /**
@@ -373,28 +376,32 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
  *
  * \see cl_object_find(), cl_lock_find()
  */
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
-                             pgoff_t idx, struct page *vmpage,
-                             enum cl_page_type type)
+static struct cl_page *cl_page_find0(const struct lu_env *env,
+                                     struct cl_object *o,
+                                     pgoff_t idx, struct page *vmpage,
+                                     enum cl_page_type type,
+                                     struct cl_page *parent)
 {
-        struct cl_page          *page;
+        struct cl_page          *page = NULL;
         struct cl_page          *ghost = NULL;
         struct cl_object_header *hdr;
-        struct cl_site          *site = cl_object_site(o);
         int err;
 
-        LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
-        might_sleep();
+        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
+        cfs_might_sleep();
 
         ENTRY;
 
         hdr = cl_object_header(o);
-        atomic_inc(&site->cs_pages.cs_lookup);
+       CS_PAGE_INC(o, lookup);
 
-        CDEBUG(D_PAGE, "%lu@"DFID" %p %lu %i\n",
+        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
+               /* vmpage lock is used to protect the child/parent
+                * relationship */
+               KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
                  *
@@ -410,25 +417,32 @@ struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
                              cl_page_vmpage(env, page) == vmpage &&
                              (void *)radix_tree_lookup(&hdr->coh_tree,
                                                        idx) == page));
-        } else {
-                spin_lock(&hdr->coh_page_guard);
-                page = cl_page_lookup(hdr, idx);
-                spin_unlock(&hdr->coh_page_guard);
         }
+
         if (page != NULL) {
-                atomic_inc(&site->cs_pages.cs_hit);
+               CS_PAGE_INC(o, hit);
                 RETURN(page);
         }
 
         /* allocate and initialize cl_page */
-        err = cl_page_alloc(env, o, idx, vmpage, type, &page);
-        if (err != 0)
+        page = cl_page_alloc(env, o, idx, vmpage, type);
+        if (IS_ERR(page))
+                RETURN(page);
+
+        if (type == CPT_TRANSIENT) {
+                if (parent) {
+                        LASSERT(page->cp_parent == NULL);
+                        page->cp_parent = parent;
+                        parent->cp_child = page;
+                }
                 RETURN(page);
+        }
+
         /*
          * XXX optimization: use radix_tree_preload() here, and change tree
          * gfp mask to GFP_KERNEL in cl_object_header_init().
          */
-        spin_lock(&hdr->coh_page_guard);
+       spin_lock(&hdr->coh_page_guard);
         err = radix_tree_insert(&hdr->coh_tree, idx, page);
         if (err != 0) {
                 ghost = page;
@@ -445,41 +459,43 @@ struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
                  *     consistent even when VM locking is somehow busted,
                  *     which is very useful during diagnosing and debugging.
                  */
-                if (err == -EEXIST) {
-                        /*
-                         * XXX in case of a lookup for CPT_TRANSIENT page,
-                         * nothing protects a CPT_CACHEABLE page from being
-                         * concurrently moved into CPS_FREEING state.
-                         */
-                        page = cl_page_lookup(hdr, idx);
-                        PASSERT(env, page, page != NULL);
-                        if (page->cp_type == CPT_TRANSIENT &&
-                            type == CPT_CACHEABLE) {
-                                /* XXX: We should make sure that inode sem
-                                 * keeps being held in the lifetime of
-                                 * transient pages, so it is impossible to
-                                 * have conflicting transient pages.
-                                 */
-                                spin_unlock(&hdr->coh_page_guard);
-                                cl_page_put(env, page);
-                                spin_lock(&hdr->coh_page_guard);
-                                page = ERR_PTR(-EBUSY);
-                        }
-                } else
-                        page = ERR_PTR(err);
-        } else
+                page = ERR_PTR(err);
+                CL_PAGE_DEBUG(D_ERROR, env, ghost,
+                              "fail to insert into radix tree: %d\n", err);
+        } else {
+                if (parent) {
+                        LASSERT(page->cp_parent == NULL);
+                        page->cp_parent = parent;
+                        parent->cp_child = page;
+                }
                 hdr->coh_pages++;
-        spin_unlock(&hdr->coh_page_guard);
+        }
+       spin_unlock(&hdr->coh_page_guard);
 
         if (unlikely(ghost != NULL)) {
-                atomic_dec(&site->cs_pages.cs_busy);
                 cl_page_delete0(env, ghost, 0);
                 cl_page_free(env, ghost);
         }
         RETURN(page);
 }
+
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
+                             pgoff_t idx, struct page *vmpage,
+                             enum cl_page_type type)
+{
+        return cl_page_find0(env, o, idx, vmpage, type, NULL);
+}
 EXPORT_SYMBOL(cl_page_find);
 
+
+struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
+                                 pgoff_t idx, struct page *vmpage,
+                                 struct cl_page *parent)
+{
+        return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
+}
+EXPORT_SYMBOL(cl_page_find_sub);
+
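
cl_page_find() either finds the existing cl_page through vmpage->private (the fast path above) or allocates and inserts a new one; for CPT_CACHEABLE pages the VM page lock must be held across the call. A hedged usage sketch, with demo_use_page being a hypothetical caller:

static int demo_use_page(const struct lu_env *env, struct cl_object *obj,
                         pgoff_t idx, struct page *vmpage)
{
        struct cl_page *page;

        LASSERT(PageLocked(vmpage));    /* guards child/parent linkage */
        page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                return PTR_ERR(page);
        /* ... work with the referenced page ... */
        cl_page_put(env, page);
        return 0;
}
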
 static inline int cl_page_invariant(const struct cl_page *pg)
 {
         struct cl_object_header *header;
@@ -487,7 +503,6 @@ static inline int cl_page_invariant(const struct cl_page *pg)
         struct cl_page          *child;
         struct cl_io            *owner;
 
-        LASSERT(cl_is_page(pg));
         /*
          * Page invariant is protected by a VM lock.
          */
@@ -498,7 +513,7 @@ static inline int cl_page_invariant(const struct cl_page *pg)
         child  = pg->cp_child;
         owner  = pg->cp_owner;
 
-        return atomic_read(&pg->cp_ref) > 0 &&
+        return cl_page_in_use(pg) &&
                 ergo(parent != NULL, parent->cp_child == pg) &&
                 ergo(child != NULL, child->cp_parent == pg) &&
                 ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
@@ -511,7 +526,7 @@ static inline int cl_page_invariant(const struct cl_page *pg)
                  * Either page is early in initialization (has neither child
                  * nor parent yet), or it is in the object radix tree.
                  */
-                ergo(pg->cp_state < CPS_FREEING,
+                ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
                      (void *)radix_tree_lookup(&header->coh_tree,
                                                pg->cp_index) == pg ||
                      (child == NULL && parent == NULL));
@@ -521,7 +536,6 @@ static void cl_page_state_set0(const struct lu_env *env,
                                struct cl_page *page, enum cl_page_state state)
 {
         enum cl_page_state old;
-        struct cl_site *site = cl_object_site(page->cp_obj);
 
         /*
          * Matrix of allowed state transitions [old][new], for sanity
@@ -568,14 +582,14 @@ static void cl_page_state_set0(const struct lu_env *env,
         ENTRY;
         old = page->cp_state;
         PASSERT(env, page, allowed_transitions[old][state]);
-        CL_PAGE_HEADER(D_TRACE, env, page, "%i -> %i\n", old, state);
+        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
         for (; page != NULL; page = page->cp_child) {
                 PASSERT(env, page, page->cp_state == old);
                 PASSERT(env, page,
                         equi(state == CPS_OWNED, page->cp_owner != NULL));
 
-                atomic_dec(&site->cs_pages_state[page->cp_state]);
-                atomic_inc(&site->cs_pages_state[state]);
+               CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
+               CS_PAGESTATE_INC(page->cp_obj, state);
                 cl_page_state_set_trust(page, state);
         }
         EXIT;
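
cl_page_state_set0() validates every transition against the allowed_transitions matrix (elided from this hunk) before propagating the new state down the cp_child chain and updating the per-state counters. A standalone sketch of the matrix-checked setter pattern; the states and matrix entries below are illustrative, not Lustre's actual table:

#include <assert.h>

enum demo_state { D_CACHED, D_OWNED, D_FREEING, D_NR };

static const int demo_allowed[D_NR][D_NR] = {
        [D_CACHED]  = { [D_OWNED] = 1, [D_FREEING] = 1 },
        [D_OWNED]   = { [D_CACHED] = 1, [D_FREEING] = 1 },
        [D_FREEING] = { 0 },            /* terminal: no way out */
};

static void demo_set_state(enum demo_state *cur, enum demo_state next)
{
        assert(demo_allowed[*cur][next]);       /* trap illegal moves */
        *cur = next;
}
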
@@ -584,7 +598,6 @@ static void cl_page_state_set0(const struct lu_env *env,
 static void cl_page_state_set(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
 {
-        PINVRNT(env, page, cl_page_invariant(page));
         cl_page_state_set0(env, page, state);
 }
 
@@ -598,10 +611,9 @@ static void cl_page_state_set(const struct lu_env *env,
  */
 void cl_page_get(struct cl_page *page)
 {
-        ENTRY;
-        LASSERT(page->cp_state != CPS_FREEING);
-        cl_page_get_trust(page);
-        EXIT;
+       ENTRY;
+       cl_page_get_trust(page);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_get);
 
@@ -616,27 +628,26 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object_header *hdr;
-        struct cl_site *site = cl_object_site(page->cp_obj);
-
-        PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
+        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
 
         ENTRY;
-        CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
-        hdr = cl_object_header(page->cp_obj);
-        if (atomic_dec_and_test(&page->cp_ref)) {
-                atomic_dec(&site->cs_pages.cs_busy);
-                if (page->cp_state == CPS_FREEING) {
-                        PASSERT(env, page, page->cp_owner == NULL);
-                        PASSERT(env, page, list_empty(&page->cp_batch));
-                        /*
-                         * Page is no longer reachable by other threads. Tear
-                         * it down.
-                         */
-                        cl_page_free(env, page);
-                }
-        }
-        EXIT;
+        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
+                       cfs_atomic_read(&page->cp_ref));
+
+       if (cfs_atomic_dec_and_test(&page->cp_ref)) {
+               LASSERT(page->cp_state == CPS_FREEING);
+
+               LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
+               PASSERT(env, page, page->cp_owner == NULL);
+               PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+               /*
+                * Page is no longer reachable by other threads. Tear
+                * it down.
+                */
+               cl_page_free(env, page);
+       }
+
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_put);
 
@@ -653,7 +664,7 @@ cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
          */
         page = cl_page_top(page);
         do {
-                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                         if (slice->cpl_ops->cpo_vmpage != NULL)
                                 RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
                 }
@@ -668,7 +679,8 @@ EXPORT_SYMBOL(cl_page_vmpage);
  */
 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
 {
-        struct cl_page *page;
+       struct cl_page *top;
+       struct cl_page *page;
 
         ENTRY;
         KLASSERT(PageLocked(vmpage));
@@ -683,15 +695,18 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * This loop assumes that ->private points to the top-most page. This
          * can be rectified easily.
          */
-        for (page = (void *)vmpage->private;
-             page != NULL; page = page->cp_child) {
-                if (cl_object_same(page->cp_obj, obj)) {
-                        cl_page_get_trust(page);
-                        break;
-                }
-        }
-        LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
-        RETURN(page);
+        top = (struct cl_page *)vmpage->private;
+       if (top == NULL)
+               RETURN(NULL);
+
+       for (page = top; page != NULL; page = page->cp_child) {
+               if (cl_object_same(page->cp_obj, obj)) {
+                       cl_page_get_trust(page);
+                       break;
+               }
+       }
+       LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
+       RETURN(page);
 }
 EXPORT_SYMBOL(cl_vmpage_page);
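
cl_vmpage_page() walks the cp_child chain hanging off vmpage->private until it finds the cl_page belonging to \a obj, taking a trusted reference on it. The VM page lock keeps vmpage->private stable for the duration. A hedged usage sketch (demo_lookup is a hypothetical caller):

static struct cl_page *demo_lookup(cfs_page_t *vmpage,
                                   struct cl_object *obj)
{
        struct cl_page *page;

        lock_page(vmpage);              /* keeps vmpage->private stable */
        page = cl_vmpage_page(vmpage, obj);
        unlock_page(vmpage);
        return page;                    /* NULL, or a referenced page */
}
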
 
@@ -706,20 +721,6 @@ struct cl_page *cl_page_top(struct cl_page *page)
 }
 EXPORT_SYMBOL(cl_page_top);
 
-/**
- * Returns true if \a addr is an address of an allocated cl_page. Used in
- * assertions. This check is optimistically imprecise, i.e., it occasionally
- * returns true for the incorrect addresses, but if it returns false, then the
- * address is guaranteed to be incorrect. (Should be named cl_pagep().)
- *
- * \see cl_is_lock()
- */
-int cl_is_page(const void *addr)
-{
-        return cfs_mem_is_in_cache(addr, cl_page_kmem);
-}
-EXPORT_SYMBOL(cl_is_page);
-
 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                        const struct lu_device_type *dtype)
 {
@@ -741,8 +742,8 @@ EXPORT_SYMBOL(cl_page_at);
         __result = 0;                                                   \
         __page = cl_page_top(__page);                                   \
         do {                                                            \
-                list_for_each_entry(__scan, &__page->cp_layers,         \
-                                    cpl_linkage) {                      \
+                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
+                                        cpl_linkage) {                  \
                         __method = *(void **)((char *)__scan->cpl_ops + \
                                               __op);                    \
                         if (__method != NULL) {                         \
@@ -769,8 +770,8 @@ do {                                                                    \
                                                                         \
         __page = cl_page_top(__page);                                   \
         do {                                                            \
-                list_for_each_entry(__scan, &__page->cp_layers,         \
-                                    cpl_linkage) {                      \
+                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
+                                        cpl_linkage) {                  \
                         __method = *(void **)((char *)__scan->cpl_ops + \
                                               __op);                    \
                         if (__method != NULL)                           \
@@ -781,28 +782,28 @@ do {                                                                    \
         } while (__page != NULL);                                       \
 } while (0)
 
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
-do {                                                                    \
-        const struct lu_env        *__env  = (_env);                    \
-        struct cl_page             *__page = (_page);                   \
-        const struct cl_page_slice *__scan;                             \
-        ptrdiff_t                   __op   = (_op);                     \
-        void                      (*__method)_proto;                    \
-                                                                        \
-        /* get to the bottom page. */                                   \
-        while (__page->cp_child != NULL)                                \
-                __page = __page->cp_child;                              \
-        do {                                                            \
-                list_for_each_entry_reverse(__scan, &__page->cp_layers, \
-                                            cpl_linkage) {              \
-                        __method = *(void **)((char *)__scan->cpl_ops + \
-                                              __op);                    \
-                        if (__method != NULL)                           \
-                                (*__method)(__env, __scan,              \
-                                            ## __VA_ARGS__);            \
-                }                                                       \
-                __page = __page->cp_parent;                             \
-        } while (__page != NULL);                                       \
+#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
+do {                                                                        \
+        const struct lu_env        *__env  = (_env);                        \
+        struct cl_page             *__page = (_page);                       \
+        const struct cl_page_slice *__scan;                                 \
+        ptrdiff_t                   __op   = (_op);                         \
+        void                      (*__method)_proto;                        \
+                                                                            \
+        /* get to the bottom page. */                                       \
+        while (__page->cp_child != NULL)                                    \
+                __page = __page->cp_child;                                  \
+        do {                                                                \
+                cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+                                                cpl_linkage) {              \
+                        __method = *(void **)((char *)__scan->cpl_ops +     \
+                                              __op);                        \
+                        if (__method != NULL)                               \
+                                (*__method)(__env, __scan,                  \
+                                            ## __VA_ARGS__);                \
+                }                                                           \
+                __page = __page->cp_parent;                                 \
+        } while (__page != NULL);                                           \
 } while (0)
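
The CL_PAGE_INVOID* macros locate a layer's method by its byte offset inside the cl_page_operations table, so one macro body can dispatch any operation. A standalone sketch of that offset-based dispatch trick; all names below are made up for illustration:

#include <stddef.h>
#include <stdio.h>

struct demo_ops {
        void (*op_hello)(void);
        void (*op_bye)(void);
};

static void say_hello(void) { printf("hello\n"); }

static struct demo_ops demo = { .op_hello = say_hello };

static void demo_invoke(struct demo_ops *ops, ptrdiff_t op)
{
        void (*method)(void) = *(void (**)(void))((char *)ops + op);

        if (method != NULL)             /* layers may leave slots empty */
                (*method)();
}

int main(void)
{
        demo_invoke(&demo, offsetof(struct demo_ops, op_hello)); /* hello */
        demo_invoke(&demo, offsetof(struct demo_ops, op_bye));   /* skipped */
        return 0;
}
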
 
 static int cl_page_invoke(const struct lu_env *env,
@@ -837,6 +838,7 @@ static void cl_page_owner_clear(struct cl_page *page)
                         LASSERT(page->cp_owner->ci_owned_nr > 0);
                         page->cp_owner->ci_owned_nr--;
                         page->cp_owner = NULL;
+                        page->cp_task = NULL;
                 }
         }
         EXIT;
@@ -889,7 +891,7 @@ int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
 EXPORT_SYMBOL(cl_page_is_owned);
 
 /**
- * Owns a page by IO.
+ * Try to own a page by IO.
  *
  * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
  * into cl_page_state::CPS_OWNED state.
@@ -901,11 +903,15 @@ EXPORT_SYMBOL(cl_page_is_owned);
  *
  * \retval -ve failure, e.g., page was destroyed (and landed in
  *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
+ *             or the page is owned by another thread, or is under IO.
  *
  * \see cl_page_disown()
  * \see cl_page_operations::cpo_own()
+ * \see cl_page_own_try()
+ * \see cl_page_own
  */
-int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
+static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
+                        struct cl_page *pg, int nonblock)
 {
         int result;
 
@@ -915,24 +921,57 @@ int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
         pg = cl_page_top(pg);
         io = cl_io_top(io);
 
-        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_own));
-        PASSERT(env, pg, pg->cp_owner == NULL);
-        PASSERT(env, pg, pg->cp_req == NULL);
-        pg->cp_owner = io;
-        cl_page_owner_set(pg);
-        if (pg->cp_state != CPS_FREEING) {
-                cl_page_state_set(env, pg, CPS_OWNED);
-                result = 0;
+        if (pg->cp_state == CPS_FREEING) {
+                result = -ENOENT;
         } else {
-                cl_page_disown0(env, io, pg);
-                result = -EAGAIN;
+                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
+                                        (const struct lu_env *,
+                                         const struct cl_page_slice *,
+                                         struct cl_io *, int),
+                                        io, nonblock);
+                if (result == 0) {
+                        PASSERT(env, pg, pg->cp_owner == NULL);
+                        PASSERT(env, pg, pg->cp_req == NULL);
+                        pg->cp_owner = io;
+                        pg->cp_task  = current;
+                        cl_page_owner_set(pg);
+                        if (pg->cp_state != CPS_FREEING) {
+                                cl_page_state_set(env, pg, CPS_OWNED);
+                        } else {
+                                cl_page_disown0(env, io, pg);
+                                result = -ENOENT;
+                        }
+                }
         }
         PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
         RETURN(result);
 }
+
+/**
+ * Own a page; may block waiting for the current owner.
+ *
+ * \see cl_page_own0()
+ */
+int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
+{
+        return cl_page_own0(env, io, pg, 0);
+}
 EXPORT_SYMBOL(cl_page_own);
 
 /**
+ * Non-blocking version of cl_page_own().
+ *
+ * \see cl_page_own0()
+ */
+int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
+                    struct cl_page *pg)
+{
+        return cl_page_own0(env, io, pg, 1);
+}
+EXPORT_SYMBOL(cl_page_own_try);
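
With the nonblock flag plumbed through cpo_own(), callers that must not sleep can probe ownership first and only fall back to the blocking path when allowed. A hedged sketch using the pair introduced above (demo_own is hypothetical):

static int demo_own(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg, int can_block)
{
        int rc;

        rc = cl_page_own_try(env, io, pg);      /* never sleeps */
        if (rc != 0 && can_block)
                rc = cl_page_own(env, io, pg);  /* may wait for owner */
        return rc;      /* -ENOENT if the page is being freed */
}
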
+
+
+/**
  * Assume page ownership.
  *
  * Called when page is already locked by the hosting VM.
@@ -945,17 +984,16 @@ EXPORT_SYMBOL(cl_page_own);
 void cl_page_assume(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
 {
-        PASSERT(env, pg, pg->cp_state < CPS_OWNED);
-        PASSERT(env, pg, pg->cp_owner == NULL);
         PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
-        PINVRNT(env, pg, cl_page_invariant(pg));
 
         ENTRY;
         pg = cl_page_top(pg);
         io = cl_io_top(io);
 
         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
+        PASSERT(env, pg, pg->cp_owner == NULL);
         pg->cp_owner = io;
+        pg->cp_task = current;
         cl_page_owner_set(pg);
         cl_page_state_set(env, pg, CPS_OWNED);
         EXIT;
@@ -1044,35 +1082,53 @@ EXPORT_SYMBOL(cl_page_discard);
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                             int radix)
 {
+        struct cl_page *tmp = pg;
+        ENTRY;
+
         PASSERT(env, pg, pg == cl_page_top(pg));
         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
 
-        ENTRY;
         /*
           * Sever all ways to obtain new pointers to @pg.
          */
         cl_page_owner_clear(pg);
+
+        /*
+         * Unexport the page before freeing it, so that its content is
+         * considered invalid.
+         * We have to do this because a CPS_FREEING cl_page may be
+         * NOT under the protection of a cl_lock.
+         * Afterwards, if this page is found by other threads, it
+         * will be forced to be re-read.
+         */
+        cl_page_export(env, pg, 0);
         cl_page_state_set0(env, pg, CPS_FREEING);
+
         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
                        (const struct lu_env *, const struct cl_page_slice *));
-        if (!radix)
-                /*
-                 * !radix means that @pg is not yet in the radix tree, skip
-                 * removing it.
-                 */
-                pg = pg->cp_child;
-        for (; pg != NULL; pg = pg->cp_child) {
-                void                    *value;
-                struct cl_object_header *hdr;
-
-                hdr = cl_object_header(pg->cp_obj);
-                spin_lock(&hdr->coh_page_guard);
-                value = radix_tree_delete(&hdr->coh_tree, pg->cp_index);
-                PASSERT(env, pg, value == pg);
-                PASSERT(env, pg, hdr->coh_pages > 0);
-                hdr->coh_pages--;
-                spin_unlock(&hdr->coh_page_guard);
+
+        if (tmp->cp_type == CPT_CACHEABLE) {
+                if (!radix)
+                        /* !radix means that @pg is not yet in the radix tree,
+                         * skip removing it.
+                         */
+                        tmp = pg->cp_child;
+                for (; tmp != NULL; tmp = tmp->cp_child) {
+                        void                    *value;
+                        struct cl_object_header *hdr;
+
+                        hdr = cl_object_header(tmp->cp_obj);
+                       spin_lock(&hdr->coh_page_guard);
+                       value = radix_tree_delete(&hdr->coh_tree,
+                                                 tmp->cp_index);
+                       PASSERT(env, tmp, value == tmp);
+                       PASSERT(env, tmp, hdr->coh_pages > 0);
+                       hdr->coh_pages--;
+                       spin_unlock(&hdr->coh_page_guard);
+                       cl_page_put(env, tmp);
+                }
         }
+
         EXIT;
 }
 
@@ -1133,17 +1189,17 @@ EXPORT_SYMBOL(cl_page_unmap);
  * Marks page up-to-date.
  *
  * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark page as up-to-date. From
- * this moment on, page can be shown to the user space without Lustre being
- * notified, hence the name.
+ * layer responsible for VM interaction has to mark/clear page as up-to-date
+ * by the \a uptodate argument.
  *
  * \see cl_page_operations::cpo_export()
  */
-void cl_page_export(const struct lu_env *env, struct cl_page *pg)
+void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
 {
         PINVRNT(env, pg, cl_page_invariant(pg));
         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
-                       (const struct lu_env *, const struct cl_page_slice *));
+                       (const struct lu_env *,
+                        const struct cl_page_slice *, int), uptodate);
 }
 EXPORT_SYMBOL(cl_page_export);
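
With the new \a uptodate argument, the same entry point both marks and clears the VM up-to-date bit. A hedged sketch of the two uses visible in this patch: a successful read marks the contents valid, while cl_page_delete0() above passes 0 so a later lookup forces a re-read (demo_read_done is hypothetical):

static void demo_read_done(const struct lu_env *env, struct cl_page *pg,
                           int ioret)
{
        /* mark contents valid only on success */
        cl_page_export(env, pg, ioret == 0);
}
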
 
@@ -1218,7 +1274,7 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
         KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                       equi(result == 0,
                            PageWriteback(cl_page_vmpage(env, pg)))));
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
+        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
         return result;
 }
 EXPORT_SYMBOL(cl_page_prep);
@@ -1241,14 +1297,15 @@ EXPORT_SYMBOL(cl_page_prep);
 void cl_page_completion(const struct lu_env *env,
                         struct cl_page *pg, enum cl_req_type crt, int ioret)
 {
+        struct cl_sync_io *anchor = pg->cp_sync_io;
+
         PASSERT(env, pg, crt < CRT_NR);
         /* cl_page::cp_req already cleared by the caller (osc_completion()) */
         PASSERT(env, pg, pg->cp_req == NULL);
         PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
-        PINVRNT(env, pg, cl_page_invariant(pg));
 
         ENTRY;
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, ioret);
+        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
         if (crt == CRT_READ && ioret == 0) {
                 PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                 pg->cp_flags |= CPF_READ_COMPLETED;
@@ -1258,13 +1315,21 @@ void cl_page_completion(const struct lu_env *env,
         CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                                (const struct lu_env *,
                                 const struct cl_page_slice *, int), ioret);
+        if (anchor) {
+                LASSERT(cl_page_is_vmlocked(env, pg));
+                LASSERT(pg->cp_sync_io == anchor);
+                pg->cp_sync_io = NULL;
+       }
+       /*
+        * As page->cp_obj is pinned by a reference from page->cp_req, it is
+        * safe to call cl_page_put() without risking object destruction in a
+        * non-blocking context.
+        */
+       cl_page_put(env, pg);
+
+       if (anchor)
+                cl_sync_io_note(anchor, ioret);
 
-        /* Don't assert the page writeback bit here because the lustre file
-         * may be as a backend of swap space. in this case, the page writeback
-         * is set by VM, and obvious we shouldn't clear it at all. Fortunately
-         * this type of pages are all TRANSIENT pages. */
-        KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
-                      !PageWriteback(cl_page_vmpage(env, pg))));
         EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
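
The ordering above is the heart of the LU-2722 fix: cp_sync_io is sampled and cleared, the completion side drops its own page reference, and only then is the anchor signalled, because the waiting direct-IO thread may free the pages the moment the wait returns. A hedged waiter-side sketch of that handshake; the cl_sync_io_init()/cl_sync_io_wait() signatures are assumed from cl_io.c of the same vintage, and demo_sync_wait is hypothetical:

static int demo_sync_wait(const struct lu_env *env, struct cl_io *io,
                          struct cl_page_list *qout,
                          struct cl_sync_io *anchor, int nrpages)
{
        cl_sync_io_init(anchor, nrpages);
        /* ... submit the pages with cp_sync_io set to anchor ... */
        return cl_sync_io_wait(env, io, qout, anchor, 0);
        /* only after the wait returns may the pages be torn down */
}
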
@@ -1293,7 +1358,7 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                 PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                 cl_page_io_start(env, pg, crt);
         }
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
+        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
         RETURN(result);
 }
 EXPORT_SYMBOL(cl_page_make_ready);
@@ -1306,32 +1371,61 @@ EXPORT_SYMBOL(cl_page_make_ready);
  * its queues.
  *
  * \pre  cl_page_is_owned(pg, io)
- * \post ergo(result == 0,
- *            pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT)
+ * \post cl_page_is_owned(pg, io)
  *
  * \see cl_page_operations::cpo_cache_add()
  */
 int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
                       struct cl_page *pg, enum cl_req_type crt)
 {
-        int result;
+       const struct cl_page_slice *scan;
+       int result = 0;
 
-        PINVRNT(env, pg, crt < CRT_NR);
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
-        PINVRNT(env, pg, cl_page_invariant(pg));
+       PINVRNT(env, pg, crt < CRT_NR);
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
 
-        ENTRY;
-        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
-        if (result == 0) {
-                cl_page_owner_clear(pg);
-                cl_page_state_set(env, pg, CPS_CACHED);
-        }
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", crt, result);
-        RETURN(result);
+       ENTRY;
+
+       cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
+               if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
+                       continue;
+
+               result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
+               if (result != 0)
+                       break;
+       }
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_page_cache_add);
 
 /**
+ * Called when a page is being written back at the kernel's request.
+ *
+ * \pre  cl_page_is_owned(pg, io)
+ * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
+ *
+ * \see cl_page_operations::cpo_flush()
+ */
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+                 struct cl_page *pg)
+{
+       int result;
+
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
+
+       ENTRY;
+
+       result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
+
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
+       RETURN(result);
+}
+EXPORT_SYMBOL(cl_page_flush);
+
+/**
  * Checks whether the page is protected by any extent lock in at least the
  * required mode.
  *
@@ -1355,6 +1449,16 @@ int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
 }
 EXPORT_SYMBOL(cl_page_is_under_lock);
 
+static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
+                         struct cl_page *page, void *cbdata)
+{
+        cl_page_own(env, io, page);
+        cl_page_unmap(env, io, page);
+        cl_page_discard(env, io, page);
+        cl_page_disown(env, io, page);
+        return CLP_GANG_OKAY;
+}
+
 /**
  * Purges all cached pages belonging to the object \a obj.
  */
@@ -1363,12 +1467,10 @@ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
         struct cl_thread_info   *info;
         struct cl_object        *obj = cl_object_top(clobj);
         struct cl_io            *io;
-        struct cl_page_list     *plist;
         int                      result;
 
         ENTRY;
         info  = cl_env_info(env);
-        plist = &info->clt_list;
         io    = &info->clt_io;
 
         /*
@@ -1376,22 +1478,19 @@ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
          * function, we just make cl_page_list functions happy. -jay
          */
         io->ci_obj = obj;
+       io->ci_ignore_layout = 1;
         result = cl_io_init(env, io, CIT_MISC, obj);
         if (result != 0) {
                 cl_io_fini(env, io);
                 RETURN(io->ci_result);
         }
 
-        cl_page_list_init(plist);
-        cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist);
-        /*
-         * Since we're purging the pages of an object, we don't care
-         * the possible outcomes of the following functions.
-         */
-        cl_page_list_unmap(env, io, plist);
-        cl_page_list_discard(env, io, plist);
-        cl_page_list_disown(env, io, plist);
-        cl_page_list_fini(env, plist);
+        do {
+                result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
+                                             page_prune_cb, NULL);
+                if (result == CLP_GANG_RESCHED)
+                        cfs_cond_resched();
+        } while (result != CLP_GANG_OKAY);
 
         cl_io_fini(env, io);
         RETURN(result);
@@ -1408,7 +1507,7 @@ void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
 {
         PINVRNT(env, pg, cl_page_invariant(pg));
 
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%i %i\n", from, to);
+        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                        (const struct lu_env *,
                         const struct cl_page_slice *,int, int),
@@ -1424,7 +1523,7 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
 {
         (*printer)(env, cookie,
                    "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
-                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
                    pg->cp_index, pg->cp_parent, pg->cp_child,
                    pg->cp_state, pg->cp_error, pg->cp_type,
                    pg->cp_owner, pg->cp_req, pg->cp_flags);
@@ -1505,7 +1604,7 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                        const struct cl_page_operations *ops)
 {
         ENTRY;
-        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
         slice->cpl_obj  = obj;
         slice->cpl_ops  = ops;
         slice->cpl_page = page;
@@ -1515,10 +1614,9 @@ EXPORT_SYMBOL(cl_page_slice_add);
 
 int  cl_page_init(void)
 {
-        return lu_kmem_init(cl_page_caches);
+        return 0;
 }
 
 void cl_page_fini(void)
 {
-        lu_kmem_fini(cl_page_caches);
 }