LU-2722 clio: directIO thread races with completion thread
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index d74c812..29a570c 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -29,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -41,9 +39,6 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 
 #include <libcfs/libcfs.h>
 #include <obd_class.h>
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                             int radix);
 
-static cfs_mem_cache_t      *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
-        {
-                .ckd_cache = &cl_page_kmem,
-                .ckd_name  = "cl_page_kmem",
-                .ckd_size  = sizeof (struct cl_page)
-        },
-        {
-                .ckd_cache = NULL
-        }
-};
-
 #ifdef LIBCFS_DEBUG
 # define PASSERT(env, page, expr)                                       \
   do {                                                                    \
@@ -95,9 +77,27 @@ static struct lu_kmem_descr cl_page_caches[] = {
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
 #endif /* !INVARIANT_CHECK */
 
+/* Disable page statistics by default due to the huge performance penalty. */
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_PAGE_INC(o, item) \
+       cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGE_DEC(o, item) \
+       cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
+#define CS_PAGESTATE_INC(o, state) \
+       cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
+#define CS_PAGESTATE_DEC(o, state) \
+       cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
+#else
+#define CS_PAGE_INC(o, item)
+#define CS_PAGE_DEC(o, item)
+#define CS_PAGESTATE_INC(o, state)
+#define CS_PAGESTATE_DEC(o, state)
+#endif
+
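
The CS_PAGE_* and CS_PAGESTATE_* wrappers above compile away entirely
unless CONFIG_DEBUG_PAGESTATE_TRACKING is set, so the default build pays
no atomic cost for these counters on the page fast path. A minimal
standalone sketch of the same compile-time toggle (names invented, not
Lustre's):

#include <stdatomic.h>
#include <stdio.h>

/* #define DEBUG_PAGESTATE_TRACKING 1 */

static atomic_long total_pages;

#ifdef DEBUG_PAGESTATE_TRACKING
# define STAT_INC(ctr) atomic_fetch_add(&(ctr), 1)
# define STAT_DEC(ctr) atomic_fetch_sub(&(ctr), 1)
#else /* expand to nothing: no atomics, no shared-cache-line bouncing */
# define STAT_INC(ctr) ((void)0)
# define STAT_DEC(ctr) ((void)0)
#endif

int main(void)
{
        STAT_INC(total_pages); /* compiled out in the default build */
        printf("%ld\n", (long)atomic_load(&total_pages));
        return 0;
}

The disabled variants expand to an empty expression so call sites need
no #ifdef of their own.
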
 /**
- * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
+ * Internal version of cl_page_top. It should only be called when the page
+ * is known not to be freed: e.g. with the page referenced, the radix tree
+ * lock held, or the page owned.
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
@@ -119,11 +119,8 @@ static struct cl_page *cl_page_top_trusted(struct cl_page *page)
  */
 static void cl_page_get_trust(struct cl_page *page)
 {
-        /*
-         * Checkless version for trusted users.
-         */
-        if (cfs_atomic_inc_return(&page->cp_ref) == 1)
-                cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+        LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+        cfs_atomic_inc(&page->cp_ref);
 }
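
cl_page_get_trust() is now a pure "trusted get": the caller must already
be keeping the page alive (an existing reference, the radix tree lock, or
page ownership), so the function just asserts liveness and increments,
and the old cs_busy bookkeeping is gone. The contract in isolation, as a
hedged sketch with invented names:

#include <assert.h>
#include <stdatomic.h>

struct page { atomic_int refcount; };

/*
 * Legal only while the caller guarantees the page is live: a 0 -> 1
 * transition here would race with the free path, so the assertion
 * checks that the guarantee actually holds.
 */
static inline void page_get_trust(struct page *p)
{
        assert(atomic_load(&p->refcount) > 0);
        atomic_fetch_add(&p->refcount, 1);
}
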
 
 /**
@@ -137,13 +134,6 @@ cl_page_at_trusted(const struct cl_page *page,
                    const struct lu_device_type *dtype)
 {
         const struct cl_page_slice *slice;
-
-#ifdef INVARIANT_CHECK
-        struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
-        if (!cfs_atomic_read(&page->cp_ref))
-                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
-#endif
         ENTRY;
 
         page = cl_page_top_trusted((struct cl_page *)page);
@@ -170,9 +160,8 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
         LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
 
         page = radix_tree_lookup(&hdr->coh_tree, index);
-        if (page != NULL) {
+        if (page != NULL)
                 cl_page_get_trust(page);
-        }
         return page;
 }
 EXPORT_SYMBOL(cl_page_lookup);
@@ -210,7 +199,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
         hdr = cl_object_header(obj);
         pvec = cl_env_info(env)->clt_pvec;
         dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
-        cfs_spin_lock(&hdr->coh_page_guard);
+       spin_lock(&hdr->coh_page_guard);
         while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                             idx, CLT_PVEC_SIZE)) > 0) {
                 int end_of_region = 0;
@@ -256,7 +245,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                  * check that pages weren't truncated (cl_page_own() returns
                  * error in the latter case).
                  */
-                cfs_spin_unlock(&hdr->coh_page_guard);
+               spin_unlock(&hdr->coh_page_guard);
                 tree_lock = 0;
 
                 for (i = 0; i < j; ++i) {
@@ -275,19 +264,19 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                 if (res != CLP_GANG_OKAY)
                         break;
 
-                cfs_spin_lock(&hdr->coh_page_guard);
-                tree_lock = 1;
-        }
-        if (tree_lock)
-                cfs_spin_unlock(&hdr->coh_page_guard);
-        RETURN(res);
+               spin_lock(&hdr->coh_page_guard);
+               tree_lock = 1;
+       }
+       if (tree_lock)
+               spin_unlock(&hdr->coh_page_guard);
+       RETURN(res);
 }
 EXPORT_SYMBOL(cl_page_gang_lookup);
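
cl_page_gang_lookup() keeps its lock/batch/unlock rhythm: collect a batch
of pages from the radix tree under coh_page_guard, take a reference on
each, drop the lock, and only then run per-page work that may block; the
comment before the unlock explains why pages must be re-validated
afterwards. A simplified userspace sketch of that pattern, with a plain
array standing in for the radix tree and invented names throughout:

#include <pthread.h>
#include <stddef.h>

#define PVEC_SIZE 16
#define TREE_SIZE 1024

struct page { int id; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *tree[TREE_SIZE]; /* stand-in for the radix tree */

/* Stand-ins for cl_page_get_trust() / cl_page_put(). */
static void page_get(struct page *p) { (void)p; }
static void page_put(struct page *p) { (void)p; }

static void gang_process(void (*work)(struct page *))
{
        struct page *pvec[PVEC_SIZE];
        size_t idx = 0;

        pthread_mutex_lock(&tree_lock);
        for (;;) {
                size_t nr = 0;

                /* Batch lookup under the lock, pinning each hit. */
                while (idx < TREE_SIZE && nr < PVEC_SIZE) {
                        if (tree[idx] != NULL) {
                                page_get(tree[idx]);
                                pvec[nr++] = tree[idx];
                        }
                        idx++;
                }
                if (nr == 0)
                        break;

                /* Drop the lock before work that may block; the pages
                 * are pinned, but their state must be re-checked. */
                pthread_mutex_unlock(&tree_lock);
                for (size_t i = 0; i < nr; i++) {
                        work(pvec[i]);
                        page_put(pvec[i]);
                }
                pthread_mutex_lock(&tree_lock);
        }
        pthread_mutex_unlock(&tree_lock);
}
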
 
 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 {
         struct cl_object *obj  = page->cp_obj;
-        struct cl_site   *site = cl_object_site(obj);
+       int pagesize = cl_object_header(obj)->coh_page_bufsize;
 
         PASSERT(env, page, cfs_list_empty(&page->cp_batch));
         PASSERT(env, page, page->cp_owner == NULL);
@@ -305,15 +294,12 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
                 cfs_list_del_init(page->cp_layers.next);
                 slice->cpl_ops->cpo_fini(env, slice);
         }
-        cfs_atomic_dec(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
-        cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
-#endif
+       CS_PAGE_DEC(obj, total);
+       CS_PAGESTATE_DEC(obj, page->cp_state);
         lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
         cl_object_put(env, obj);
         lu_ref_fini(&page->cp_reference);
-        OBD_SLAB_FREE_PTR(page, cl_page_kmem);
+        OBD_FREE(page, pagesize);
         EXIT;
 }
 
@@ -328,61 +314,55 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
         *(enum cl_page_state *)&page->cp_state = state;
 }
 
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
-                         pgoff_t ind, struct page *vmpage,
-                         enum cl_page_type type, struct cl_page **out)
+static struct cl_page *cl_page_alloc(const struct lu_env *env,
+               struct cl_object *o, pgoff_t ind, struct page *vmpage,
+               enum cl_page_type type)
 {
-        struct cl_page          *page;
-        struct cl_page          *err  = NULL;
-        struct lu_object_header *head;
-        struct cl_site          *site = cl_object_site(o);
-        int                      result;
-
-        ENTRY;
-        result = +1;
-        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
-        if (page != NULL) {
-                cfs_atomic_set(&page->cp_ref, 1);
-                page->cp_obj = o;
-                cl_object_get(o);
-                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
-                                                     "cl_page", page);
-                page->cp_index = ind;
-                cl_page_state_set_trust(page, CPS_CACHED);
-                page->cp_type = type;
-                CFS_INIT_LIST_HEAD(&page->cp_layers);
-                CFS_INIT_LIST_HEAD(&page->cp_batch);
-                CFS_INIT_LIST_HEAD(&page->cp_flight);
-                cfs_mutex_init(&page->cp_mutex);
-                lu_ref_init(&page->cp_reference);
-                head = o->co_lu.lo_header;
-                cfs_list_for_each_entry(o, &head->loh_layers,
-                                        co_lu.lo_linkage) {
-                        if (o->co_ops->coo_page_init != NULL) {
-                                err = o->co_ops->coo_page_init(env, o,
-                                                               page, vmpage);
-                                if (err != NULL) {
-                                        cl_page_delete0(env, page, 0);
-                                        cl_page_free(env, page);
-                                        page = err;
-                                        break;
-                                }
-                        }
-                }
-                if (err == NULL) {
-                        cfs_atomic_inc(&site->cs_pages.cs_busy);
-                        cfs_atomic_inc(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
-                        cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
-#endif
-                        cfs_atomic_inc(&site->cs_pages.cs_created);
-                        result = 0;
-                }
-        } else
-                page = ERR_PTR(-ENOMEM);
-        *out = page;
-        RETURN(result);
+       struct cl_page          *page;
+       struct lu_object_header *head;
+
+       ENTRY;
+       OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+                       CFS_ALLOC_IO);
+       if (page != NULL) {
+               int result = 0;
+               cfs_atomic_set(&page->cp_ref, 1);
+               if (type == CPT_CACHEABLE) /* for radix tree */
+                       cfs_atomic_inc(&page->cp_ref);
+               page->cp_obj = o;
+               cl_object_get(o);
+               page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page",page);
+               page->cp_index = ind;
+               cl_page_state_set_trust(page, CPS_CACHED);
+               page->cp_type = type;
+               CFS_INIT_LIST_HEAD(&page->cp_layers);
+               CFS_INIT_LIST_HEAD(&page->cp_batch);
+               CFS_INIT_LIST_HEAD(&page->cp_flight);
+               mutex_init(&page->cp_mutex);
+               lu_ref_init(&page->cp_reference);
+               head = o->co_lu.lo_header;
+               cfs_list_for_each_entry(o, &head->loh_layers,
+                                       co_lu.lo_linkage) {
+                       if (o->co_ops->coo_page_init != NULL) {
+                               result = o->co_ops->coo_page_init(env, o,
+                                                                 page, vmpage);
+                               if (result != 0) {
+                                       cl_page_delete0(env, page, 0);
+                                       cl_page_free(env, page);
+                                       page = ERR_PTR(result);
+                                       break;
+                               }
+                       }
+               }
+               if (result == 0) {
+                       CS_PAGE_INC(o, total);
+                       CS_PAGE_INC(o, create);
+                       CS_PAGESTATE_INC(o, CPS_CACHED);
+               }
+       } else {
+               page = ERR_PTR(-ENOMEM);
+       }
+       RETURN(page);
 }
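
Two things changed here. First, allocation: the dedicated slab cache is
gone, and each page is a single OBD_ALLOC_GFP() of coh_page_bufsize
bytes, a per-object size covering struct cl_page plus the layers'
slices, freed with OBD_FREE() at the same size in cl_page_free() above.
Second, lifetime: a CPT_CACHEABLE page starts with one reference for the
caller plus one owned by the radix tree itself, dropped in
cl_page_delete0() when the page leaves the tree. A hedged sketch of the
sizing idea, with invented names:

#include <stdlib.h>
#include <string.h>

struct obj_header {
        size_t page_bufsize; /* sizeof(page header) + all slice sizes */
};

struct page_hdr {
        int refcount;
        int cacheable;
        /* the layers' slices live in the same allocation, after this */
};

static struct page_hdr *page_alloc(const struct obj_header *h, int cacheable)
{
        /* One buffer for the header and every layer's slice: no slab
         * cache and no per-layer allocations. */
        struct page_hdr *p = malloc(h->page_bufsize);

        if (p != NULL) {
                memset(p, 0, h->page_bufsize);
                p->cacheable = cacheable;
                p->refcount = 1;        /* the caller's reference */
                if (cacheable)
                        p->refcount++;  /* the lookup tree's own reference */
        }
        return p;
}

static void page_free(const struct obj_header *h, struct page_hdr *p)
{
        (void)h; /* a real OBD_FREE() needs the size it allocated with */
        free(p);
}
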
 
 /**
@@ -405,7 +385,6 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         struct cl_page          *page = NULL;
         struct cl_page          *ghost = NULL;
         struct cl_object_header *hdr;
-        struct cl_site          *site = cl_object_site(o);
         int err;
 
         LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
@@ -414,12 +393,15 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         ENTRY;
 
         hdr = cl_object_header(o);
-        cfs_atomic_inc(&site->cs_pages.cs_lookup);
+       CS_PAGE_INC(o, lookup);
 
         CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
+               /* vmpage lock is used to protect the child/parent
+                * relationship */
+               KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
                  *
@@ -438,13 +420,13 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         }
 
         if (page != NULL) {
-                cfs_atomic_inc(&site->cs_pages.cs_hit);
+               CS_PAGE_INC(o, hit);
                 RETURN(page);
         }
 
         /* allocate and initialize cl_page */
-        err = cl_page_alloc(env, o, idx, vmpage, type, &page);
-        if (err != 0)
+        page = cl_page_alloc(env, o, idx, vmpage, type);
+        if (IS_ERR(page))
                 RETURN(page);
 
         if (type == CPT_TRANSIENT) {
@@ -460,7 +442,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
          * XXX optimization: use radix_tree_preload() here, and change tree
          * gfp mask to GFP_KERNEL in cl_object_header_init().
          */
-        cfs_spin_lock(&hdr->coh_page_guard);
+       spin_lock(&hdr->coh_page_guard);
         err = radix_tree_insert(&hdr->coh_tree, idx, page);
         if (err != 0) {
                 ghost = page;
@@ -488,10 +470,9 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                 }
                 hdr->coh_pages++;
         }
-        cfs_spin_unlock(&hdr->coh_page_guard);
+       spin_unlock(&hdr->coh_page_guard);
 
         if (unlikely(ghost != NULL)) {
-                cfs_atomic_dec(&site->cs_pages.cs_busy);
                 cl_page_delete0(env, ghost, 0);
                 cl_page_free(env, ghost);
         }
@@ -532,7 +513,7 @@ static inline int cl_page_invariant(const struct cl_page *pg)
         child  = pg->cp_child;
         owner  = pg->cp_owner;
 
-        return cfs_atomic_read(&pg->cp_ref) > 0 &&
+        return cl_page_in_use(pg) &&
                 ergo(parent != NULL, parent->cp_child == pg) &&
                 ergo(child != NULL, child->cp_parent == pg) &&
                 ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
@@ -555,9 +536,6 @@ static void cl_page_state_set0(const struct lu_env *env,
                                struct cl_page *page, enum cl_page_state state)
 {
         enum cl_page_state old;
-#ifdef LUSTRE_PAGESTATE_TRACKING
-        struct cl_site *site = cl_object_site(page->cp_obj);
-#endif
 
         /*
          * Matrix of allowed state transitions [old][new], for sanity
@@ -610,10 +588,8 @@ static void cl_page_state_set0(const struct lu_env *env,
                 PASSERT(env, page,
                         equi(state == CPS_OWNED, page->cp_owner != NULL));
 
-#ifdef LUSTRE_PAGESTATE_TRACKING
-                cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
-                cfs_atomic_inc(&site->cs_pages_state[state]);
-#endif
+               CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
+               CS_PAGESTATE_INC(page->cp_obj, state);
                 cl_page_state_set_trust(page, state);
         }
         EXIT;
@@ -635,10 +611,9 @@ static void cl_page_state_set(const struct lu_env *env,
  */
 void cl_page_get(struct cl_page *page)
 {
-        ENTRY;
-        LASSERT(page->cp_state != CPS_FREEING);
-        cl_page_get_trust(page);
-        EXIT;
+       ENTRY;
+       cl_page_get_trust(page);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_get);
 
@@ -653,45 +628,26 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object_header *hdr;
-        struct cl_site *site = cl_object_site(page->cp_obj);
-
         PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
 
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                        cfs_atomic_read(&page->cp_ref));
 
-        hdr = cl_object_header(cl_object_top(page->cp_obj));
-        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
-                cfs_atomic_dec(&site->cs_pages.cs_busy);
-                /* We're going to access the page w/o a reference, but it's
-                 * ok because we have grabbed the lock coh_page_guard, which
-                 * means nobody is able to free this page behind us.
-                 */
-                if (page->cp_state == CPS_FREEING) {
-                        /* We drop the page reference and check the page state
-                         * inside the coh_page_guard. So that if it gets here,
-                         * it is the REALLY last reference to this page.
-                         */
-                        cfs_spin_unlock(&hdr->coh_page_guard);
-
-                        LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
-                        PASSERT(env, page, page->cp_owner == NULL);
-                        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
-                        /*
-                         * Page is no longer reachable by other threads. Tear
-                         * it down.
-                         */
-                        cl_page_free(env, page);
+       if (cfs_atomic_dec_and_test(&page->cp_ref)) {
+               LASSERT(page->cp_state == CPS_FREEING);
 
-                        EXIT;
-                        return;
-                }
-                cfs_spin_unlock(&hdr->coh_page_guard);
-        }
+               LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
+               PASSERT(env, page, page->cp_owner == NULL);
+               PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+               /*
+                * Page is no longer reachable by other threads. Tear
+                * it down.
+                */
+               cl_page_free(env, page);
+       }
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_put);
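
This is the core of the new lifetime rule. Because the radix tree holds
its own explicit reference (taken in cl_page_alloc(), dropped by the
cl_page_put() added to cl_page_delete0() below), the last put no longer
needs cfs_atomic_dec_and_lock() on coh_page_guard: if the counter hits
zero, the page is guaranteed to be unlinked and in CPS_FREEING already,
so it can be freed outright. A standalone sketch of that dec-and-test
teardown, with invented names:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

enum page_state { PS_CACHED, PS_FREEING };

struct page {
        atomic_int      refcount;
        enum page_state state;
};

static void page_free(struct page *p) { free(p); }

static void page_put(struct page *p)
{
        if (atomic_fetch_sub(&p->refcount, 1) == 1) {
                /*
                 * The tree drops its reference only after unlinking the
                 * page and marking it PS_FREEING, so a zero count means
                 * nobody can look this page up anymore: free it without
                 * taking any tree lock.
                 */
                assert(p->state == PS_FREEING);
                page_free(p);
        }
}
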
 
@@ -723,8 +679,8 @@ EXPORT_SYMBOL(cl_page_vmpage);
  */
 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
 {
-        struct cl_page *page;
-        struct cl_object_header *hdr;
+       struct cl_page *top;
+       struct cl_page *page;
 
         ENTRY;
         KLASSERT(PageLocked(vmpage));
@@ -739,18 +695,18 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * This loop assumes that ->private points to the top-most page. This
          * can be rectified easily.
          */
-        hdr = cl_object_header(cl_object_top(obj));
-        cfs_spin_lock(&hdr->coh_page_guard);
-        for (page = (void *)vmpage->private;
-             page != NULL; page = page->cp_child) {
-                if (cl_object_same(page->cp_obj, obj)) {
-                        cl_page_get_trust(page);
-                        break;
-                }
-        }
-        cfs_spin_unlock(&hdr->coh_page_guard);
-        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
-        RETURN(page);
+       top = (struct cl_page *)vmpage->private;
+       if (top == NULL)
+               RETURN(NULL);
+
+       for (page = top; page != NULL; page = page->cp_child) {
+               if (cl_object_same(page->cp_obj, obj)) {
+                       cl_page_get_trust(page);
+                       break;
+               }
+       }
+       LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
+       RETURN(page);
 }
 EXPORT_SYMBOL(cl_vmpage_page);
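
cl_vmpage_page() no longer takes coh_page_guard: the KLASSERT above pins
the VM page lock, and with the tree holding its own reference the
vmpage->private top page and its cp_child chain stay stable for the
whole walk. A sketch of traversing such an externally-stabilized chain
(types and names invented):

#include <stddef.h>

struct obj;
struct page {
        struct obj  *obj;
        struct page *child; /* the next (lower) layer's page */
};

/* Stand-ins for cl_object_same() and cl_page_get_trust(). */
static int same_object(const struct page *p, const struct obj *o)
{
        return p->obj == o;
}
static void page_get(struct page *p) { (void)p; }

/*
 * Caller must hold whatever lock keeps the chain from changing (here,
 * the VM page lock); the walk itself then needs no lock of its own.
 */
static struct page *find_layer(struct page *top, const struct obj *o)
{
        for (struct page *p = top; p != NULL; p = p->child) {
                if (same_object(p, o)) {
                        page_get(p);
                        return p;
                }
        }
        return NULL;
}
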
 
@@ -1028,15 +984,14 @@ EXPORT_SYMBOL(cl_page_own_try);
 void cl_page_assume(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
 {
-        PASSERT(env, pg, pg->cp_owner == NULL);
         PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
-        PINVRNT(env, pg, cl_page_invariant(pg));
 
         ENTRY;
         pg = cl_page_top(pg);
         io = cl_io_top(io);
 
         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
+        PASSERT(env, pg, pg->cp_owner == NULL);
         pg->cp_owner = io;
         pg->cp_task = current;
         cl_page_owner_set(pg);
@@ -1149,6 +1104,9 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
         cl_page_export(env, pg, 0);
         cl_page_state_set0(env, pg, CPS_FREEING);
 
+        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
+                       (const struct lu_env *, const struct cl_page_slice *));
+
         if (tmp->cp_type == CPT_CACHEABLE) {
                 if (!radix)
                         /* !radix means that @pg is not yet in the radix tree,
@@ -1160,18 +1118,17 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                         struct cl_object_header *hdr;
 
                         hdr = cl_object_header(tmp->cp_obj);
-                        cfs_spin_lock(&hdr->coh_page_guard);
-                        value = radix_tree_delete(&hdr->coh_tree,
-                                                  tmp->cp_index);
-                        PASSERT(env, tmp, value == tmp);
-                        PASSERT(env, tmp, hdr->coh_pages > 0);
-                        hdr->coh_pages--;
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                       spin_lock(&hdr->coh_page_guard);
+                       value = radix_tree_delete(&hdr->coh_tree,
+                                                 tmp->cp_index);
+                       PASSERT(env, tmp, value == tmp);
+                       PASSERT(env, tmp, hdr->coh_pages > 0);
+                       hdr->coh_pages--;
+                       spin_unlock(&hdr->coh_page_guard);
+                       cl_page_put(env, tmp);
                 }
         }
 
-        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
-                       (const struct lu_env *, const struct cl_page_slice *));
         EXIT;
 }
 
@@ -1362,15 +1319,17 @@ void cl_page_completion(const struct lu_env *env,
                 LASSERT(cl_page_is_vmlocked(env, pg));
                 LASSERT(pg->cp_sync_io == anchor);
                 pg->cp_sync_io = NULL;
+       }
+       /*
+        * As page->cp_obj is pinned by a reference from page->cp_req, it is
+        * safe to call cl_page_put() without risking object destruction in a
+        * non-blocking context.
+        */
+       cl_page_put(env, pg);
+
+       if (anchor)
                 cl_sync_io_note(anchor, ioret);
-        }
 
-        /* Don't assert the page writeback bit here because the lustre file
-         * may be as a backend of swap space. in this case, the page writeback
-         * is set by VM, and obvious we shouldn't clear it at all. Fortunately
-         * this type of pages are all TRANSIENT pages. */
-        KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
-                      !PageWriteback(cl_page_vmpage(env, pg))));
         EXIT;
 }
 EXPORT_SYMBOL(cl_page_completion);
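
This reordering is the directIO race from the commit subject.
cl_sync_io_note() may wake a thread blocked in cl_sync_io_wait(), and
for sync pages that thread can immediately free the pages and the
structures around them, so the completion thread must be completely done
with the page, its cl_page_put() included, before it signals the anchor
(the cp_req reference mentioned in the comment is what makes the put
itself safe). A simplified pthread sketch of the required ordering; the
anchor type is invented and its own lifetime management is elided:

#include <pthread.h>
#include <stdatomic.h>

struct page;
static void page_put(struct page *pg) { (void)pg; }

struct sync_anchor {
        atomic_int      nr_pending;     /* pages still in flight */
        pthread_mutex_t lock;
        pthread_cond_t  done;
};

static void sync_note(struct sync_anchor *a)
{
        if (atomic_fetch_sub(&a->nr_pending, 1) == 1) {
                pthread_mutex_lock(&a->lock);
                pthread_cond_broadcast(&a->done);
                pthread_mutex_unlock(&a->lock);
        }
}

/* Completion thread: finish with the page, THEN wake the waiter. */
static void page_completion(struct page *pg, struct sync_anchor *a)
{
        page_put(pg);   /* last touch of pg by this thread */
        sync_note(a);   /* after this, the waiter may free everything */
}

/* directIO thread: returns only when all submitted pages completed. */
static void sync_wait(struct sync_anchor *a)
{
        pthread_mutex_lock(&a->lock);
        while (atomic_load(&a->nr_pending) > 0)
                pthread_cond_wait(&a->done, &a->lock);
        pthread_mutex_unlock(&a->lock);
}
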
@@ -1412,32 +1371,61 @@ EXPORT_SYMBOL(cl_page_make_ready);
  * its queues.
  *
  * \pre  cl_page_is_owned(pg, io)
- * \post ergo(result == 0,
- *            pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT)
+ * \post cl_page_is_owned(pg, io)
  *
  * \see cl_page_operations::cpo_cache_add()
  */
 int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
                       struct cl_page *pg, enum cl_req_type crt)
 {
-        int result;
+       const struct cl_page_slice *scan;
+       int result = 0;
 
-        PINVRNT(env, pg, crt < CRT_NR);
-        PINVRNT(env, pg, cl_page_is_owned(pg, io));
-        PINVRNT(env, pg, cl_page_invariant(pg));
+       PINVRNT(env, pg, crt < CRT_NR);
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
 
-        ENTRY;
-        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
-        if (result == 0) {
-                cl_page_owner_clear(pg);
-                cl_page_state_set(env, pg, CPS_CACHED);
-        }
-        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
-        RETURN(result);
+       ENTRY;
+
+       cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
+               if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
+                       continue;
+
+               result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
+               if (result != 0)
+                       break;
+       }
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_page_cache_add);
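
cl_page_cache_add() now walks the slice list by hand rather than going
through cl_page_invoke(), since cpo_cache_add lives in each slice's
per-request-type io[] array that the generic invoke machinery does not
reach. The walk calls every layer that implements the hook and stops at
the first error; a sketch with invented list and ops layouts:

#include <stddef.h>

enum req_type { RT_READ, RT_WRITE, RT_NR };

struct slice;
struct slice_ops {
        /* optional per-request-type hook; may be NULL in any layer */
        int (*cache_add[RT_NR])(struct slice *s);
};

struct slice {
        const struct slice_ops *ops;
        struct slice           *next;   /* next (lower) layer */
};

static int page_cache_add(struct slice *layers, enum req_type crt)
{
        int result = 0;

        for (struct slice *s = layers; s != NULL; s = s->next) {
                if (s->ops->cache_add[crt] == NULL)
                        continue;
                result = s->ops->cache_add[crt](s);
                if (result != 0)
                        break;
        }
        return result;
}
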
 
 /**
+ * Called when a page is being written back at the kernel's request.
+ *
+ * \pre  cl_page_is_owned(pg, io)
+ * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
+ *
+ * \see cl_page_operations::cpo_flush()
+ */
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+                 struct cl_page *pg)
+{
+       int result;
+
+       PINVRNT(env, pg, cl_page_is_owned(pg, io));
+       PINVRNT(env, pg, cl_page_invariant(pg));
+
+       ENTRY;
+
+       result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
+
+       CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
+       RETURN(result);
+}
+EXPORT_SYMBOL(cl_page_flush);
+
+/**
 * Checks whether the page is protected by any extent lock in at least the
 * required mode.
  *
@@ -1490,6 +1478,7 @@ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
          * function, we just make cl_page_list functions happy. -jay
          */
         io->ci_obj = obj;
+       io->ci_ignore_layout = 1;
         result = cl_io_init(env, io, CIT_MISC, obj);
         if (result != 0) {
                 cl_io_fini(env, io);
@@ -1625,10 +1614,9 @@ EXPORT_SYMBOL(cl_page_slice_add);
 
 int  cl_page_init(void)
 {
-        return lu_kmem_init(cl_page_caches);
+        return 0;
 }
 
 void cl_page_fini(void)
 {
-        lu_kmem_fini(cl_page_caches);
 }