LU-17744 ldiskfs: mballoc stats fixes
[fs/lustre-release.git] / lustre / obdclass / cl_page.c
index 31ddf2c..1efb083 100644
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  *
  * Client Lustre Page.
  *
 #include <cl_object.h>
 #include "cl_internal.h"
 
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *pg);
 static DEFINE_MUTEX(cl_page_kmem_mutex);
 
 #ifdef LIBCFS_DEBUG
 # define PASSERT(env, page, expr)                                       \
-  do {                                                                    \
-          if (unlikely(!(expr))) {                                      \
-                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
-                  LASSERT(0);                                           \
-          }                                                             \
-  } while (0)
+do {                                                                    \
+       if (unlikely(!(expr))) {                                        \
+               CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");      \
+               LASSERT(0);                                             \
+       }                                                               \
+} while (0)
 #else /* !LIBCFS_DEBUG */
-# define PASSERT(env, page, exp) \
-        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
+#define PASSERT(env, page, exp) \
+       ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
 #endif /* !LIBCFS_DEBUG */
 
 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 # define PINVRNT(env, page, expr)                                       \
-  do {                                                                    \
-          if (unlikely(!(expr))) {                                      \
-                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
-                  LINVRNT(0);                                           \
-          }                                                             \
-  } while (0)
+do {                                                                    \
+       if (unlikely(!(expr))) {                                        \
+               CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");      \
+               LINVRNT(0);                                             \
+       }                                                               \
+} while (0)
 #else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 # define PINVRNT(env, page, exp) \
         ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
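
A side note on the non-debug fallbacks above: keeping the arguments inside sizeof means they are still parsed and type-checked but never evaluated, so the macro compiles to nothing. A minimal standalone sketch of the same idiom (CHECK_ONLY and demo() are made-up names for illustration):

    /* Same trick as the non-debug PASSERT/PINVRNT: sizeof never evaluates
     * its operand, so the condition is only compile-checked.
     */
    #define CHECK_ONLY(cond)  ((void)sizeof(!!(cond)))

    static int demo(int *p)
    {
            CHECK_ONLY(*p == 42);   /* type-checked, but p is never dereferenced */
            return 0;
    }
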
@@ -100,7 +99,7 @@ static void cs_pagestate_inc(const struct cl_object *obj,
 }
 
 static void cs_pagestate_dec(const struct cl_object *obj,
-                             enum cl_page_state state)
+                            enum cl_page_state state)
 {
 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_dec(&cl_object_site(obj)->cs_pages_state[state]);
@@ -119,8 +118,8 @@ static void cs_pagestate_dec(const struct cl_object *obj,
  */
 static void cl_page_get_trust(struct cl_page *page)
 {
-       LASSERT(atomic_read(&page->cp_ref) > 0);
-       atomic_inc(&page->cp_ref);
+       LASSERT(refcount_read(&page->cp_ref) > 0);
+       refcount_inc(&page->cp_ref);
 }
 
 static struct cl_page_slice *
@@ -146,29 +145,6 @@ cl_page_slice_get(const struct cl_page *cl_page, int index)
             slice = cl_page_slice_get(cl_page, i); i >= 0;     \
             slice = cl_page_slice_get(cl_page, --i))
 
-/**
- * Returns a slice within a cl_page, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_lock_at()
- */
-static const struct cl_page_slice *
-cl_page_at_trusted(const struct cl_page *cl_page,
-                   const struct lu_device_type *dtype)
-{
-       const struct cl_page_slice *slice;
-       int i;
-
-       ENTRY;
-
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
-                       RETURN(slice);
-       }
-
-       RETURN(NULL);
-}
-
 static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
 {
        int index = cl_page->cp_kmem_index;
@@ -182,31 +158,43 @@ static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
        }
 }
 
-static void cl_page_free(const struct lu_env *env, struct cl_page *cl_page,
-                        struct pagevec *pvec)
+static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
+                        struct folio_batch *fbatch)
 {
-       struct cl_object *obj  = cl_page->cp_obj;
+       struct cl_object *obj  = cp->cp_obj;
        unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
-       struct cl_page_slice *slice;
-       int i;
+       struct page *vmpage;
 
        ENTRY;
-       PASSERT(env, cl_page, list_empty(&cl_page->cp_batch));
-       PASSERT(env, cl_page, cl_page->cp_owner == NULL);
-       PASSERT(env, cl_page, cl_page->cp_state == CPS_FREEING);
+       PASSERT(env, cp, list_empty(&cp->cp_batch));
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (unlikely(slice->cpl_ops->cpo_fini != NULL))
-                       slice->cpl_ops->cpo_fini(env, slice, pvec);
+       if (cp->cp_type == CPT_CACHEABLE) {
+               PASSERT(env, cp, cp->cp_owner == NULL);
+               PASSERT(env, cp, cp->cp_state == CPS_FREEING);
+               /* vmpage->private was already cleared when page was
+                * moved into CPS_FREEING state.
+                */
+               vmpage = cp->cp_vmpage;
+               LASSERT(vmpage != NULL);
+               LASSERT((struct cl_page *)vmpage->private != cp);
+
+               if (fbatch != NULL) {
+                       if (!folio_batch_add_page(fbatch, vmpage))
+                               folio_batch_release(fbatch);
+               } else {
+                       put_page(vmpage);
+               }
        }
-       cl_page->cp_layer_count = 0;
+
+       cp->cp_layer_count = 0;
        cs_page_dec(obj, CS_total);
-       cs_pagestate_dec(obj, cl_page->cp_state);
-       lu_object_ref_del_at(&obj->co_lu, &cl_page->cp_obj_ref,
-                            "cl_page", cl_page);
-       cl_object_put(env, obj);
-       lu_ref_fini(&cl_page->cp_reference);
-       __cl_page_free(cl_page, bufsize);
+       if (cp->cp_type != CPT_TRANSIENT)
+               cs_pagestate_dec(obj, cp->cp_state);
+       lu_object_ref_del_at(&obj->co_lu, &cp->cp_obj_ref, "cl_page", cp);
+       if (cp->cp_type != CPT_TRANSIENT)
+               cl_object_put(env, obj);
+       lu_ref_fini(&cp->cp_reference);
+       __cl_page_free(cp, bufsize);
        EXIT;
 }
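
cl_page_free() now drops the vmpage reference through an optional folio_batch, so teardown paths can hand many pages back to the VM with a single call. A hedged caller-side sketch of that contract, assuming a private list of cl_pages linked through cp_batch (drop_pages() is an illustrative name); the trailing folio_batch_release() is the step the cl_batch_put() documentation later in this patch insists on:

    /* Sketch only: tear down a caller-private list of cl_pages, batching
     * the vmpage releases through one folio_batch.
     */
    #include <linux/pagevec.h>

    static void drop_pages(const struct lu_env *env, struct list_head *pages)
    {
            struct folio_batch fbatch;
            struct cl_page *page, *tmp;

            folio_batch_init(&fbatch);
            list_for_each_entry_safe(page, tmp, pages, cp_batch) {
                    list_del_init(&page->cp_batch);
                    cl_batch_put(env, page, &fbatch);
            }
            /* release whatever vmpages are still queued in the batch */
            folio_batch_release(&fbatch);
    }
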
 
@@ -216,13 +204,15 @@ static struct cl_page *__cl_page_alloc(struct cl_object *o)
        struct cl_page *cl_page = NULL;
        unsigned short bufsize = cl_object_header(o)->coh_page_bufsize;
 
+       if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_PAGE_ALLOC))
+               return NULL;
+
 check:
        /* the number of entries in cl_page_kmem_array is expected to
         * only be 2-3 entries, so the lookup overhead should be low.
         */
        for ( ; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
-               if (smp_load_acquire(&cl_page_kmem_size_array[i])
-                   == bufsize) {
+               if (smp_load_acquire(&cl_page_kmem_size_array[i]) == bufsize) {
                        OBD_SLAB_ALLOC_GFP(cl_page, cl_page_kmem_array[i],
                                           bufsize, GFP_NOFS);
                        if (cl_page)
@@ -250,8 +240,7 @@ check:
                        mutex_unlock(&cl_page_kmem_mutex);
                        return NULL;
                }
-               smp_store_release(&cl_page_kmem_size_array[i],
-                                 bufsize);
+               smp_store_release(&cl_page_kmem_size_array[i], bufsize);
                mutex_unlock(&cl_page_kmem_mutex);
                goto check;
        } else {
@@ -268,7 +257,7 @@ struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                              enum cl_page_type type)
 {
        struct cl_page *cl_page;
-       struct lu_object_header *head;
+       struct cl_object *head;
 
        ENTRY;
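
The cl_page_kmem lookup above relies on a publish/consume pairing: the slab cache is created first and its size is published only afterwards with smp_store_release(), so any reader that observes the size through smp_load_acquire() also sees a fully initialized cache. A minimal sketch of the same pattern with hypothetical names (slot_size, slot_cache):

    /* Illustrative publish/consume pairing, mirroring the
     * cl_page_kmem_size_array[]/cl_page_kmem_array[] handling above.
     */
    static unsigned short slot_size;
    static struct kmem_cache *slot_cache;

    static void publish_cache(unsigned short bufsize)
    {
            slot_cache = kmem_cache_create("demo", bufsize, 0, 0, NULL);
            /* publish the size only after the cache pointer is valid */
            smp_store_release(&slot_size, bufsize);
    }

    static struct kmem_cache *lookup_cache(unsigned short bufsize)
    {
            /* a reader that sees the size is guaranteed to see the cache */
            if (smp_load_acquire(&slot_size) == bufsize)
                    return slot_cache;
            return NULL;
    }
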
 
@@ -276,30 +265,36 @@ struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
        if (cl_page != NULL) {
                int result = 0;
 
-               /*
-                * Please fix cl_page:cp_state/type declaration if
+               /* Please fix cl_page:cp_state/type declaration if
                 * these assertions fail in the future.
                 */
                BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
                BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */
-               atomic_set(&cl_page->cp_ref, 1);
+               refcount_set(&cl_page->cp_ref, 1);
                cl_page->cp_obj = o;
-               cl_object_get(o);
+               if (type != CPT_TRANSIENT)
+                       cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &cl_page->cp_obj_ref,
                                     "cl_page", cl_page);
                cl_page->cp_vmpage = vmpage;
-               cl_page->cp_state = CPS_CACHED;
+               if (cl_page->cp_type != CPT_TRANSIENT)
+                       cl_page->cp_state = CPS_CACHED;
                cl_page->cp_type = type;
+               if (type == CPT_TRANSIENT)
+                       /* the correct inode is set later in ll_direct_rw_pages() */
+                       cl_page->cp_inode = NULL;
+               else
+                       cl_page->cp_inode = page2inode(vmpage);
                INIT_LIST_HEAD(&cl_page->cp_batch);
                lu_ref_init(&cl_page->cp_reference);
-               head = o->co_lu.lo_header;
-               list_for_each_entry(o, &head->loh_layers,
-                                   co_lu.lo_linkage) {
+               head = o;
+               cl_page->cp_page_index = ind;
+               cl_object_for_each(o, head) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o,
                                                        cl_page, ind);
                                if (result != 0) {
-                                       cl_page_delete0(env, cl_page);
+                                       __cl_page_delete(env, cl_page);
                                        cl_page_free(env, cl_page, NULL);
                                        cl_page = ERR_PTR(result);
                                        break;
@@ -344,31 +339,30 @@ struct cl_page *cl_page_find(const struct lu_env *env,
        hdr = cl_object_header(o);
        cs_page_inc(o, CS_lookup);
 
-        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
-               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
-        /* fast path. */
-        if (type == CPT_CACHEABLE) {
-               /* vmpage lock is used to protect the child/parent
-                * relationship */
+       CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
+              idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
+       /* fast path. */
+       if (type == CPT_CACHEABLE) {
+               /* vmpage lock used to protect the child/parent relationship */
                LASSERT(PageLocked(vmpage));
-                /*
-                 * cl_vmpage_page() can be called here without any locks as
-                 *
-                 *     - "vmpage" is locked (which prevents ->private from
-                 *       concurrent updates), and
-                 *
-                 *     - "o" cannot be destroyed while current thread holds a
-                 *       reference on it.
-                 */
-                page = cl_vmpage_page(vmpage, o);
+               /*
+                * cl_vmpage_page() can be called here without any locks as
+                *
+                *     - "vmpage" is locked (which prevents ->private from
+                *       concurrent updates), and
+                *
+                *     - "o" cannot be destroyed while current thread holds a
+                *       reference on it.
+                */
+               page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        cs_page_inc(o, CS_hit);
                        RETURN(page);
                }
-        }
+       }
 
-        /* allocate and initialize cl_page */
-        page = cl_page_alloc(env, o, idx, vmpage, type);
+       /* allocate and initialize cl_page */
+       page = cl_page_alloc(env, o, idx, vmpage, type);
        RETURN(page);
 }
 EXPORT_SYMBOL(cl_page_find);
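
cl_page_find() either returns the cl_page already attached to a locked vmpage (the fast path above via cl_vmpage_page()) or allocates and initializes a new one. A hedged sketch of the cacheable-case call site; get_cl_page() is a made-up wrapper and the caller is assumed to already hold the page lock, as required above:

    /* Sketch only: look up or create the cl_page for a locked vmpage. */
    static struct cl_page *get_cl_page(const struct lu_env *env,
                                       struct cl_object *obj,
                                       struct page *vmpage)
    {
            struct cl_page *page;

            LASSERT(PageLocked(vmpage));    /* required for CPT_CACHEABLE */
            page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
            if (IS_ERR(page))
                    return page;    /* allocation or a layer's page_init failed */

            /* the returned page holds a reference; drop it with cl_page_put() */
            return page;
    }
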
@@ -378,16 +372,13 @@ static inline int cl_page_invariant(const struct cl_page *pg)
        return cl_page_in_use_noref(pg);
 }
 
-static void cl_page_state_set0(const struct lu_env *env,
-                              struct cl_page *cl_page,
-                              enum cl_page_state state)
+static void __cl_page_state_set(const struct lu_env *env,
+                               struct cl_page *cl_page,
+                               enum cl_page_state state)
 {
        enum cl_page_state old;
 
-       /*
-        * Matrix of allowed state transitions [old][new], for sanity
-        * checking.
-        */
+       /* Matrix of allowed state transitions [old][new] for sanity checking */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_CACHED]  = 0,
@@ -441,9 +432,10 @@ static void cl_page_state_set0(const struct lu_env *env,
 }
 
 static void cl_page_state_set(const struct lu_env *env,
-                              struct cl_page *page, enum cl_page_state state)
+                             struct cl_page *page, enum cl_page_state state)
 {
-        cl_page_state_set0(env, page, state);
+       LASSERT(page->cp_type != CPT_TRANSIENT);
+       __cl_page_state_set(env, page, state);
 }
 
 /**
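
For reference, the allowed_transitions matrix in __cl_page_state_set() together with the helpers in this file implies the following typical round trip for a cacheable page (a summary of existing behaviour, not something this patch changes):

    /*
     * CPS_CACHED     --cl_page_own()/cl_page_assume()----->  CPS_OWNED
     * CPS_OWNED      --cl_page_prep()/cl_page_io_start()-->  CPS_PAGEIN/PAGEOUT
     * CPS_PAGEIN/OUT --cl_page_completion()--------------->  CPS_CACHED
     * CPS_CACHED     --cl_page_delete()------------------->  CPS_FREEING
     */
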
@@ -463,37 +455,36 @@ void cl_page_get(struct cl_page *page)
 EXPORT_SYMBOL(cl_page_get);
 
 /**
- * Releases a reference to a page, use the pagevec to release the pages
+ * Releases a reference to a page, use the folio_batch to release the pages
  * in batch if provided.
  *
- * Users need to do a final pagevec_release() to release any trailing pages.
+ * Users need to do a final folio_batch_release() to release any trailing pages.
  */
-void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
-                 struct pagevec *pvec)
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+                 struct folio_batch *fbatch)
 {
-        ENTRY;
-        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
-                      atomic_read(&page->cp_ref));
+       ENTRY;
+       CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
+                      refcount_read(&page->cp_ref));
 
-       if (atomic_dec_and_test(&page->cp_ref)) {
-               LASSERT(page->cp_state == CPS_FREEING);
+       if (refcount_dec_and_test(&page->cp_ref)) {
+               if (page->cp_type != CPT_TRANSIENT) {
+                       LASSERT(page->cp_state == CPS_FREEING);
+                       PASSERT(env, page, page->cp_owner == NULL);
+               }
 
-               LASSERT(atomic_read(&page->cp_ref) == 0);
-               PASSERT(env, page, page->cp_owner == NULL);
+               LASSERT(refcount_read(&page->cp_ref) == 0);
                PASSERT(env, page, list_empty(&page->cp_batch));
-               /*
-                * Page is no longer reachable by other threads. Tear
-                * it down.
-                */
-               cl_page_free(env, page, pvec);
+               /* Page is no longer reachable by other threads. Tear it down */
+               cl_page_free(env, page, fbatch);
        }
 
        EXIT;
 }
-EXPORT_SYMBOL(cl_pagevec_put);
+EXPORT_SYMBOL(cl_batch_put);
 
 /**
- * Releases a reference to a page, wrapper to cl_pagevec_put
+ * Releases a reference to a page; a wrapper around cl_batch_put()
  *
  * When last reference is released, page is returned to the cache, unless it
  * is in cl_page_state::CPS_FREEING state, in which case it is immediately
@@ -503,13 +494,11 @@ EXPORT_SYMBOL(cl_pagevec_put);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-       cl_pagevec_put(env, page, NULL);
+       cl_batch_put(env, page, NULL);
 }
 EXPORT_SYMBOL(cl_page_put);
 
-/**
- * Returns a cl_page associated with a VM page, and given cl_object.
- */
+/* Returns a cl_page associated with a VM page, and given cl_object. */
 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
 {
        struct cl_page *page;
@@ -532,13 +521,6 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
 }
 EXPORT_SYMBOL(cl_vmpage_page);
 
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
-                                       const struct lu_device_type *dtype)
-{
-        return cl_page_at_trusted(page, dtype);
-}
-EXPORT_SYMBOL(cl_page_at);
-
 static void cl_page_owner_clear(struct cl_page *page)
 {
        ENTRY;
@@ -558,45 +540,39 @@ static void cl_page_owner_set(struct cl_page *page)
        EXIT;
 }
 
-void cl_page_disown0(const struct lu_env *env,
-                    struct cl_io *io, struct cl_page *cl_page)
+void __cl_page_disown(const struct lu_env *env, struct cl_page *cp)
 {
-       const struct cl_page_slice *slice;
+       struct page *vmpage;
        enum cl_page_state state;
-       int i;
-
-        ENTRY;
-       state = cl_page->cp_state;
-       PINVRNT(env, cl_page, state == CPS_OWNED ||
-               state == CPS_FREEING);
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page) ||
-               state == CPS_FREEING);
-       cl_page_owner_clear(cl_page);
 
-       if (state == CPS_OWNED)
-               cl_page_state_set(env, cl_page, CPS_CACHED);
-        /*
-        * Completion call-backs are executed in the bottom-up order, so that
-        * uppermost layer (llite), responsible for VFS/VM interaction runs
-        * last and can release locks safely.
-        */
-       cl_page_slice_for_each_reverse(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_disown != NULL)
-                       (*slice->cpl_ops->cpo_disown)(env, slice, io);
+       ENTRY;
+       if (cp->cp_type == CPT_CACHEABLE) {
+               cl_page_owner_clear(cp);
+               state = cp->cp_state;
+               PINVRNT(env, cp, state == CPS_OWNED || state == CPS_FREEING);
+               PINVRNT(env, cp, cl_page_invariant(cp) || state == CPS_FREEING);
+               if (state == CPS_OWNED)
+                       cl_page_state_set(env, cp, CPS_CACHED);
+               vmpage = cp->cp_vmpage;
+               LASSERT(vmpage != NULL);
+               LASSERT(PageLocked(vmpage));
+               unlock_page(vmpage);
        }
 
        EXIT;
 }
 
-/**
- * returns true, iff page is owned by the given io.
- */
+/* Returns true iff the page is owned by the given io. */
 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
 {
        struct cl_io *top = cl_io_top((struct cl_io *)io);
-       LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
+
+       LINVRNT(cl_object_same(pg->cp_obj, top->ci_obj));
        ENTRY;
-       RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
+       if (pg->cp_type != CPT_TRANSIENT)
+               RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
+       else
+               RETURN(pg->cp_owner == top);
 }
 EXPORT_SYMBOL(cl_page_is_owned);
 
@@ -616,74 +592,76 @@ EXPORT_SYMBOL(cl_page_is_owned);
  *             or, page was owned by another thread, or in IO.
  *
  * \see cl_page_disown()
- * \see cl_page_operations::cpo_own()
  * \see cl_page_own_try()
  * \see cl_page_own
  */
-static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
-                       struct cl_page *cl_page, int nonblock)
+static int __cl_page_own(const struct lu_env *env, struct cl_io *io,
+                        struct cl_page *cl_page, int nonblock)
 {
-       const struct cl_page_slice *slice;
-       int result = 0;
-       int i;
+       struct page *vmpage = cl_page->cp_vmpage;
+       int result;
 
        ENTRY;
        PINVRNT(env, cl_page, !cl_page_is_owned(cl_page, io));
-        io = cl_io_top(io);
 
-       if (cl_page->cp_state == CPS_FREEING) {
+       LASSERT(cl_page->cp_type != CPT_TRANSIENT);
+
+       if (cl_page->cp_type != CPT_TRANSIENT &&
+           cl_page->cp_state == CPS_FREEING) {
                result = -ENOENT;
                goto out;
        }
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_own)
-                       result = (*slice->cpl_ops->cpo_own)(env, slice,
-                                                           io, nonblock);
-               if (result != 0)
-                       break;
-       }
-       if (result > 0)
-               result = 0;
+       LASSERT(vmpage != NULL);
 
-       if (result == 0) {
-               PASSERT(env, cl_page, cl_page->cp_owner == NULL);
-               cl_page->cp_owner = cl_io_top(io);
-               cl_page_owner_set(cl_page);
-               if (cl_page->cp_state != CPS_FREEING) {
-                       cl_page_state_set(env, cl_page, CPS_OWNED);
-               } else {
-                       cl_page_disown0(env, io, cl_page);
-                       result = -ENOENT;
+       if (nonblock) {
+               if (!trylock_page(vmpage)) {
+                       result = -EAGAIN;
+                       goto out;
+               }
+
+               if (unlikely(PageWriteback(vmpage))) {
+                       unlock_page(vmpage);
+                       result = -EAGAIN;
+                       goto out;
                }
+       } else {
+               lock_page(vmpage);
+               wait_on_page_writeback(vmpage);
        }
 
+       PASSERT(env, cl_page, cl_page->cp_owner == NULL);
+       cl_page->cp_owner = cl_io_top(io);
+       cl_page_owner_set(cl_page);
+
+       if (cl_page->cp_state == CPS_FREEING) {
+               __cl_page_disown(env, cl_page);
+               result = -ENOENT;
+               goto out;
+       }
+
+       cl_page_state_set(env, cl_page, CPS_OWNED);
+
+       result = 0;
 out:
+       CDEBUG(D_INFO, "res %d\n", result);
        PINVRNT(env, cl_page, ergo(result == 0,
                cl_page_invariant(cl_page)));
        RETURN(result);
 }
 
-/**
- * Own a page, might be blocked.
- *
- * \see cl_page_own0()
- */
+/* Own a page, may block. (see __cl_page_own()) */
 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
 {
-        return cl_page_own0(env, io, pg, 0);
+       return __cl_page_own(env, io, pg, 0);
 }
 EXPORT_SYMBOL(cl_page_own);
 
-/**
- * Nonblock version of cl_page_own().
- *
- * \see cl_page_own0()
- */
+/* Nonblock version of cl_page_own(). (see __cl_page_own()) */
 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
-                    struct cl_page *pg)
+                   struct cl_page *pg)
 {
-        return cl_page_own0(env, io, pg, 1);
+       return __cl_page_own(env, io, pg, 1);
 }
 EXPORT_SYMBOL(cl_page_own_try);
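
With the slice methods gone, __cl_page_own() locks the vmpage directly: cl_page_own() may sleep in lock_page(), while cl_page_own_try() uses trylock_page() and returns -EAGAIN when the page is locked or under writeback. A hedged sketch of a caller that prefers the nonblocking variant (own_page() is an illustrative name):

    /* Sketch: try to own a page without blocking, falling back to the
     * blocking variant when the caller is allowed to sleep.
     */
    static int own_page(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, bool can_block)
    {
            int rc;

            rc = cl_page_own_try(env, io, pg);
            if (rc == -EAGAIN && can_block)
                    rc = cl_page_own(env, io, pg);  /* may sleep in lock_page() */

            /* -ENOENT means the page entered CPS_FREEING and must be skipped */
            return rc;
    }
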
 
@@ -693,32 +671,28 @@ EXPORT_SYMBOL(cl_page_own_try);
  *
  * Called when page is already locked by the hosting VM.
  *
- * \pre !cl_page_is_owned(cl_page, io)
- * \post cl_page_is_owned(cl_page, io)
- *
- * \see cl_page_operations::cpo_assume()
+ * \pre !cl_page_is_owned(cp, io)
+ * \post cl_page_is_owned(cp, io)
  */
 void cl_page_assume(const struct lu_env *env,
-                   struct cl_io *io, struct cl_page *cl_page)
+                   struct cl_io *io, struct cl_page *cp)
 {
-       const struct cl_page_slice *slice;
-       int i;
+       struct page *vmpage;
 
        ENTRY;
+       PINVRNT(env, cp, cl_object_same(cp->cp_obj, cl_io_top(io)->ci_obj));
 
-       PINVRNT(env, cl_page,
-               cl_object_same(cl_page->cp_obj, io->ci_obj));
-       io = cl_io_top(io);
+       LASSERT(cp->cp_type != CPT_TRANSIENT);
+       PASSERT(env, cp, cp->cp_owner == NULL);
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_assume != NULL)
-                       (*slice->cpl_ops->cpo_assume)(env, slice, io);
-       }
+       vmpage = cp->cp_vmpage;
+       LASSERT(vmpage != NULL);
+       LASSERT(PageLocked(vmpage));
+       wait_on_page_writeback(vmpage);
+       cp->cp_owner = cl_io_top(io);
+       cl_page_owner_set(cp);
+       cl_page_state_set(env, cp, CPS_OWNED);
 
-       PASSERT(env, cl_page, cl_page->cp_owner == NULL);
-       cl_page->cp_owner = cl_io_top(io);
-       cl_page_owner_set(cl_page);
-       cl_page_state_set(env, cl_page, CPS_OWNED);
        EXIT;
 }
 EXPORT_SYMBOL(cl_page_assume);
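
cl_page_assume() is for pages whose vmpage the VM has already locked (for example inside address_space callbacks): it only waits for writeback and records the owner rather than taking the page lock itself. A minimal sketch of the intended call-site shape; handle_locked_page() is a made-up name:

    /* Sketch: the caller already holds the page lock, so it assumes the
     * cl_page instead of calling cl_page_own().
     */
    static void handle_locked_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *cp)
    {
            LASSERT(PageLocked(cp->cp_vmpage));
            cl_page_assume(env, io, cp);    /* waits for writeback, sets CPS_OWNED */
            /* ... operate on the owned page ... */
            cl_page_unassume(env, io, cp);  /* back to CPS_CACHED, lock still held */
    }
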
@@ -729,29 +703,25 @@ EXPORT_SYMBOL(cl_page_assume);
  * Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
  * on the underlying VM page (as VM is supposed to do this itself).
  *
- * \pre   cl_page_is_owned(cl_page, io)
- * \post !cl_page_is_owned(cl_page, io)
- *
- * \see cl_page_assume()
+ * \pre   cl_page_is_owned(cp, io)
+ * \post !cl_page_is_owned(cp, io)
  */
 void cl_page_unassume(const struct lu_env *env,
-                     struct cl_io *io, struct cl_page *cl_page)
+                     struct cl_io *io, struct cl_page *cp)
 {
-       const struct cl_page_slice *slice;
-       int i;
+       struct page *vmpage;
 
-        ENTRY;
-       PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page));
+       ENTRY;
 
-       io = cl_io_top(io);
-       cl_page_owner_clear(cl_page);
-       cl_page_state_set(env, cl_page, CPS_CACHED);
+       LASSERT(cp->cp_type != CPT_TRANSIENT);
 
-       cl_page_slice_for_each_reverse(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_unassume != NULL)
-                       (*slice->cpl_ops->cpo_unassume)(env, slice, io);
-       }
+       PINVRNT(env, cp, cl_page_is_owned(cp, io));
+       PINVRNT(env, cp, cl_page_invariant(cp));
+       cl_page_owner_clear(cp);
+       cl_page_state_set(env, cp, CPS_CACHED);
+       vmpage = cp->cp_vmpage;
+       LASSERT(vmpage != NULL);
+       LASSERT(PageLocked(vmpage));
 
        EXIT;
 }
@@ -766,18 +736,16 @@ EXPORT_SYMBOL(cl_page_unassume);
  * \post !cl_page_is_owned(pg, io)
  *
  * \see cl_page_own()
- * \see cl_page_operations::cpo_disown()
  */
 void cl_page_disown(const struct lu_env *env,
-                    struct cl_io *io, struct cl_page *pg)
+                   struct cl_io *io, struct cl_page *pg)
 {
-       PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
-               pg->cp_state == CPS_FREEING);
+       if (pg->cp_type != CPT_TRANSIENT) {
+               PINVRNT(env, pg, cl_page_is_owned(pg, cl_io_top(io)) ||
+                       pg->cp_state == CPS_FREEING);
+       }
 
-       ENTRY;
-       io = cl_io_top(io);
-       cl_page_disown0(env, io, pg);
-       EXIT;
+       __cl_page_disown(env, pg);
 }
 EXPORT_SYMBOL(cl_page_disown);
 
@@ -792,43 +760,53 @@ EXPORT_SYMBOL(cl_page_disown);
  * \see cl_page_operations::cpo_discard()
  */
 void cl_page_discard(const struct lu_env *env,
-                    struct cl_io *io, struct cl_page *cl_page)
+                    struct cl_io *io, struct cl_page *cp)
 {
+       struct page *vmpage;
        const struct cl_page_slice *slice;
        int i;
 
-       PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page));
-
-       cl_page_slice_for_each(cl_page, slice, i) {
+       cl_page_slice_for_each(cp, slice, i) {
                if (slice->cpl_ops->cpo_discard != NULL)
                        (*slice->cpl_ops->cpo_discard)(env, slice, io);
        }
+
+       if (cp->cp_type == CPT_CACHEABLE) {
+               PINVRNT(env, cp, cl_page_is_owned(cp, io));
+               PINVRNT(env, cp, cl_page_invariant(cp));
+               vmpage = cp->cp_vmpage;
+               LASSERT(vmpage != NULL);
+               LASSERT(PageLocked(vmpage));
+               generic_error_remove_folio(vmpage->mapping, page_folio(vmpage));
+       } else {
+               cl_page_delete(env, cp);
+       }
 }
 EXPORT_SYMBOL(cl_page_discard);
 
 /**
  * Version of cl_page_delete() that can be called for not fully constructed
- * cl_pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
+ * cl_pages, e.g. in an error handling cl_page_find()->__cl_page_delete()
  * path. Doesn't check cl_page invariant.
  */
-static void cl_page_delete0(const struct lu_env *env,
-                           struct cl_page *cl_page)
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *cp)
 {
        const struct cl_page_slice *slice;
        int i;
 
-        ENTRY;
-
-       PASSERT(env, cl_page, cl_page->cp_state != CPS_FREEING);
+       ENTRY;
 
        /*
         * Sever all ways to obtain new pointers to @pg.
+        * Transient pages already can't be found because they're not in cache.
         */
-       cl_page_owner_clear(cl_page);
-       cl_page_state_set0(env, cl_page, CPS_FREEING);
+       if (cp->cp_type != CPT_TRANSIENT) {
+               PASSERT(env, cp, cp->cp_state != CPS_FREEING);
+               cl_page_owner_clear(cp);
+               __cl_page_state_set(env, cp, CPS_FREEING);
+       }
 
-       cl_page_slice_for_each_reverse(cl_page, slice, i) {
+       cl_page_slice_for_each_reverse(cp, slice, i) {
                if (slice->cpl_ops->cpo_delete != NULL)
                        (*slice->cpl_ops->cpo_delete)(env, slice);
        }
@@ -864,60 +842,11 @@ void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
 {
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
-       cl_page_delete0(env, pg);
+       __cl_page_delete(env, pg);
        EXIT;
 }
 EXPORT_SYMBOL(cl_page_delete);
 
-/**
- * Marks page up-to-date.
- *
- * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the \a uptodate argument.
- *
- * \see cl_page_operations::cpo_export()
- */
-void cl_page_export(const struct lu_env *env, struct cl_page *cl_page,
-                   int uptodate)
-{
-       const struct cl_page_slice *slice;
-       int i;
-
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page));
-
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_export != NULL)
-                       (*slice->cpl_ops->cpo_export)(env, slice, uptodate);
-       }
-}
-EXPORT_SYMBOL(cl_page_export);
-
-/**
- * Returns true, if \a page is VM locked in a suitable sense by the calling
- * thread.
- */
-int cl_page_is_vmlocked(const struct lu_env *env,
-                       const struct cl_page *cl_page)
-{
-        const struct cl_page_slice *slice;
-       int result;
-
-       ENTRY;
-       slice = cl_page_slice_get(cl_page, 0);
-       PASSERT(env, cl_page, slice->cpl_ops->cpo_is_vmlocked != NULL);
-        /*
-        * Call ->cpo_is_vmlocked() directly instead of going through
-         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
-         * cl_page_invariant().
-         */
-        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
-       PASSERT(env, cl_page, result == -EBUSY || result == -ENODATA);
-
-       RETURN(result == -EBUSY);
-}
-EXPORT_SYMBOL(cl_page_is_vmlocked);
-
 void cl_page_touch(const struct lu_env *env,
                   const struct cl_page *cl_page, size_t to)
 {
@@ -937,64 +866,59 @@ EXPORT_SYMBOL(cl_page_touch);
 
 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
 {
-        ENTRY;
-        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
+       ENTRY;
+       RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
 }
 
 static void cl_page_io_start(const struct lu_env *env,
-                             struct cl_page *pg, enum cl_req_type crt)
+                            struct cl_page *pg, enum cl_req_type crt)
 {
-        /*
-         * Page is queued for IO, change its state.
-         */
-        ENTRY;
-        cl_page_owner_clear(pg);
-        cl_page_state_set(env, pg, cl_req_type_state(crt));
-        EXIT;
+       /* Page is queued for IO, change its state. */
+       ENTRY;
+       if (pg->cp_type != CPT_TRANSIENT) {
+               cl_page_owner_clear(pg);
+               cl_page_state_set(env, pg, cl_req_type_state(crt));
+       }
+       EXIT;
 }
 
 /**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
+ * Prepares page for immediate transfer. Returns -EALREADY if this page
+ * should be omitted from the transfer.
  */
 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
-                struct cl_page *cl_page, enum cl_req_type crt)
+                struct cl_page *cp, enum cl_req_type crt)
 {
-       const struct cl_page_slice *slice;
-       int result = 0;
-       int i;
-
-       PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
-       PINVRNT(env, cl_page, cl_page_invariant(cl_page));
-       PINVRNT(env, cl_page, crt < CRT_NR);
-
-        /*
-        * this has to be called bottom-to-top, so that llite can set up
-        * PG_writeback without risking other layers deciding to skip this
-        * page.
-        */
-       if (crt >= CRT_NR)
-               return -EINVAL;
+       struct page *vmpage = cp->cp_vmpage;
+       int rc;
+
+       if (cp->cp_type == CPT_TRANSIENT)
+               GOTO(start, rc = 0);
+       PASSERT(env, cp, crt < CRT_NR);
+       PINVRNT(env, cp, cl_page_is_owned(cp, io));
+       PINVRNT(env, cp, cl_page_invariant(cp));
+
+       if (crt == CRT_READ) {
+               if (PageUptodate(vmpage))
+                       GOTO(out, rc = -EALREADY);
+       } else {
+               LASSERT(PageLocked(vmpage));
+               LASSERT(!PageDirty(vmpage));
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->cpo_own)
-                       result = (*slice->cpl_ops->io[crt].cpo_prep)(env,
-                                                                    slice,
-                                                                    io);
-               if (result != 0)
-                       break;
+               /* the ll_writepage path is not a sync write, so we need to
+                * set the page writeback flag here
+                */
+               if (cp->cp_sync_io == NULL)
+                       set_page_writeback(vmpage);
        }
+start:
 
-       if (result >= 0) {
-               result = 0;
-               cl_page_io_start(env, cl_page, crt);
-       }
+       cl_page_io_start(env, cp, crt);
+       rc = 0;
+out:
+       CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
 
-       CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
-       return result;
+       return rc;
 }
 EXPORT_SYMBOL(cl_page_prep);
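
cl_page_prep() now decides by itself whether a page joins the transfer: for reads an already-uptodate vmpage yields -EALREADY, and for non-sync writes the writeback flag is set here. A hedged sketch of how a submission loop might treat the return codes (prep_pages() and its queue are illustrative; real submission goes through cl_io):

    /* Sketch: prepare a caller-private list of owned pages for transfer. */
    static int prep_pages(const struct lu_env *env, struct cl_io *io,
                          struct list_head *queue, enum cl_req_type crt)
    {
            struct cl_page *cp;
            int rc;

            list_for_each_entry(cp, queue, cp_batch) {
                    rc = cl_page_prep(env, io, cp, crt);
                    if (rc == -EALREADY)
                            continue;       /* already uptodate, skip this page */
                    if (rc != 0)
                            return rc;      /* unexpected error, abort */
                    /* rc == 0: the page moved to CPS_PAGEIN or CPS_PAGEOUT */
            }
            return 0;
    }
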
 
@@ -1021,12 +945,15 @@ void cl_page_completion(const struct lu_env *env,
        struct cl_sync_io *anchor = cl_page->cp_sync_io;
        int i;
 
-        ENTRY;
+       ENTRY;
        PASSERT(env, cl_page, crt < CRT_NR);
-       PASSERT(env, cl_page, cl_page->cp_state == cl_req_type_state(crt));
+       if (cl_page->cp_type != CPT_TRANSIENT)
+               PASSERT(env, cl_page,
+                       cl_page->cp_state == cl_req_type_state(crt));
 
        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
-       cl_page_state_set(env, cl_page, CPS_CACHED);
+       if (cl_page->cp_type != CPT_TRANSIENT)
+               cl_page_state_set(env, cl_page, CPS_CACHED);
        if (crt >= CRT_NR)
                return;
 
@@ -1051,36 +978,54 @@ EXPORT_SYMBOL(cl_page_completion);
  *
  * \pre  cl_page->cp_state == CPS_CACHED
  * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
  */
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
-                       enum cl_req_type crt)
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *cp,
+                      enum cl_req_type crt)
 {
-       const struct cl_page_slice *slice;
-       int result = 0;
-       int i;
+       struct page *vmpage = cp->cp_vmpage;
+       bool unlock = false;
+       int rc;
 
-        ENTRY;
-       PINVRNT(env, cl_page, crt < CRT_NR);
-       if (crt >= CRT_NR)
-               RETURN(-EINVAL);
+       ENTRY;
+       PASSERT(env, cp, crt == CRT_WRITE);
+
+       if (cp->cp_type == CPT_TRANSIENT)
+               GOTO(out, rc = 0);
+
+       lock_page(vmpage);
+       PASSERT(env, cp, PageUptodate(vmpage));
+       unlock = true;
+
+       if (clear_page_dirty_for_io(vmpage)) {
+               LASSERT(cp->cp_state == CPS_CACHED);
+               /* This actually clears the dirty bit in the radix tree  */
+               set_page_writeback(vmpage);
+               CL_PAGE_HEADER(D_PAGE, env, cp, "readied\n");
+               rc = 0;
+       } else if (cp->cp_state == CPS_PAGEOUT) {
+               /* is it possible for osc_flush_async_page()
+                * to already make it ready?
+                */
+               rc = -EALREADY;
+       } else {
+               CL_PAGE_DEBUG(D_ERROR, env, cp,
+                             "unexpected page state %d\n",
+                             cp->cp_state);
+               LBUG();
+       }
 
-       cl_page_slice_for_each(cl_page, slice, i) {
-               if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
-                       result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env, slice);
-               if (result != 0)
-                       break;
+out:
+       if (rc == 0) {
+               PASSERT(env, cp, cp->cp_state == CPS_CACHED);
+               cl_page_io_start(env, cp, crt);
        }
 
-       if (result >= 0) {
-               result = 0;
-               PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
-               cl_page_io_start(env, cl_page, crt);
-        }
-       CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
+       if (unlock)
+               unlock_page(vmpage);
 
-       RETURN(result);
+       CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+       return rc;
 }
 EXPORT_SYMBOL(cl_page_make_ready);
 
@@ -1100,6 +1045,7 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
        int i;
 
        ENTRY;
+       LASSERT(cl_page->cp_type != CPT_TRANSIENT);
        PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
        PINVRNT(env, cl_page, cl_page_invariant(cl_page));
 
@@ -1123,7 +1069,7 @@ EXPORT_SYMBOL(cl_page_flush);
  * \see cl_page_operations::cpo_clip()
  */
 void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
-                  int from, int to)
+                 int from, int to)
 {
        const struct cl_page_slice *slice;
        int i;
@@ -1138,65 +1084,52 @@ void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
 }
 EXPORT_SYMBOL(cl_page_clip);
 
-/**
- * Prints human readable representation of \a pg to the \a f.
- */
+/* Prints human readable representation of \a pg to the \a f. */
 void cl_page_header_print(const struct lu_env *env, void *cookie,
-                          lu_printer_t printer, const struct cl_page *pg)
+                         lu_printer_t printer, const struct cl_page *pg)
 {
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %p]\n",
-                  pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+                  pg, refcount_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_type,
                   pg->cp_owner);
 }
 EXPORT_SYMBOL(cl_page_header_print);
 
-/**
- * Prints human readable representation of \a cl_page to the \a f.
- */
+/* Prints human readable representation of \a cl_page to the \a f. */
 void cl_page_print(const struct lu_env *env, void *cookie,
-                  lu_printer_t printer, const struct cl_page *cl_page)
+                  lu_printer_t printer, const struct cl_page *cp)
 {
+       struct page *vmpage = cp->cp_vmpage;
        const struct cl_page_slice *slice;
        int result = 0;
        int i;
 
-       cl_page_header_print(env, cookie, printer, cl_page);
-       cl_page_slice_for_each(cl_page, slice, i) {
+       cl_page_header_print(env, cookie, printer, cp);
+
+       (*printer)(env, cookie, "vmpage @%p", vmpage);
+
+       if (vmpage != NULL) {
+               (*printer)(env, cookie, " %lx %d:%d %lx %lu %slru",
+                          (long)vmpage->flags, page_count(vmpage),
+                          page_mapcount(vmpage), vmpage->private,
+                          page_index(vmpage),
+                          list_empty(&vmpage->lru) ? "not-" : "");
+       }
+
+       (*printer)(env, cookie, "\n");
+
+       cl_page_slice_for_each(cp, slice, i) {
                if (slice->cpl_ops->cpo_print != NULL)
                        result = (*slice->cpl_ops->cpo_print)(env, slice,
-                                                            cookie, printer);
+                                                             cookie, printer);
                if (result != 0)
                        break;
        }
-       (*printer)(env, cookie, "end page@%p\n", cl_page);
-}
-EXPORT_SYMBOL(cl_page_print);
-
-/**
- * Converts a byte offset within object \a obj into a page index.
- */
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
-{
-       return (loff_t)idx << PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_offset);
 
-/**
- * Converts a page index into a byte offset within object \a obj.
- */
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
-{
-       return offset >> PAGE_SHIFT;
+       (*printer)(env, cookie, "end page@%p\n", cp);
 }
-EXPORT_SYMBOL(cl_index);
-
-size_t cl_page_size(const struct cl_object *obj)
-{
-       return 1UL << PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_page_size);
+EXPORT_SYMBOL(cl_page_print);
 
 /**
  * Adds page slice to the compound page.
@@ -1218,7 +1151,6 @@ void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
        LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
        LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
        cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
-       slice->cpl_obj  = obj;
        slice->cpl_ops  = ops;
        slice->cpl_page = cl_page;
 
@@ -1226,9 +1158,7 @@ void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
 }
 EXPORT_SYMBOL(cl_page_slice_add);
 
-/**
- * Allocate and initialize cl_cache, called by ll_init_sbi().
- */
+/* Allocate and initialize cl_cache, called by ll_init_sbi(). */
 struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
 {
        struct cl_client_cache  *cache = NULL;
@@ -1239,28 +1169,24 @@ struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
                RETURN(NULL);
 
        /* Initialize cache data */
-       atomic_set(&cache->ccc_users, 1);
+       refcount_set(&cache->ccc_users, 1);
        cache->ccc_lru_max = lru_page_max;
        atomic_long_set(&cache->ccc_lru_left, lru_page_max);
        spin_lock_init(&cache->ccc_lru_lock);
        INIT_LIST_HEAD(&cache->ccc_lru);
 
-       /* turn unstable check off by default as it impacts performance */
-       cache->ccc_unstable_check = 0;
+       cache->ccc_unstable_check = 1;
        atomic_long_set(&cache->ccc_unstable_nr, 0);
-       init_waitqueue_head(&cache->ccc_unstable_waitq);
        mutex_init(&cache->ccc_max_cache_mb_lock);
 
        RETURN(cache);
 }
 EXPORT_SYMBOL(cl_cache_init);
 
-/**
- * Increase cl_cache refcount
- */
+/* Increase cl_cache refcount */
 void cl_cache_incref(struct cl_client_cache *cache)
 {
-       atomic_inc(&cache->ccc_users);
+       refcount_inc(&cache->ccc_users);
 }
 EXPORT_SYMBOL(cl_cache_incref);
 
@@ -1271,7 +1197,7 @@ EXPORT_SYMBOL(cl_cache_incref);
  */
 void cl_cache_decref(struct cl_client_cache *cache)
 {
-       if (atomic_dec_and_test(&cache->ccc_users))
+       if (refcount_dec_and_test(&cache->ccc_users))
                OBD_FREE(cache, sizeof(*cache));
 }
 EXPORT_SYMBOL(cl_cache_decref);
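
ccc_users is now a refcount_t, so the client cache follows the usual init-to-1 / incref-per-sharer / free-on-last-decref lifetime. A short usage sketch (setup_cache()/teardown_cache() are illustrative, not the actual llite call sites):

    /* Illustrative lifetime of a cl_client_cache shared by two users. */
    static struct cl_client_cache *setup_cache(unsigned long lru_max)
    {
            struct cl_client_cache *cache;

            cache = cl_cache_init(lru_max);         /* ccc_users == 1 */
            if (cache == NULL)
                    return NULL;
            cl_cache_incref(cache);                 /* second user, ccc_users == 2 */
            return cache;
    }

    static void teardown_cache(struct cl_client_cache *cache)
    {
            cl_cache_decref(cache);                 /* drop one user */
            cl_cache_decref(cache);                 /* last reference frees it */
    }
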