-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
 /*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Client Lustre Page.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
+#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
-#include <libcfs/list.h>
#include <cl_object.h>
#include "cl_internal.h"
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix);
-
-static cfs_mem_cache_t *cl_page_kmem = NULL;
-
-static struct lu_kmem_descr cl_page_caches[] = {
- {
- .ckd_cache = &cl_page_kmem,
- .ckd_name = "cl_page_kmem",
- .ckd_size = sizeof (struct cl_page)
- },
- {
- .ckd_cache = NULL
- }
-};
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *pg);
+/* per-bufsize cl_page kmem caches, created lazily in __cl_page_alloc() */
+static struct kmem_cache *cl_page_kmem_array[16];
+static unsigned short cl_page_kmem_size_array[16];
+static DEFINE_MUTEX(cl_page_kmem_mutex);
#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr) \
- do { \
- if (unlikely(!(expr))) { \
- CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
- LASSERT(0); \
- } \
- } while (0)
+do { \
+ if (unlikely(!(expr))) { \
+ CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
+ LASSERT(0); \
+ } \
+} while (0)
#else /* !LIBCFS_DEBUG */
-# define PASSERT(env, page, exp) \
- ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
+#define PASSERT(env, page, exp) \
+ ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */
-#ifdef INVARIANT_CHECK
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr) \
- do { \
- if (unlikely(!(expr))) { \
- CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
- LINVRNT(0); \
- } \
- } while (0)
-#else /* !INVARIANT_CHECK */
+do { \
+ if (unlikely(!(expr))) { \
+ CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
+ LINVRNT(0); \
+ } \
+} while (0)
+#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
- ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
-#endif /* !INVARIANT_CHECK */
+ ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
+#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
-/**
- * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
+/* Disable page statistics by default due to the huge performance penalty. */
+static void cs_page_inc(const struct cl_object *obj,
+ enum cache_stats_item item)
+{
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+ atomic_inc(&cl_object_site(obj)->cs_pages.cs_stats[item]);
+#endif
+}
+
+static void cs_page_dec(const struct cl_object *obj,
+ enum cache_stats_item item)
+{
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+ atomic_dec(&cl_object_site(obj)->cs_pages.cs_stats[item]);
+#endif
+}
+
+static void cs_pagestate_inc(const struct cl_object *obj,
+ enum cl_page_state state)
+{
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+ atomic_inc(&cl_object_site(obj)->cs_pages_state[state]);
+#endif
+}
+
+static void cs_pagestate_dec(const struct cl_object *obj,
+ enum cl_page_state state)
{
- while (page->cp_parent != NULL)
- page = page->cp_parent;
- return page;
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+ atomic_dec(&cl_object_site(obj)->cs_pages_state[state]);
+#endif
}
/**
*
* This function can be used to obtain initial reference to previously
* unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by locking page radix-tree
- * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
+ * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
* associated with \a page.
*
* Use with care! Not exported.
*/
static void cl_page_get_trust(struct cl_page *page)
{
- /*
- * Checkless version for trusted users.
- */
- if (cfs_atomic_inc_return(&page->cp_ref) == 1)
- cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+ LASSERT(refcount_read(&page->cp_ref) > 0);
+ refcount_inc(&page->cp_ref);
}
-/**
- * Returns a slice within a page, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_lock_at()
- */
-static const struct cl_page_slice *
-cl_page_at_trusted(const struct cl_page *page,
- const struct lu_device_type *dtype)
+static struct cl_page_slice *
+cl_page_slice_get(const struct cl_page *cl_page, int index)
{
- const struct cl_page_slice *slice;
-
-#ifdef INVARIANT_CHECK
- struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
- if (!cfs_atomic_read(&page->cp_ref))
- LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
-#endif
- ENTRY;
-
- page = cl_page_top_trusted((struct cl_page *)page);
- do {
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- RETURN(slice);
- }
- page = page->cp_child;
- } while (page != NULL);
- RETURN(NULL);
+ if (index < 0 || index >= cl_page->cp_layer_count)
+ return NULL;
+
+ /* To keep the cp_layer_offset values under 256 bytes, the offsets are
+ * measured from the end of struct cl_page.
+ */
+ return (struct cl_page_slice *)((char *)cl_page + sizeof(*cl_page) +
+ cl_page->cp_layer_offset[index]);
}
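+
+/*
+ * Resulting memory layout (an illustrative sketch, not authoritative): the
+ * per-layer slices are co-allocated right after struct cl_page itself, and
+ * cp_layer_offset[] records where each slice starts relative to the end of
+ * the structure:
+ *
+ *	+----------------+-----------+-----------+-----
+ *	| struct cl_page | slice [0] | slice [1] | ...
+ *	+----------------+-----------+-----------+-----
+ *	                 ^offset[0]  ^offset[1]
+ */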
-/**
- * Returns a page with given index in the given object, or NULL if no page is
- * found. Acquires a reference on \a page.
- *
- * Locking: called under cl_object_header::coh_page_guard spin-lock.
- */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
-{
- struct cl_page *page;
-
- LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
+#define cl_page_slice_for_each(cl_page, slice, i) \
+ for (i = 0, slice = cl_page_slice_get(cl_page, 0); \
+ i < (cl_page)->cp_layer_count; \
+ slice = cl_page_slice_get(cl_page, ++i))
- page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page != NULL) {
- cl_page_get_trust(page);
- }
- return page;
-}
-EXPORT_SYMBOL(cl_page_lookup);
+#define cl_page_slice_for_each_reverse(cl_page, slice, i) \
+ for (i = (cl_page)->cp_layer_count - 1, \
+ slice = cl_page_slice_get(cl_page, i); i >= 0; \
+ slice = cl_page_slice_get(cl_page, --i))
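+
+/*
+ * Usage sketch (illustrative only): walking the slices from the top layer
+ * down and invoking an optional per-layer method, here a hypothetical
+ * cpo_example() operation:
+ *
+ *	const struct cl_page_slice *slice;
+ *	int i;
+ *
+ *	cl_page_slice_for_each(cl_page, slice, i) {
+ *		if (slice->cpl_ops->cpo_example != NULL)
+ *			(*slice->cpl_ops->cpo_example)(env, slice);
+ *	}
+ */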
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * \param resched If not NULL, then we give up before hogging CPU for too
- * long and set *resched = 1, in that case caller should implement a retry
- * logic.
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- *
- * Return at least one page in @queue unless there is no covered page.
- */
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- struct cl_page_list *queue)
+static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
{
- struct cl_object_header *hdr;
- struct cl_page *page;
- struct cl_page **pvec;
- const struct cl_page_slice *slice;
- const struct lu_device_type *dtype;
- pgoff_t idx;
- unsigned int nr;
- unsigned int i;
- unsigned int j;
- int res = CLP_GANG_OKAY;
- int tree_lock = 1;
- ENTRY;
-
- idx = start;
- hdr = cl_object_header(obj);
- pvec = cl_env_info(env)->clt_pvec;
- dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- cfs_spin_lock(&hdr->coh_page_guard);
- while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
- idx, CLT_PVEC_SIZE)) > 0) {
- int end_of_region = 0;
- idx = pvec[nr - 1]->cp_index + 1;
- for (i = 0, j = 0; i < nr; ++i) {
- page = pvec[i];
- pvec[i] = NULL;
-
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (page->cp_index > end) {
- end_of_region = 1;
- break;
- }
- if (page->cp_state == CPS_FREEING)
- continue;
-
- slice = cl_page_at_trusted(page, dtype);
- /*
- * Pages for lsm-less file has no underneath sub-page
- * for osc, in case of ...
- */
- PASSERT(env, page, slice != NULL);
-
- page = slice->cpl_page;
- /*
- * Can safely call cl_page_get_trust() under
- * radix-tree spin-lock.
- *
- * XXX not true, because @page is from object another
- * than @hdr and protected by different tree lock.
- */
- cl_page_get_trust(page);
- lu_ref_add_atomic(&page->cp_reference,
- "page_list", cfs_current());
- pvec[j++] = page;
- }
-
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
- cfs_spin_unlock(&hdr->coh_page_guard);
- tree_lock = 0;
-
- for (i = 0; i < j; ++i) {
- page = pvec[i];
- if (res == CLP_GANG_OKAY) {
- typeof(cl_page_own) *page_own;
-
- page_own = queue->pl_nr ?
- cl_page_own_try : cl_page_own;
- if (page_own(env, io, page) == 0) {
- cl_page_list_add(queue, page);
- } else if (page->cp_state != CPS_FREEING) {
- /* cl_page_own() won't fail unless
- * the page is being freed. */
- LASSERT(queue->pl_nr != 0);
- res = CLP_GANG_AGAIN;
- }
- }
- lu_ref_del(&page->cp_reference,
- "page_list", cfs_current());
- cl_page_put(env, page);
- }
- if (nr < CLT_PVEC_SIZE || end_of_region)
- break;
-
- /* if the number of pages is zero, this will mislead the caller
- * that there is no page any more. */
- if (queue->pl_nr && cfs_need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
-
- cfs_spin_lock(&hdr->coh_page_guard);
- tree_lock = 1;
- }
- if (tree_lock)
- cfs_spin_unlock(&hdr->coh_page_guard);
- RETURN(res);
+ int index = cl_page->cp_kmem_index;
+
+ if (index >= 0) {
+ LASSERT(index < ARRAY_SIZE(cl_page_kmem_array));
+ LASSERT(cl_page_kmem_size_array[index] == bufsize);
+ OBD_SLAB_FREE(cl_page, cl_page_kmem_array[index], bufsize);
+ } else {
+ OBD_FREE(cl_page, bufsize);
+ }
}
-EXPORT_SYMBOL(cl_page_gang_lookup);
-static void cl_page_free(const struct lu_env *env, struct cl_page *page)
+static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
+ struct folio_batch *fbatch)
{
- struct cl_object *obj = page->cp_obj;
- struct cl_site *site = cl_object_site(obj);
-
- PASSERT(env, page, cfs_list_empty(&page->cp_batch));
- PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, page->cp_req == NULL);
- PASSERT(env, page, page->cp_parent == NULL);
- PASSERT(env, page, page->cp_state == CPS_FREEING);
-
- ENTRY;
- cfs_might_sleep();
- while (!cfs_list_empty(&page->cp_layers)) {
- struct cl_page_slice *slice;
-
- slice = cfs_list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
- cfs_list_del_init(page->cp_layers.next);
- slice->cpl_ops->cpo_fini(env, slice);
- }
- cfs_atomic_dec(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
- cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
-#endif
- lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
- cl_object_put(env, obj);
- lu_ref_fini(&page->cp_reference);
- OBD_SLAB_FREE_PTR(page, cl_page_kmem);
- EXIT;
+ struct cl_object *obj = cp->cp_obj;
+ unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
+ struct page *vmpage;
+
+ ENTRY;
+ PASSERT(env, cp, list_empty(&cp->cp_batch));
+ PASSERT(env, cp, cp->cp_owner == NULL);
+ if (cp->cp_type != CPT_TRANSIENT)
+ PASSERT(env, cp, cp->cp_state == CPS_FREEING);
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ /* vmpage->private was already cleared when page was
+ * moved into CPS_FREEING state.
+ */
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT((struct cl_page *)vmpage->private != cp);
+
+ if (fbatch != NULL) {
+ if (!folio_batch_add_page(fbatch, vmpage))
+ folio_batch_release(fbatch);
+ } else {
+ put_page(vmpage);
+ }
+ }
+
+ cp->cp_layer_count = 0;
+ cs_page_dec(obj, CS_total);
+ if (cp->cp_type != CPT_TRANSIENT)
+ cs_pagestate_dec(obj, cp->cp_state);
+ lu_object_ref_del_at(&obj->co_lu, &cp->cp_obj_ref, "cl_page", cp);
+ if (cp->cp_type != CPT_TRANSIENT)
+ cl_object_put(env, obj);
+ lu_ref_fini(&cp->cp_reference);
+ __cl_page_free(cp, bufsize);
+ EXIT;
}
-/**
- * Helper function updating page state. This is the only place in the code
- * where cl_page::cp_state field is mutated.
- */
-static inline void cl_page_state_set_trust(struct cl_page *page,
- enum cl_page_state state)
+static struct cl_page *__cl_page_alloc(struct cl_object *o)
{
- /* bypass const. */
- *(enum cl_page_state *)&page->cp_state = state;
+ int i = 0;
+ struct cl_page *cl_page = NULL;
+ unsigned short bufsize = cl_object_header(o)->coh_page_bufsize;
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_PAGE_ALLOC))
+ return NULL;
+
+check:
+ /* the number of entries in cl_page_kmem_array is expected to
+ * be only 2-3, so the lookup overhead should be low.
+ */
+ for ( ; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
+ if (smp_load_acquire(&cl_page_kmem_size_array[i]) == bufsize) {
+ OBD_SLAB_ALLOC_GFP(cl_page, cl_page_kmem_array[i],
+ bufsize, GFP_NOFS);
+ if (cl_page)
+ cl_page->cp_kmem_index = i;
+ return cl_page;
+ }
+ if (cl_page_kmem_size_array[i] == 0)
+ break;
+ }
+
+ if (i < ARRAY_SIZE(cl_page_kmem_array)) {
+ char cache_name[32];
+
+ mutex_lock(&cl_page_kmem_mutex);
+ if (cl_page_kmem_size_array[i]) {
+ mutex_unlock(&cl_page_kmem_mutex);
+ goto check;
+ }
+ snprintf(cache_name, sizeof(cache_name),
+ "cl_page_kmem-%u", bufsize);
+ cl_page_kmem_array[i] =
+ kmem_cache_create(cache_name, bufsize,
+ 0, 0, NULL);
+ if (cl_page_kmem_array[i] == NULL) {
+ mutex_unlock(&cl_page_kmem_mutex);
+ return NULL;
+ }
+ smp_store_release(&cl_page_kmem_size_array[i], bufsize);
+ mutex_unlock(&cl_page_kmem_mutex);
+ goto check;
+ } else {
+ OBD_ALLOC_GFP(cl_page, bufsize, GFP_NOFS);
+ if (cl_page)
+ cl_page->cp_kmem_index = -1;
+ }
+
+ return cl_page;
}
-static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
- pgoff_t ind, struct page *vmpage,
- enum cl_page_type type, struct cl_page **out)
+struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
+ pgoff_t ind, struct page *vmpage,
+ enum cl_page_type type)
{
- struct cl_page *page;
- struct cl_page *err = NULL;
- struct lu_object_header *head;
- struct cl_site *site = cl_object_site(o);
- int result;
-
- ENTRY;
- result = +1;
- OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
- if (page != NULL) {
- cfs_atomic_set(&page->cp_ref, 1);
- page->cp_obj = o;
- cl_object_get(o);
- page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
- "cl_page", page);
- page->cp_index = ind;
- cl_page_state_set_trust(page, CPS_CACHED);
- page->cp_type = type;
- CFS_INIT_LIST_HEAD(&page->cp_layers);
- CFS_INIT_LIST_HEAD(&page->cp_batch);
- CFS_INIT_LIST_HEAD(&page->cp_flight);
- cfs_mutex_init(&page->cp_mutex);
- lu_ref_init(&page->cp_reference);
- head = o->co_lu.lo_header;
- cfs_list_for_each_entry(o, &head->loh_layers,
- co_lu.lo_linkage) {
- if (o->co_ops->coo_page_init != NULL) {
- err = o->co_ops->coo_page_init(env, o,
- page, vmpage);
- if (err != NULL) {
- cl_page_delete0(env, page, 0);
- cl_page_free(env, page);
- page = err;
- break;
- }
- }
- }
- if (err == NULL) {
- cfs_atomic_inc(&site->cs_pages.cs_busy);
- cfs_atomic_inc(&site->cs_pages.cs_total);
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
- cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
-#endif
- cfs_atomic_inc(&site->cs_pages.cs_created);
- result = 0;
- }
- } else
- page = ERR_PTR(-ENOMEM);
- *out = page;
- RETURN(result);
+ struct cl_page *cl_page;
+ struct cl_object *head;
+
+ ENTRY;
+
+ cl_page = __cl_page_alloc(o);
+ if (cl_page != NULL) {
+ int result = 0;
+
+ /* Please fix the cl_page::cp_state/cp_type declarations if
+ * these assertions fail in the future.
+ */
+ BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
+ BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */
+ refcount_set(&cl_page->cp_ref, 1);
+ cl_page->cp_obj = o;
+ if (type != CPT_TRANSIENT)
+ cl_object_get(o);
+ lu_object_ref_add_at(&o->co_lu, &cl_page->cp_obj_ref,
+ "cl_page", cl_page);
+ cl_page->cp_vmpage = vmpage;
+ if (cl_page->cp_type != CPT_TRANSIENT)
+ cl_page->cp_state = CPS_CACHED;
+ cl_page->cp_type = type;
+ if (type == CPT_TRANSIENT)
+ /* the correct inode will be added later in ll_direct_rw_pages() */
+ cl_page->cp_inode = NULL;
+ else
+ cl_page->cp_inode = page2inode(vmpage);
+ INIT_LIST_HEAD(&cl_page->cp_batch);
+ lu_ref_init(&cl_page->cp_reference);
+ head = o;
+ cl_page->cp_page_index = ind;
+ cl_object_for_each(o, head) {
+ if (o->co_ops->coo_page_init != NULL) {
+ result = o->co_ops->coo_page_init(env, o,
+ cl_page, ind);
+ if (result != 0) {
+ __cl_page_delete(env, cl_page);
+ cl_page_free(env, cl_page, NULL);
+ cl_page = ERR_PTR(result);
+ break;
+ }
+ }
+ }
+ if (result == 0) {
+ cs_page_inc(o, CS_total);
+ cs_page_inc(o, CS_create);
+ cs_pagestate_dec(o, CPS_CACHED);
+ }
+ } else {
+ cl_page = ERR_PTR(-ENOMEM);
+ }
+ RETURN(cl_page);
}
/**
*
* \see cl_object_find(), cl_lock_find()
*/
-static struct cl_page *cl_page_find0(const struct lu_env *env,
- struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type,
- struct cl_page *parent)
-{
- struct cl_page *page = NULL;
- struct cl_page *ghost = NULL;
- struct cl_object_header *hdr;
- struct cl_site *site = cl_object_site(o);
- int err;
-
- LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
- cfs_might_sleep();
-
- ENTRY;
-
- hdr = cl_object_header(o);
- cfs_atomic_inc(&site->cs_pages.cs_lookup);
-
- CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
- idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
- /* fast path. */
- if (type == CPT_CACHEABLE) {
- /*
- * cl_vmpage_page() can be called here without any locks as
- *
- * - "vmpage" is locked (which prevents ->private from
- * concurrent updates), and
- *
- * - "o" cannot be destroyed while current thread holds a
- * reference on it.
- */
- page = cl_vmpage_page(vmpage, o);
- PINVRNT(env, page,
- ergo(page != NULL,
- cl_page_vmpage(env, page) == vmpage &&
- (void *)radix_tree_lookup(&hdr->coh_tree,
- idx) == page));
- }
-
- if (page != NULL) {
- cfs_atomic_inc(&site->cs_pages.cs_hit);
- RETURN(page);
- }
-
- /* allocate and initialize cl_page */
- err = cl_page_alloc(env, o, idx, vmpage, type, &page);
- if (err != 0)
- RETURN(page);
-
- if (type == CPT_TRANSIENT) {
- if (parent) {
- LASSERT(page->cp_parent == NULL);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- RETURN(page);
- }
-
- /*
- * XXX optimization: use radix_tree_preload() here, and change tree
- * gfp mask to GFP_KERNEL in cl_object_header_init().
- */
- cfs_spin_lock(&hdr->coh_page_guard);
- err = radix_tree_insert(&hdr->coh_tree, idx, page);
- if (err != 0) {
- ghost = page;
- /*
- * Noted by Jay: a lock on \a vmpage protects cl_page_find()
- * from this race, but
- *
- * 0. it's better to have cl_page interface "locally
- * consistent" so that its correctness can be reasoned
- * about without appealing to the (obscure world of) VM
- * locking.
- *
- * 1. handling this race allows ->coh_tree to remain
- * consistent even when VM locking is somehow busted,
- * which is very useful during diagnosing and debugging.
- */
- page = ERR_PTR(err);
- CL_PAGE_DEBUG(D_ERROR, env, ghost,
- "fail to insert into radix tree: %d\n", err);
- } else {
- if (parent) {
- LASSERT(page->cp_parent == NULL);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- hdr->coh_pages++;
- }
- cfs_spin_unlock(&hdr->coh_page_guard);
-
- if (unlikely(ghost != NULL)) {
- cfs_atomic_dec(&site->cs_pages.cs_busy);
- cl_page_delete0(env, ghost, 0);
- cl_page_free(env, ghost);
- }
- RETURN(page);
-}
-
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
+struct cl_page *cl_page_find(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
{
- return cl_page_find0(env, o, idx, vmpage, type, NULL);
+ struct cl_page *page = NULL;
+ struct cl_object_header *hdr;
+
+ LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
+ might_sleep();
+
+ ENTRY;
+
+ hdr = cl_object_header(o);
+ cs_page_inc(o, CS_lookup);
+
+ CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
+ idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
+ /* fast path. */
+ if (type == CPT_CACHEABLE) {
+ /* the vmpage lock protects vmpage->private from concurrent updates */
+ LASSERT(PageLocked(vmpage));
+ /*
+ * cl_vmpage_page() can be called here without any locks as
+ *
+ * - "vmpage" is locked (which prevents ->private from
+ * concurrent updates), and
+ *
+ * - "o" cannot be destroyed while current thread holds a
+ * reference on it.
+ */
+ page = cl_vmpage_page(vmpage, o);
+ if (page != NULL) {
+ cs_page_inc(o, CS_hit);
+ RETURN(page);
+ }
+ }
+
+ /* allocate and initialize cl_page */
+ page = cl_page_alloc(env, o, idx, vmpage, type);
+ RETURN(page);
}
EXPORT_SYMBOL(cl_page_find);
-
-struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent)
-{
- return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
-}
-EXPORT_SYMBOL(cl_page_find_sub);
-
static inline int cl_page_invariant(const struct cl_page *pg)
{
- struct cl_object_header *header;
- struct cl_page *parent;
- struct cl_page *child;
- struct cl_io *owner;
-
- /*
- * Page invariant is protected by a VM lock.
- */
- LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
- header = cl_object_header(pg->cp_obj);
- parent = pg->cp_parent;
- child = pg->cp_child;
- owner = pg->cp_owner;
-
- return cfs_atomic_read(&pg->cp_ref) > 0 &&
- ergo(parent != NULL, parent->cp_child == pg) &&
- ergo(child != NULL, child->cp_parent == pg) &&
- ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
- ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
- ergo(owner != NULL && parent != NULL,
- parent->cp_owner == pg->cp_owner->ci_parent) &&
- ergo(owner != NULL && child != NULL,
- child->cp_owner->ci_parent == owner) &&
- /*
- * Either page is early in initialization (has neither child
- * nor parent yet), or it is in the object radix tree.
- */
- ergo(pg->cp_state < CPS_FREEING,
- (void *)radix_tree_lookup(&header->coh_tree,
- pg->cp_index) == pg ||
- (child == NULL && parent == NULL));
+ return cl_page_in_use_noref(pg);
}
-static void cl_page_state_set0(const struct lu_env *env,
- struct cl_page *page, enum cl_page_state state)
+static void __cl_page_state_set(const struct lu_env *env,
+ struct cl_page *cl_page,
+ enum cl_page_state state)
{
- enum cl_page_state old;
-#ifdef LUSTRE_PAGESTATE_TRACKING
- struct cl_site *site = cl_object_site(page->cp_obj);
-#endif
-
- /*
- * Matrix of allowed state transitions [old][new], for sanity
- * checking.
- */
- static const int allowed_transitions[CPS_NR][CPS_NR] = {
- [CPS_CACHED] = {
- [CPS_CACHED] = 0,
- [CPS_OWNED] = 1, /* io finds existing cached page */
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 1, /* write-out from the cache */
- [CPS_FREEING] = 1, /* eviction on the memory pressure */
- },
- [CPS_OWNED] = {
- [CPS_CACHED] = 1, /* release to the cache */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 1, /* start read immediately */
- [CPS_PAGEOUT] = 1, /* start write immediately */
- [CPS_FREEING] = 1, /* lock invalidation or truncate */
- },
- [CPS_PAGEIN] = {
- [CPS_CACHED] = 1, /* io completion */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- },
- [CPS_PAGEOUT] = {
- [CPS_CACHED] = 1, /* io completion */
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- },
- [CPS_FREEING] = {
- [CPS_CACHED] = 0,
- [CPS_OWNED] = 0,
- [CPS_PAGEIN] = 0,
- [CPS_PAGEOUT] = 0,
- [CPS_FREEING] = 0,
- }
- };
-
- ENTRY;
- old = page->cp_state;
- PASSERT(env, page, allowed_transitions[old][state]);
- CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- for (; page != NULL; page = page->cp_child) {
- PASSERT(env, page, page->cp_state == old);
- PASSERT(env, page,
- equi(state == CPS_OWNED, page->cp_owner != NULL));
-
-#ifdef LUSTRE_PAGESTATE_TRACKING
- cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
- cfs_atomic_inc(&site->cs_pages_state[state]);
-#endif
- cl_page_state_set_trust(page, state);
- }
- EXIT;
+ enum cl_page_state old;
+
+ /* Matrix of allowed state transitions [old][new] for sanity checking */
+ static const int allowed_transitions[CPS_NR][CPS_NR] = {
+ [CPS_CACHED] = {
+ [CPS_CACHED] = 0,
+ [CPS_OWNED] = 1, /* io finds existing cached page */
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 1, /* write-out from the cache */
+ [CPS_FREEING] = 1, /* eviction on the memory pressure */
+ },
+ [CPS_OWNED] = {
+ [CPS_CACHED] = 1, /* release to the cache */
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 1, /* start read immediately */
+ [CPS_PAGEOUT] = 1, /* start write immediately */
+ [CPS_FREEING] = 1, /* lock invalidation or truncate */
+ },
+ [CPS_PAGEIN] = {
+ [CPS_CACHED] = 1, /* io completion */
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 0,
+ [CPS_FREEING] = 0,
+ },
+ [CPS_PAGEOUT] = {
+ [CPS_CACHED] = 1, /* io completion */
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 0,
+ [CPS_FREEING] = 0,
+ },
+ [CPS_FREEING] = {
+ [CPS_CACHED] = 0,
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 0,
+ [CPS_FREEING] = 0,
+ }
+ };
+
+ ENTRY;
+ old = cl_page->cp_state;
+ PASSERT(env, cl_page, allowed_transitions[old][state]);
+ CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d -> %d\n", old, state);
+ PASSERT(env, cl_page, cl_page->cp_state == old);
+ PASSERT(env, cl_page, equi(state == CPS_OWNED,
+ cl_page->cp_owner != NULL));
+
+ cs_pagestate_dec(cl_page->cp_obj, cl_page->cp_state);
+ cs_pagestate_inc(cl_page->cp_obj, state);
+ cl_page->cp_state = state;
+ EXIT;
}
static void cl_page_state_set(const struct lu_env *env,
- struct cl_page *page, enum cl_page_state state)
+ struct cl_page *page, enum cl_page_state state)
{
- PINVRNT(env, page, cl_page_invariant(page));
- cl_page_state_set0(env, page, state);
+ LASSERT(page->cp_type != CPT_TRANSIENT);
+ __cl_page_state_set(env, page, state);
}
/**
*/
void cl_page_get(struct cl_page *page)
{
- ENTRY;
- LASSERT(page->cp_state != CPS_FREEING);
- cl_page_get_trust(page);
- EXIT;
+ ENTRY;
+ cl_page_get_trust(page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_get);
/**
- * Releases a reference to a page.
+ * Releases a reference to a page, using the folio_batch to release the
+ * underlying VM pages in batches, if one is provided.
+ *
+ * Callers must do a final folio_batch_release() to release any trailing pages.
+ */
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+ struct folio_batch *fbatch)
+{
+ ENTRY;
+ CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
+ refcount_read(&page->cp_ref));
+
+ if (refcount_dec_and_test(&page->cp_ref)) {
+ if (page->cp_type != CPT_TRANSIENT)
+ LASSERT(page->cp_state == CPS_FREEING);
+
+ LASSERT(refcount_read(&page->cp_ref) == 0);
+ PASSERT(env, page, page->cp_owner == NULL);
+ PASSERT(env, page, list_empty(&page->cp_batch));
+ /* Page is no longer reachable by other threads. Tear it down */
+ cl_page_free(env, page, fbatch);
+ }
+
+ EXIT;
+}
+EXPORT_SYMBOL(cl_batch_put);
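+
+/*
+ * Usage sketch for the batched put (illustrative only; next_page_to_release()
+ * is a hypothetical iterator and the caller is assumed to hold a reference on
+ * every page it drops):
+ *
+ *	struct folio_batch fbatch;
+ *
+ *	folio_batch_init(&fbatch);
+ *	while ((page = next_page_to_release()) != NULL)
+ *		cl_batch_put(env, page, &fbatch);
+ *	folio_batch_release(&fbatch);
+ */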
+
+/**
+ * Releases a reference to a page; a wrapper around cl_batch_put().
*
* When last reference is released, page is returned to the cache, unless it
* is in cl_page_state::CPS_FREEING state, in which case it is immediately
 * destroyed.
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- struct cl_object_header *hdr;
- struct cl_site *site = cl_object_site(page->cp_obj);
-
- PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
-
- ENTRY;
- CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
- cfs_atomic_read(&page->cp_ref));
-
- hdr = cl_object_header(cl_object_top(page->cp_obj));
- if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
- cfs_atomic_dec(&site->cs_pages.cs_busy);
- /* We're going to access the page w/o a reference, but it's
- * ok because we have grabbed the lock coh_page_guard, which
- * means nobody is able to free this page behind us.
- */
- if (page->cp_state == CPS_FREEING) {
- /* We drop the page reference and check the page state
- * inside the coh_page_guard. So that if it gets here,
- * it is the REALLY last reference to this page.
- */
- cfs_spin_unlock(&hdr->coh_page_guard);
-
- LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
- PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, cfs_list_empty(&page->cp_batch));
- /*
- * Page is no longer reachable by other threads. Tear
- * it down.
- */
- cl_page_free(env, page);
-
- EXIT;
- return;
- }
- cfs_spin_unlock(&hdr->coh_page_guard);
- }
-
- EXIT;
+ cl_batch_put(env, page, NULL);
}
EXPORT_SYMBOL(cl_page_put);
-/**
- * Returns a VM page associated with a given cl_page.
- */
-cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- /*
- * Find uppermost layer with ->cpo_vmpage() method, and return its
- * result.
- */
- page = cl_page_top(page);
- do {
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_ops->cpo_vmpage != NULL)
- RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
- }
- page = page->cp_child;
- } while (page != NULL);
- LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
- * Returns a cl_page associated with a VM page, and given cl_object.
- */
-struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
+/* Returns a cl_page associated with a VM page, and given cl_object. */
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
- struct cl_page *page;
- struct cl_object_header *hdr;
-
- ENTRY;
- KLASSERT(PageLocked(vmpage));
-
- /*
- * NOTE: absence of races and liveness of data are guaranteed by page
- * lock on a "vmpage". That works because object destruction has
- * bottom-to-top pass.
- */
-
- /*
- * This loop assumes that ->private points to the top-most page. This
- * can be rectified easily.
- */
- hdr = cl_object_header(cl_object_top(obj));
- cfs_spin_lock(&hdr->coh_page_guard);
- for (page = (void *)vmpage->private;
- page != NULL; page = page->cp_child) {
- if (cl_object_same(page->cp_obj, obj)) {
- cl_page_get_trust(page);
- break;
- }
- }
- cfs_spin_unlock(&hdr->coh_page_guard);
- LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
- RETURN(page);
+ struct cl_page *page;
+
+ ENTRY;
+ LASSERT(PageLocked(vmpage));
+
+ /*
+ * NOTE: absence of races and liveness of data are guaranteed by page
+ * lock on a "vmpage". That works because object destruction has
+ * bottom-to-top pass.
+ */
+
+ page = (struct cl_page *)vmpage->private;
+ if (page != NULL) {
+ cl_page_get_trust(page);
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ }
+ RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
- return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype)
-{
- return cl_page_at_trusted(page, dtype);
-}
-EXPORT_SYMBOL(cl_page_at);
-
-#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
-
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
- \
- __result = 0; \
- __page = cl_page_top(__page); \
- do { \
- cfs_list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) { \
- __result = (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- __page = __page->cp_child; \
- } while (__page != NULL && __result == 0); \
- if (__result > 0) \
- __result = 0; \
- __result; \
-})
-
-#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- __page = cl_page_top(__page); \
- do { \
- cfs_list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_child; \
- } while (__page != NULL); \
-} while (0)
-
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- /* get to the bottom page. */ \
- while (__page->cp_child != NULL) \
- __page = __page->cp_child; \
- do { \
- cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_parent; \
- } while (__page != NULL); \
-} while (0)
-
-static int cl_page_invoke(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
- RETURN(CL_PAGE_INVOKE(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io));
-}
-
-static void cl_page_invoid(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
- CL_PAGE_INVOID(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *), io);
- EXIT;
-}
-
static void cl_page_owner_clear(struct cl_page *page)
{
- ENTRY;
- for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
- if (page->cp_owner != NULL) {
- LASSERT(page->cp_owner->ci_owned_nr > 0);
- page->cp_owner->ci_owned_nr--;
- page->cp_owner = NULL;
- page->cp_task = NULL;
- }
- }
- EXIT;
+ ENTRY;
+ if (page->cp_owner != NULL) {
+ LASSERT(page->cp_owner->ci_owned_nr > 0);
+ page->cp_owner->ci_owned_nr--;
+ page->cp_owner = NULL;
+ }
+ EXIT;
}
static void cl_page_owner_set(struct cl_page *page)
{
- ENTRY;
- for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
- LASSERT(page->cp_owner != NULL);
- page->cp_owner->ci_owned_nr++;
- }
- EXIT;
+ ENTRY;
+ LASSERT(page->cp_owner != NULL);
+ page->cp_owner->ci_owned_nr++;
+ EXIT;
}
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
+void __cl_page_disown(const struct lu_env *env, struct cl_page *cp)
{
- enum cl_page_state state;
-
- ENTRY;
- state = pg->cp_state;
- PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
- PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_owner_clear(pg);
-
- if (state == CPS_OWNED)
- cl_page_state_set(env, pg, CPS_CACHED);
- /*
- * Completion call-backs are executed in the bottom-up order, so that
- * uppermost layer (llite), responsible for VFS/VM interaction runs
- * last and can release locks safely.
- */
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- EXIT;
+ struct page *vmpage;
+ enum cl_page_state state;
+
+ ENTRY;
+ cl_page_owner_clear(cp);
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ state = cp->cp_state;
+ PINVRNT(env, cp, state == CPS_OWNED || state == CPS_FREEING);
+ PINVRNT(env, cp, cl_page_invariant(cp) || state == CPS_FREEING);
+ if (state == CPS_OWNED)
+ cl_page_state_set(env, cp, CPS_CACHED);
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ unlock_page(vmpage);
+ }
+
+ EXIT;
}
-/**
- * returns true, iff page is owned by the given io.
- */
+/* Returns true iff the page is owned by the given io. */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
- LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- ENTRY;
- RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
+ struct cl_io *top = cl_io_top((struct cl_io *)io);
+
+ LINVRNT(cl_object_same(pg->cp_obj, top->ci_obj));
+ ENTRY;
+ if (pg->cp_type != CPT_TRANSIENT)
+ RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
+ else
+ RETURN(pg->cp_owner == top);
}
EXPORT_SYMBOL(cl_page_is_owned);
* Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
* into cl_page_state::CPS_OWNED state.
*
- * \pre !cl_page_is_owned(pg, io)
- * \post result == 0 iff cl_page_is_owned(pg, io)
+ * \pre !cl_page_is_owned(cl_page, io)
+ * \post result == 0 iff cl_page_is_owned(cl_page, io)
*
* \retval 0 success
*
- * \retval -ve failure, e.g., page was destroyed (and landed in
+ * \retval -ve failure, e.g., cl_page was destroyed (and landed in
* cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
* or, page was owned by another thread, or in IO.
*
* \see cl_page_disown()
- * \see cl_page_operations::cpo_own()
* \see cl_page_own_try()
* \see cl_page_own
*/
-static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, int nonblock)
+static int __cl_page_own(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *cl_page, int nonblock)
{
- int result;
-
- PINVRNT(env, pg, !cl_page_is_owned(pg, io));
-
- ENTRY;
- pg = cl_page_top(pg);
- io = cl_io_top(io);
-
- if (pg->cp_state == CPS_FREEING) {
- result = -ENOENT;
- } else {
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
- (const struct lu_env *,
- const struct cl_page_slice *,
- struct cl_io *, int),
- io, nonblock);
- if (result == 0) {
- PASSERT(env, pg, pg->cp_owner == NULL);
- PASSERT(env, pg, pg->cp_req == NULL);
- pg->cp_owner = io;
- pg->cp_task = current;
- cl_page_owner_set(pg);
- if (pg->cp_state != CPS_FREEING) {
- cl_page_state_set(env, pg, CPS_OWNED);
- } else {
- cl_page_disown0(env, io, pg);
- result = -ENOENT;
- }
- }
- }
- PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
- RETURN(result);
+ struct page *vmpage = cl_page->cp_vmpage;
+ int result;
+
+ ENTRY;
+ PINVRNT(env, cl_page, !cl_page_is_owned(cl_page, io));
+
+ if (cl_page->cp_type != CPT_TRANSIENT &&
+ cl_page->cp_state == CPS_FREEING) {
+ result = -ENOENT;
+ goto out;
+ }
+
+ LASSERT(vmpage != NULL);
+
+ if (cl_page->cp_type == CPT_TRANSIENT) {
+ /* OK */
+ } else if (nonblock) {
+ if (!trylock_page(vmpage)) {
+ result = -EAGAIN;
+ goto out;
+ }
+
+ if (unlikely(PageWriteback(vmpage))) {
+ unlock_page(vmpage);
+ result = -EAGAIN;
+ goto out;
+ }
+ } else {
+ lock_page(vmpage);
+ wait_on_page_writeback(vmpage);
+ }
+
+ PASSERT(env, cl_page, cl_page->cp_owner == NULL);
+ cl_page->cp_owner = cl_io_top(io);
+ cl_page_owner_set(cl_page);
+
+ if (cl_page->cp_type != CPT_TRANSIENT) {
+ if (cl_page->cp_state == CPS_FREEING) {
+ __cl_page_disown(env, cl_page);
+ result = -ENOENT;
+ goto out;
+ }
+
+ cl_page_state_set(env, cl_page, CPS_OWNED);
+ }
+
+ result = 0;
+out:
+ CDEBUG(D_INFO, "res %d\n", result);
+ PINVRNT(env, cl_page, ergo(result == 0,
+ cl_page_invariant(cl_page)));
+ RETURN(result);
}
-/**
- * Own a page, might be blocked.
- *
- * \see cl_page_own0()
- */
+/* Own a page; the call may block. (see __cl_page_own()) */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
- return cl_page_own0(env, io, pg, 0);
+ return __cl_page_own(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);
-/**
- * Nonblock version of cl_page_own().
- *
- * \see cl_page_own0()
- */
+/* Non-blocking version of cl_page_own(). (see __cl_page_own()) */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
+ struct cl_page *pg)
{
- return cl_page_own0(env, io, pg, 1);
+ return __cl_page_own(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
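+
+/*
+ * Ownership usage sketch (illustrative only): a caller takes ownership,
+ * operates on the page (it is then CPS_OWNED and, for cacheable pages, its
+ * vmpage is locked), and finally releases ownership again:
+ *
+ *	if (cl_page_own(env, io, pg) == 0) {
+ *		... use the page ...
+ *		cl_page_disown(env, io, pg);
+ *	}
+ */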
*
* Called when page is already locked by the hosting VM.
*
- * \pre !cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_assume()
+ * \pre !cl_page_is_owned(cp, io)
+ * \post cl_page_is_owned(cp, io)
*/
void cl_page_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
+ struct cl_io *io, struct cl_page *cp)
{
- PASSERT(env, pg, pg->cp_state < CPS_OWNED);
- PASSERT(env, pg, pg->cp_owner == NULL);
- PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- ENTRY;
- pg = cl_page_top(pg);
- io = cl_io_top(io);
-
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
- pg->cp_owner = io;
- pg->cp_task = current;
- cl_page_owner_set(pg);
- cl_page_state_set(env, pg, CPS_OWNED);
- EXIT;
+ struct page *vmpage;
+
+ ENTRY;
+ PINVRNT(env, cp, cl_object_same(cp->cp_obj, cl_io_top(io)->ci_obj));
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ wait_on_page_writeback(vmpage);
+ }
+
+ PASSERT(env, cp, cp->cp_owner == NULL);
+ cp->cp_owner = cl_io_top(io);
+ cl_page_owner_set(cp);
+ if (cp->cp_type != CPT_TRANSIENT)
+ cl_page_state_set(env, cp, CPS_OWNED);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_assume);
/**
* Releases page ownership without unlocking the page.
*
- * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
- * underlying VM page (as VM is supposed to do this itself).
- *
- * \pre cl_page_is_owned(pg, io)
- * \post !cl_page_is_owned(pg, io)
+ * Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
+ * on the underlying VM page (as VM is supposed to do this itself).
*
- * \see cl_page_assume()
+ * \pre cl_page_is_owned(cp, io)
+ * \post !cl_page_is_owned(cp, io)
*/
void cl_page_unassume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
+ struct cl_io *io, struct cl_page *cp)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- ENTRY;
- pg = cl_page_top(pg);
- io = cl_io_top(io);
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, CPS_CACHED);
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- EXIT;
+ struct page *vmpage;
+
+ ENTRY;
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
+
+ cl_page_owner_clear(cp);
+ if (cp->cp_type != CPT_TRANSIENT)
+ cl_page_state_set(env, cp, CPS_CACHED);
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ }
+
+ EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);
* \post !cl_page_is_owned(pg, io)
*
* \see cl_page_own()
- * \see cl_page_operations::cpo_disown()
*/
void cl_page_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
+ struct cl_io *io, struct cl_page *pg)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ if (pg->cp_type != CPT_TRANSIENT) {
+ PINVRNT(env, pg, cl_page_is_owned(pg, cl_io_top(io)) ||
+ pg->cp_state == CPS_FREEING);
+ }
- ENTRY;
- pg = cl_page_top(pg);
- io = cl_io_top(io);
- cl_page_disown0(env, io, pg);
- EXIT;
+ __cl_page_disown(env, pg);
}
EXPORT_SYMBOL(cl_page_disown);
/**
- * Called when page is to be removed from the object, e.g., as a result of
- * truncate.
+ * Called when cl_page is to be removed from the object, e.g.,
+ * as a result of truncate.
*
* Calls cl_page_operations::cpo_discard() top-to-bottom.
*
- * \pre cl_page_is_owned(pg, io)
+ * \pre cl_page_is_owned(cl_page, io)
*
* \see cl_page_operations::cpo_discard()
*/
void cl_page_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
+ struct cl_io *io, struct cl_page *cp)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
+ struct page *vmpage;
+ const struct cl_page_slice *slice;
+ int i;
+
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
+
+ cl_page_slice_for_each(cp, slice, i) {
+ if (slice->cpl_ops->cpo_discard != NULL)
+ (*slice->cpl_ops->cpo_discard)(env, slice, io);
+ }
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ generic_error_remove_folio(vmpage->mapping, page_folio(vmpage));
+ } else {
+ cl_page_delete(env, cp);
+ }
}
EXPORT_SYMBOL(cl_page_discard);
/**
* Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
- * path. Doesn't check page invariant.
+ * cl_pages, e.g. in an error handling cl_page_find()->__cl_page_delete()
+ * path. Doesn't check cl_page invariant.
*/
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix)
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *cp)
{
- struct cl_page *tmp = pg;
- ENTRY;
-
- PASSERT(env, pg, pg == cl_page_top(pg));
- PASSERT(env, pg, pg->cp_state != CPS_FREEING);
-
- /*
- * Severe all ways to obtain new pointers to @pg.
- */
- cl_page_owner_clear(pg);
-
- /*
- * unexport the page firstly before freeing it so that
- * the page content is considered to be invalid.
- * We have to do this because a CPS_FREEING cl_page may
- * be NOT under the protection of a cl_lock.
- * Afterwards, if this page is found by other threads, then this
- * page will be forced to reread.
- */
- cl_page_export(env, pg, 0);
- cl_page_state_set0(env, pg, CPS_FREEING);
-
- if (tmp->cp_type == CPT_CACHEABLE) {
- if (!radix)
- /* !radix means that @pg is not yet in the radix tree,
- * skip removing it.
- */
- tmp = pg->cp_child;
- for (; tmp != NULL; tmp = tmp->cp_child) {
- void *value;
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(tmp->cp_obj);
- cfs_spin_lock(&hdr->coh_page_guard);
- value = radix_tree_delete(&hdr->coh_tree,
- tmp->cp_index);
- PASSERT(env, tmp, value == tmp);
- PASSERT(env, tmp, hdr->coh_pages > 0);
- hdr->coh_pages--;
- cfs_spin_unlock(&hdr->coh_page_guard);
- }
- }
-
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *, const struct cl_page_slice *));
- EXIT;
+ const struct cl_page_slice *slice;
+ int i;
+
+ ENTRY;
+ if (cp->cp_type != CPT_TRANSIENT)
+ PASSERT(env, cp, cp->cp_state != CPS_FREEING);
+
+ /* Sever all ways to obtain new pointers to @cp. */
+ cl_page_owner_clear(cp);
+ if (cp->cp_type != CPT_TRANSIENT)
+ __cl_page_state_set(env, cp, CPS_FREEING);
+
+ cl_page_slice_for_each_reverse(cp, slice, i) {
+ if (slice->cpl_ops->cpo_delete != NULL)
+ (*slice->cpl_ops->cpo_delete)(env, slice);
+ }
+
+ EXIT;
}
/**
* Once page reaches cl_page_state::CPS_FREEING, all remaining references will
* drain after some time, at which point page will be recycled.
*
- * \pre pg == cl_page_top(pg)
* \pre VM page is locked
* \post pg->cp_state == CPS_FREEING
*
*/
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
- PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
- cl_page_delete0(env, pg, 1);
- EXIT;
+ PINVRNT(env, pg, cl_page_invariant(pg));
+ ENTRY;
+ __cl_page_delete(env, pg);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_delete);
-/**
- * Unmaps page from user virtual memory.
- *
- * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to unmap page from user space
- * virtual memory.
- *
- * \see cl_page_operations::cpo_unmap()
- */
-int cl_page_unmap(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
+void cl_page_touch(const struct lu_env *env,
+ const struct cl_page *cl_page, size_t to)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
+ const struct cl_page_slice *slice;
+ int i;
- return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
-}
-EXPORT_SYMBOL(cl_page_unmap);
+ ENTRY;
-/**
- * Marks page up-to-date.
- *
- * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the \a uptodate argument.
- *
- * \see cl_page_operations::cpo_export()
- */
-void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
-{
- PINVRNT(env, pg, cl_page_invariant(pg));
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
- (const struct lu_env *,
- const struct cl_page_slice *, int), uptodate);
-}
-EXPORT_SYMBOL(cl_page_export);
+ cl_page_slice_for_each(cl_page, slice, i) {
+ if (slice->cpl_ops->cpo_page_touch != NULL)
+ (*slice->cpl_ops->cpo_page_touch)(env, slice, to);
+ }
-/**
- * Returns true, iff \a pg is VM locked in a suitable sense by the calling
- * thread.
- */
-int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
-{
- int result;
- const struct cl_page_slice *slice;
-
- ENTRY;
- pg = cl_page_top_trusted((struct cl_page *)pg);
- slice = container_of(pg->cp_layers.next,
- const struct cl_page_slice, cpl_linkage);
- PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
- /*
- * Call ->cpo_is_vmlocked() directly instead of going through
- * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
- * cl_page_invariant().
- */
- result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
- PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
- RETURN(result == -EBUSY);
+ EXIT;
}
-EXPORT_SYMBOL(cl_page_is_vmlocked);
+EXPORT_SYMBOL(cl_page_touch);
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
- ENTRY;
- RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
+ ENTRY;
+ RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}
static void cl_page_io_start(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt)
+ struct cl_page *pg, enum cl_req_type crt)
{
- /*
- * Page is queued for IO, change its state.
- */
- ENTRY;
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, cl_req_type_state(crt));
- EXIT;
+ /* Page is queued for IO, change its state. */
+ ENTRY;
+ cl_page_owner_clear(pg);
+ if (pg->cp_type != CPT_TRANSIENT)
+ cl_page_state_set(env, pg, cl_req_type_state(crt));
+ EXIT;
}
/**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
+ * Prepares a page for immediate transfer. Returns -EALREADY if this page
+ * should be omitted from the transfer.
*/
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
+ struct cl_page *cp, enum cl_req_type crt)
{
- int result;
-
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
- PINVRNT(env, pg, crt < CRT_NR);
-
- /*
- * XXX this has to be called bottom-to-top, so that llite can set up
- * PG_writeback without risking other layers deciding to skip this
- * page.
- */
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
- if (result == 0)
- cl_page_io_start(env, pg, crt);
-
- KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
- equi(result == 0,
- PageWriteback(cl_page_vmpage(env, pg)))));
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
+ struct page *vmpage = cp->cp_vmpage;
+ int rc;
+
+ PASSERT(env, cp, crt < CRT_NR);
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
+
+ if (cp->cp_type == CPT_TRANSIENT) {
+ /* Nothing to do. */
+ } else if (crt == CRT_READ) {
+ if (PageUptodate(vmpage))
+ GOTO(out, rc = -EALREADY);
+ } else {
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageDirty(vmpage));
+
+ /* the ll_writepage path is not a sync write, so we need to
+ * set the page writeback flag
+ */
+ if (cp->cp_sync_io == NULL)
+ set_page_writeback(vmpage);
+ }
+
+ cl_page_io_start(env, cp, crt);
+ rc = 0;
+out:
+ CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+ return rc;
}
EXPORT_SYMBOL(cl_page_prep);
* uppermost layer (llite), responsible for the VFS/VM interaction runs last
* and can release locks safely.
*
- * \pre pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- * \post pg->cp_state == CPS_CACHED
+ * \pre cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
+ * \post cl_page->cp_state == CPS_CACHED
*
* \see cl_page_operations::cpo_completion()
*/
void cl_page_completion(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret)
+ struct cl_page *cl_page, enum cl_req_type crt,
+ int ioret)
{
- struct cl_sync_io *anchor = pg->cp_sync_io;
-
- PASSERT(env, pg, crt < CRT_NR);
- /* cl_page::cp_req already cleared by the caller (osc_completion()) */
- PASSERT(env, pg, pg->cp_req == NULL);
- PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- ENTRY;
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
- if (crt == CRT_READ && ioret == 0) {
- PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
- pg->cp_flags |= CPF_READ_COMPLETED;
- }
-
- cl_page_state_set(env, pg, CPS_CACHED);
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
- (const struct lu_env *,
- const struct cl_page_slice *, int), ioret);
- if (anchor) {
- LASSERT(pg->cp_sync_io == anchor);
- pg->cp_sync_io = NULL;
- cl_sync_io_note(anchor, ioret);
- }
-
- /* Don't assert the page writeback bit here because the lustre file
- * may be as a backend of swap space. in this case, the page writeback
- * is set by VM, and obvious we shouldn't clear it at all. Fortunately
- * this type of pages are all TRANSIENT pages. */
- KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
- !PageWriteback(cl_page_vmpage(env, pg))));
- EXIT;
+ const struct cl_page_slice *slice;
+ struct cl_sync_io *anchor = cl_page->cp_sync_io;
+ int i;
+
+ ENTRY;
+ PASSERT(env, cl_page, crt < CRT_NR);
+ if (cl_page->cp_type != CPT_TRANSIENT)
+ PASSERT(env, cl_page,
+ cl_page->cp_state == cl_req_type_state(crt));
+
+ CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
+ if (cl_page->cp_type != CPT_TRANSIENT)
+ cl_page_state_set(env, cl_page, CPS_CACHED);
+ if (crt >= CRT_NR)
+ return;
+
+ cl_page_slice_for_each_reverse(cl_page, slice, i) {
+ if (slice->cpl_ops->io[crt].cpo_completion != NULL)
+ (*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
+ ioret);
+ }
+
+ if (anchor != NULL) {
+ LASSERT(cl_page->cp_sync_io == anchor);
+ cl_page->cp_sync_io = NULL;
+ cl_sync_io_note(env, anchor, ioret);
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
* Notify layers that transfer formation engine decided to yank this page from
* the cache and to make it a part of a transfer.
*
- * \pre pg->cp_state == CPS_CACHED
- * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
+ * \pre cl_page->cp_state == CPS_CACHED
+ * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
*/
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt)
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *cp,
+ enum cl_req_type crt)
{
- int result;
-
- PINVRNT(env, pg, crt < CRT_NR);
-
- ENTRY;
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
- (const struct lu_env *,
- const struct cl_page_slice *));
- if (result == 0) {
- PASSERT(env, pg, pg->cp_state == CPS_CACHED);
- cl_page_io_start(env, pg, crt);
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- RETURN(result);
+ struct page *vmpage = cp->cp_vmpage;
+ bool unlock = false;
+ int rc;
+
+ ENTRY;
+ PASSERT(env, cp, crt == CRT_WRITE);
+
+ if (cp->cp_type == CPT_TRANSIENT)
+ GOTO(out, rc = 0);
+
+ lock_page(vmpage);
+ PASSERT(env, cp, PageUptodate(vmpage));
+ unlock = true;
+
+ if (clear_page_dirty_for_io(vmpage)) {
+ LASSERT(cp->cp_state == CPS_CACHED);
+ /* This actually clears the dirty bit in the radix tree */
+ set_page_writeback(vmpage);
+ CL_PAGE_HEADER(D_PAGE, env, cp, "readied\n");
+ rc = 0;
+ } else if (cp->cp_state == CPS_PAGEOUT) {
+ /* is it possible for osc_flush_async_page()
+ * to already make it ready?
+ */
+ rc = -EALREADY;
+ } else {
+ CL_PAGE_DEBUG(D_ERROR, env, cp,
+ "unexpecting page state %d\n",
+ cp->cp_state);
+ LBUG();
+ }
+
+out:
+ if (rc == 0) {
+ PASSERT(env, cp, cp->cp_state == CPS_CACHED);
+ cl_page_io_start(env, cp, crt);
+ }
+
+ if (unlock)
+ unlock_page(vmpage);
+
+ CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+ return rc;
}
EXPORT_SYMBOL(cl_page_make_ready);
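/*
 * Illustrative sketch, not part of this patch: how a writeback caller might
 * drive cl_page_make_ready().  -EALREADY means another thread has already
 * moved the page to CPS_PAGEOUT, so it can simply be skipped; 0 means the
 * page has just been queued for CRT_WRITE.  "foo_prepare_writeback" is a
 * hypothetical name.
 */
static int foo_prepare_writeback(const struct lu_env *env, struct cl_page *cp)
{
	int rc = cl_page_make_ready(env, cp, CRT_WRITE);

	if (rc == -EALREADY)
		return 0;	/* already in flight, nothing to do */

	return rc;		/* 0 on success, negative errno otherwise */
}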
/**
- * Notify layers that high level io decided to place this page into a cache
- * for future transfer.
+ * Called when the kernel decides to write a page back.
*
- * The layer implementing transfer engine (osc) has to register this page in
- * its queues.
+ * \pre cl_page_is_owned(cl_page, io)
+ * \post ergo(result == 0, cl_page->cp_state == CPS_PAGEOUT)
*
- * \pre cl_page_is_owned(pg, io)
- * \post ergo(result == 0,
- * pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT)
- *
- * \see cl_page_operations::cpo_cache_add()
+ * \see cl_page_operations::cpo_flush()
*/
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *cl_page)
{
- int result;
-
- PINVRNT(env, pg, crt < CRT_NR);
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- ENTRY;
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
- if (result == 0) {
- cl_page_owner_clear(pg);
- cl_page_state_set(env, pg, CPS_CACHED);
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- RETURN(result);
+ const struct cl_page_slice *slice;
+ int result = 0;
+ int i;
+
+ ENTRY;
+ PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
+ PINVRNT(env, cl_page, cl_page_invariant(cl_page));
+
+ cl_page_slice_for_each(cl_page, slice, i) {
+ if (slice->cpl_ops->cpo_flush != NULL)
+ result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
+ if (result != 0)
+ break;
+ }
+ if (result > 0)
+ result = 0;
+
+ CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d\n", result);
+ RETURN(result);
}
-EXPORT_SYMBOL(cl_page_cache_add);
-
-/**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
-{
- int rc;
-
- PINVRNT(env, page, cl_page_invariant(page));
-
- ENTRY;
- rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- PASSERT(env, page, rc != 0);
- RETURN(rc);
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
- * Purges all cached pages belonging to the object \a obj.
- */
-int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
-{
- struct cl_thread_info *info;
- struct cl_object *obj = cl_object_top(clobj);
- struct cl_io *io;
- struct cl_page_list *plist;
- int result;
-
- ENTRY;
- info = cl_env_info(env);
- plist = &info->clt_list;
- io = &info->clt_io;
-
- /*
- * initialize the io. This is ugly since we never do IO in this
- * function, we just make cl_page_list functions happy. -jay
- */
- io->ci_obj = obj;
- result = cl_io_init(env, io, CIT_MISC, obj);
- if (result != 0) {
- cl_io_fini(env, io);
- RETURN(io->ci_result);
- }
-
- do {
- cl_page_list_init(plist);
- result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
- plist);
- /*
- * Since we're purging the pages of an object, we don't care
- * the possible outcomes of the following functions.
- */
- cl_page_list_unmap(env, io, plist);
- cl_page_list_discard(env, io, plist);
- cl_page_list_disown(env, io, plist);
- cl_page_list_fini(env, plist);
-
- if (result == CLP_GANG_RESCHED)
- cfs_cond_resched();
- } while (result != CLP_GANG_OKAY);
-
- cl_io_fini(env, io);
- RETURN(result);
-}
-EXPORT_SYMBOL(cl_pages_prune);
+EXPORT_SYMBOL(cl_page_flush);
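/*
 * Illustrative sketch, not part of this patch: a per-layer cpo_flush()
 * method as called by cl_page_flush() above, top layer first.  A negative
 * errno aborts the iteration and is returned to the caller; a positive
 * value also stops the iteration but is folded to 0.  "foo_page_flush" is
 * a hypothetical name.
 */
static int foo_page_flush(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *io)
{
	/* e.g. hand the page to the layer's RPC engine for writeback */
	return 0;
}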
/**
* Tells transfer engine that only part of a page is to be transmitted.
*
* \see cl_page_operations::cpo_clip()
*/
-void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
- int from, int to)
+void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
+ int from, int to)
{
- PINVRNT(env, pg, cl_page_invariant(pg));
+ const struct cl_page_slice *slice;
+ int i;
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
- (const struct lu_env *,
- const struct cl_page_slice *,int, int),
- from, to);
+ PINVRNT(env, cl_page, cl_page_invariant(cl_page));
+
+ CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", from, to);
+ cl_page_slice_for_each(cl_page, slice, i) {
+ if (slice->cpl_ops->cpo_clip != NULL)
+ (*slice->cpl_ops->cpo_clip)(env, slice, from, to);
+ }
}
EXPORT_SYMBOL(cl_page_clip);
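/*
 * Illustrative sketch, not part of this patch: a short write ending in the
 * middle of the last page only needs to transmit the valid prefix, so the
 * caller clips the page to the byte range that is actually dirty.
 * "foo_clip_tail_page" and "valid_bytes" are hypothetical.
 */
static void foo_clip_tail_page(const struct lu_env *env, struct cl_page *cp,
			       unsigned int valid_bytes)
{
	/* transfer only bytes [0, valid_bytes) of this page */
	cl_page_clip(env, cp, 0, valid_bytes);
}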
-/**
- * Prints human readable representation of \a pg to the \a f.
- */
+/* Prints a human-readable representation of \a pg via \a printer. */
void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg)
+ lu_printer_t printer, const struct cl_page *pg)
{
- (*printer)(env, cookie,
- "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
- pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_index, pg->cp_parent, pg->cp_child,
- pg->cp_state, pg->cp_error, pg->cp_type,
- pg->cp_owner, pg->cp_req, pg->cp_flags);
+ (*printer)(env, cookie,
+ "page@%p[%d %p %d %d %p]\n",
+ pg, refcount_read(&pg->cp_ref), pg->cp_obj,
+ pg->cp_state, pg->cp_type,
+ pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);
-/**
- * Prints human readable representation of \a pg to the \a f.
- */
+/* Prints a human-readable representation of \a cp via \a printer. */
void cl_page_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *pg)
+ lu_printer_t printer, const struct cl_page *cp)
{
- struct cl_page *scan;
-
- for (scan = cl_page_top((struct cl_page *)pg);
- scan != NULL; scan = scan->cp_child)
- cl_page_header_print(env, cookie, printer, scan);
- CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
- (const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t p), cookie, printer);
- (*printer)(env, cookie, "end page@%p\n", pg);
+ struct page *vmpage = cp->cp_vmpage;
+ const struct cl_page_slice *slice;
+ int result = 0;
+ int i;
+
+ cl_page_header_print(env, cookie, printer, cp);
+
+ (*printer)(env, cookie, "vmpage @%p", vmpage);
+
+ if (vmpage != NULL) {
+ (*printer)(env, cookie, " %lx %d:%d %lx %lu %slru",
+ (long)vmpage->flags, page_count(vmpage),
+ page_mapcount(vmpage), vmpage->private,
+ page_index(vmpage),
+ list_empty(&vmpage->lru) ? "not-" : "");
+ }
+
+ (*printer)(env, cookie, "\n");
+
+ cl_page_slice_for_each(cp, slice, i) {
+ if (slice->cpl_ops->cpo_print != NULL)
+ result = (*slice->cpl_ops->cpo_print)(env, slice,
+ cookie, printer);
+ if (result != 0)
+ break;
+ }
+
+ (*printer)(env, cookie, "end page@%p\n", cp);
}
EXPORT_SYMBOL(cl_page_print);
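/*
 * Illustrative sketch, not part of this patch: callers normally reach
 * cl_page_print() through the CL_PAGE_DEBUG() macro used elsewhere in this
 * file, which wires in the libcfs debug printer, so a single call dumps the
 * page header, the vmpage state and every layer's slice.
 */
static void foo_dump_page(const struct lu_env *env, struct cl_page *cp)
{
	CL_PAGE_DEBUG(D_INFO, env, cp, "dumping page for debugging\n");
}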
/**
- * Cancel a page which is still in a transfer.
- */
-int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
-{
- return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
- (const struct lu_env *,
- const struct cl_page_slice *));
-}
-EXPORT_SYMBOL(cl_page_cancel);
-
-/**
- * Converts a byte offset within object \a obj into a page index.
- */
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
-{
- /*
- * XXX for now.
- */
- return (loff_t)idx << CFS_PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_offset);
-
-/**
- * Converts a page index into a byte offset within object \a obj.
- */
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
-{
- /*
- * XXX for now.
- */
- return offset >> CFS_PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_index);
-
-int cl_page_size(const struct cl_object *obj)
-{
- return 1 << CFS_PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_page_size);
-
-/**
* Adds page slice to the compound page.
*
* This is called by cl_object_operations::coo_page_init() methods to add a
* per-layer state to the page.
*
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
-void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
- const struct cl_page_operations *ops)
+void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
+ struct cl_object *obj,
+ const struct cl_page_operations *ops)
{
- ENTRY;
- cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
- slice->cpl_obj = obj;
- slice->cpl_ops = ops;
- slice->cpl_page = page;
- EXIT;
+ unsigned int offset = (char *)slice -
+ ((char *)cl_page + sizeof(*cl_page));
+
+ ENTRY;
+ LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
+ LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
+ cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
+ slice->cpl_ops = ops;
+ slice->cpl_page = cl_page;
+
+ EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
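/*
 * Illustrative sketch, not part of this patch: a layer's
 * cl_object_operations::coo_page_init() method embeds its slice in the
 * space reserved for it inside the compound page and registers it with
 * cl_page_slice_add().  "struct foo_page", "foo_page_layer_ops" and the
 * layer itself are hypothetical; cl_object_page_slice() is assumed to be
 * the usual helper for locating the layer's slice area.
 */
struct foo_page {
	struct cl_page_slice	fp_cl;
	/* layer-private per-page state would follow here */
};

static const struct cl_page_operations foo_page_layer_ops = { };

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
			 struct cl_page *cl_page, pgoff_t index)
{
	struct foo_page *fp = cl_object_page_slice(obj, cl_page);

	cl_page_slice_add(cl_page, &fp->fp_cl, obj, &foo_page_layer_ops);
	return 0;
}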
-int cl_page_init(void)
+/* Allocate and initialize cl_cache, called by ll_init_sbi(). */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+ struct cl_client_cache *cache = NULL;
+
+ ENTRY;
+ OBD_ALLOC(cache, sizeof(*cache));
+ if (cache == NULL)
+ RETURN(NULL);
+
+ /* Initialize cache data */
+ refcount_set(&cache->ccc_users, 1);
+ cache->ccc_lru_max = lru_page_max;
+ atomic_long_set(&cache->ccc_lru_left, lru_page_max);
+ spin_lock_init(&cache->ccc_lru_lock);
+ INIT_LIST_HEAD(&cache->ccc_lru);
+
+ cache->ccc_unstable_check = 1;
+ atomic_long_set(&cache->ccc_unstable_nr, 0);
+ mutex_init(&cache->ccc_max_cache_mb_lock);
+
+ RETURN(cache);
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/* Increase cl_cache refcount */
+void cl_cache_incref(struct cl_client_cache *cache)
{
- return lu_kmem_init(cl_page_caches);
+ refcount_inc(&cache->ccc_users);
}
+EXPORT_SYMBOL(cl_cache_incref);
-void cl_page_fini(void)
+/**
+ * Decrease the cl_cache refcount and free the cache when it drops to zero.
+ * Since llite, lov and osc each hold a cl_cache reference, the final free
+ * cannot race with a remaining user. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
{
- lu_kmem_fini(cl_page_caches);
+ if (refcount_dec_and_test(&cache->ccc_users))
+ OBD_FREE(cache, sizeof(*cache));
}
+EXPORT_SYMBOL(cl_cache_decref);
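/*
 * Illustrative sketch, not part of this patch: typical lifetime of the
 * client cache.  llite allocates it at mount time with cl_cache_init(),
 * every lower layer that caches the pointer takes its own reference with
 * cl_cache_incref(), and each user drops its reference with
 * cl_cache_decref(); the last put frees the structure.  Names prefixed
 * with "foo_" are hypothetical.
 */
static struct cl_client_cache *foo_cache;

static int foo_cache_setup(unsigned long lru_page_max)
{
	foo_cache = cl_cache_init(lru_page_max);
	if (foo_cache == NULL)
		return -ENOMEM;

	/* a lower layer keeping the pointer takes its own reference */
	cl_cache_incref(foo_cache);
	return 0;
}

static void foo_cache_teardown(void)
{
	/* drop the lower layer's reference, then the owner's */
	cl_cache_decref(foo_cache);
	cl_cache_decref(foo_cache);
	foo_cache = NULL;
}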