/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <libcfs/list.h>

#include <cl_object.h>
#include "cl_internal.h"
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                            int radix);

static cfs_mem_cache_t *cl_page_kmem = NULL;

static struct lu_kmem_descr cl_page_caches[] = {
        {
                .ckd_cache = &cl_page_kmem,
                .ckd_name  = "cl_page_kmem",
                .ckd_size  = sizeof(struct cl_page)
        },
        {
                .ckd_cache = NULL
        }
};
#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
do {                                                                    \
        if (unlikely(!(expr))) {                                        \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");      \
                LASSERT(0);                                             \
        }                                                               \
} while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */
#ifdef INVARIANT_CHECK
# define PINVRNT(env, page, expr)                                       \
do {                                                                    \
        if (unlikely(!(expr))) {                                        \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");      \
                LINVRNT(0);                                             \
        }                                                               \
} while (0)
#else /* !INVARIANT_CHECK */
# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !INVARIANT_CHECK */
/* Disable page statistics by default due to a huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
        cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
        cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
        cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
        cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
#define CS_PAGESTATE_INC(o, state)
#define CS_PAGESTATE_DEC(o, state)
#endif
/**
 * Internal version of cl_page_top(); it must only be called when the page is
 * known not to be freed, e.g., with a page reference held, with the radix
 * tree lock held, or with the page owned.
 */
static struct cl_page *cl_page_top_trusted(struct cl_page *page)
{
        while (page->cp_parent != NULL)
                page = page->cp_parent;
        return page;
}
/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference on a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by locking the page radix-tree
 * (cl_object_header::coh_page_guard), or by keeping a lock on the VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
        cfs_atomic_inc(&page->cp_ref);
}
/**
 * Returns the slice within a page that corresponds to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;
        ENTRY;

        page = cl_page_top_trusted((struct cl_page *)page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                                RETURN(slice);
                }
                page = page->cp_child;
        } while (page != NULL);
        RETURN(NULL);
}
/**
 * Returns the page with the given index in the given object, or NULL if no
 * such page is found. Acquires a reference on the returned page.
 *
 * Locking: called under cl_object_header::coh_page_guard spin-lock.
 */
struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
{
        struct cl_page *page;

        LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);

        page = radix_tree_lookup(&hdr->coh_tree, index);
        if (page != NULL)
                cl_page_get_trust(page);
        return page;
}
EXPORT_SYMBOL(cl_page_lookup);
/**
 * Visits the pages covering the [start, end] range of \a obj, invoking \a cb
 * on each page found.
 *
 * If the lookup is about to hog the CPU for too long, this function gives up
 * and returns CLP_GANG_RESCHED; the caller is then expected to yield the CPU
 * and retry.
 *
 * The gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
 * crucial in the face of [offset, EOF] locks.
 */
int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                        struct cl_io *io, pgoff_t start, pgoff_t end,
                        cl_page_gang_cb_t cb, void *cbdata)
{
        struct cl_object_header     *hdr;
        struct cl_page              *page;
        struct cl_page             **pvec;
        const struct cl_page_slice  *slice;
        const struct lu_device_type *dtype;
        pgoff_t                      idx;
        unsigned int                 nr;
        unsigned int                 i;
        unsigned int                 j;
        int                          res = CLP_GANG_OKAY;
        int                          tree_lock = 1;
        ENTRY;

        idx = start;
        hdr = cl_object_header(obj);
        pvec = cl_env_info(env)->clt_pvec;
        dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
        spin_lock(&hdr->coh_page_guard);
        while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                            idx, CLT_PVEC_SIZE)) > 0) {
                int end_of_region = 0;
                idx = pvec[nr - 1]->cp_index + 1;
                for (i = 0, j = 0; i < nr; ++i) {
                        page = pvec[i];
                        pvec[i] = NULL;

                        LASSERT(page->cp_type == CPT_CACHEABLE);
                        if (page->cp_index > end) {
                                end_of_region = 1;
                                break;
                        }
                        if (page->cp_state == CPS_FREEING)
                                continue;

                        slice = cl_page_at_trusted(page, dtype);
                        /*
                         * Pages of an lsm-less file have no underlying
                         * sub-page for osc, in case of ...
                         */
                        PASSERT(env, page, slice != NULL);

                        page = slice->cpl_page;
                        /*
                         * Can safely call cl_page_get_trust() under
                         * radix-tree spin-lock.
                         *
                         * XXX not true, because @page is from an object other
                         * than @hdr and is protected by a different tree lock.
                         */
                        cl_page_get_trust(page);
                        lu_ref_add_atomic(&page->cp_reference,
                                          "gang_lookup", cfs_current());
                        pvec[j++] = page;
                }

                /*
                 * Here a delicate locking dance is performed. The current
                 * thread holds a reference to a page, but has to own it
                 * before it can be placed into a queue. Owning implies
                 * waiting, so the radix-tree lock has to be released. After
                 * the wait one has to check that the pages weren't truncated
                 * (cl_page_own() returns an error in the latter case).
                 */
                spin_unlock(&hdr->coh_page_guard);
                tree_lock = 0;

                for (i = 0; i < j; ++i) {
                        page = pvec[i];
                        if (res == CLP_GANG_OKAY)
                                res = (*cb)(env, io, page, cbdata);
                        lu_ref_del(&page->cp_reference,
                                   "gang_lookup", cfs_current());
                        cl_page_put(env, page);
                }
                if (nr < CLT_PVEC_SIZE || end_of_region)
                        break;

                if (res == CLP_GANG_OKAY && cfs_need_resched())
                        res = CLP_GANG_RESCHED;
                if (res != CLP_GANG_OKAY)
                        break;

                spin_lock(&hdr->coh_page_guard);
                tree_lock = 1;
        }
        if (tree_lock)
                spin_unlock(&hdr->coh_page_guard);
        RETURN(res);
}
EXPORT_SYMBOL(cl_page_gang_lookup);
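
/*
 * Usage sketch (added commentary, not part of the original file): callers of
 * cl_page_gang_lookup() retry when CLP_GANG_RESCHED comes back, as
 * cl_pages_prune() below does:
 *
 *      do {
 *              rc = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
 *                                       my_cb, NULL);
 *      } while (rc == CLP_GANG_RESCHED);
 *
 * where my_cb is a hypothetical cl_page_gang_cb_t, e.g. page_prune_cb()
 * defined later in this file.
 */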
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj = page->cp_obj;
        ENTRY;

        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_parent == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        while (!cfs_list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
                slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_SLAB_FREE_PTR(page, cl_page_kmem);
        EXIT;
}
/**
 * Helper function updating page state. This is the only place in the code
 * where the cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass the const qualifier: this is the designated mutation point */
        *(enum cl_page_state *)&page->cp_state = state;
}
static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                         pgoff_t ind, struct page *vmpage,
                         enum cl_page_type type, struct cl_page **out)
{
        struct cl_page          *page;
        struct cl_page          *err = NULL;
        struct lu_object_header *head;
        int                      result = 0;
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
        if (page != NULL) {
                cfs_atomic_set(&page->cp_ref, 1);
                if (type == CPT_CACHEABLE) /* for radix tree */
                        cfs_atomic_inc(&page->cp_ref);
                page->cp_obj = o;
                cl_object_get(o);
                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
                                                     "cl_page", page);
                page->cp_index = ind;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                err = o->co_ops->coo_page_init(env, o,
                                                               page, vmpage);
                                if (err != NULL) {
                                        cl_page_delete0(env, page, 0);
                                        cl_page_free(env, page);
                                        page = err;
                                        break;
                                }
                        }
                }
                if (err == NULL) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
                } else {
                        result = PTR_ERR(err);
                }
        } else {
                page = ERR_PTR(-ENOMEM);
                result = -ENOMEM;
        }
        *out = page;
        RETURN(result);
}
/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In any case, an additional reference to the page is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
static struct cl_page *cl_page_find0(const struct lu_env *env,
                                     struct cl_object *o,
                                     pgoff_t idx, struct page *vmpage,
                                     enum cl_page_type type,
                                     struct cl_page *parent)
{
        struct cl_page          *page = NULL;
        struct cl_page          *ghost = NULL;
        struct cl_object_header *hdr;
        int err;
        ENTRY;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);

        hdr = cl_object_header(o);
        CS_PAGE_INC(o, lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 * - "vmpage" is locked (which prevents ->private from
                 *   concurrent updates), and
                 *
                 * - "o" cannot be destroyed while current thread holds a
                 *   reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                PINVRNT(env, page,
                        ergo(page != NULL,
                             cl_page_vmpage(env, page) == vmpage &&
                             (void *)radix_tree_lookup(&hdr->coh_tree,
                                                       idx) == page));
        }

        if (page != NULL) {
                CS_PAGE_INC(o, hit);
                RETURN(page);
        }
        /* allocate and initialize cl_page */
        err = cl_page_alloc(env, o, idx, vmpage, type, &page);
        if (err != 0)
                RETURN(page);

        if (type == CPT_TRANSIENT) {
                if (parent != NULL) {
                        LASSERT(page->cp_parent == NULL);
                        page->cp_parent = parent;
                        parent->cp_child = page;
                }
                RETURN(page);
        }

        /*
         * XXX optimization: use radix_tree_preload() here, and change tree
         * gfp mask to GFP_KERNEL in cl_object_header_init().
         */
        spin_lock(&hdr->coh_page_guard);
        err = radix_tree_insert(&hdr->coh_tree, idx, page);
        if (err != 0) {
                ghost = page;
                /*
                 * Noted by Jay: a lock on \a vmpage protects cl_page_find()
                 * from this race, but
                 *
                 *     0. it's better to have the cl_page interface "locally
                 *     consistent" so that its correctness can be reasoned
                 *     about without appealing to the (obscure world of) VM
                 *     locking.
                 *
                 *     1. handling this race allows ->coh_tree to remain
                 *     consistent even when VM locking is somehow busted,
                 *     which is very useful during diagnosing and debugging.
                 */
                page = ERR_PTR(err);
                CL_PAGE_DEBUG(D_ERROR, env, ghost,
                              "fail to insert into radix tree: %d\n", err);
        } else {
                if (parent != NULL) {
                        LASSERT(page->cp_parent == NULL);
                        page->cp_parent = parent;
                        parent->cp_child = page;
                }
                hdr->coh_pages++;
        }
        spin_unlock(&hdr->coh_page_guard);

        if (unlikely(ghost != NULL)) {
                cl_page_delete0(env, ghost, 0);
                cl_page_free(env, ghost);
        }
        RETURN(page);
}
struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        return cl_page_find0(env, o, idx, vmpage, type, NULL);
}
EXPORT_SYMBOL(cl_page_find);
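
/*
 * Usage sketch (added commentary, not part of the original file): a typical
 * caller holds the locked VM page and does
 *
 *      page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ...
 *      cl_page_put(env, page);
 *
 * i.e. the reference obtained from cl_page_find() must eventually be dropped
 * with cl_page_put().
 */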
struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
                                 pgoff_t idx, struct page *vmpage,
                                 struct cl_page *parent)
{
        return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
}
EXPORT_SYMBOL(cl_page_find_sub);
static inline int cl_page_invariant(const struct cl_page *pg)
{
        struct cl_object_header *header;
        struct cl_page          *parent;
        struct cl_page          *child;
        struct cl_io            *owner;

        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        header = cl_object_header(pg->cp_obj);
        parent = pg->cp_parent;
        child  = pg->cp_child;
        owner  = pg->cp_owner;

        return cl_page_in_use(pg) &&
                ergo(parent != NULL, parent->cp_child == pg) &&
                ergo(child != NULL, child->cp_parent == pg) &&
                ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
                ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
                ergo(owner != NULL && parent != NULL,
                     parent->cp_owner == pg->cp_owner->ci_parent) &&
                ergo(owner != NULL && child != NULL,
                     child->cp_owner->ci_parent == owner) &&
                /*
                 * Either the page is early in initialization (has neither
                 * child nor parent yet), or it is in the object radix tree.
                 */
                ergo(pg->cp_state < CPS_FREEING &&
                     pg->cp_type == CPT_CACHEABLE,
                     (void *)radix_tree_lookup(&header->coh_tree,
                                               pg->cp_index) == pg ||
                     (child == NULL && parent == NULL));
}
static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking. Entries not listed default to 0 (transition forbidden).
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_FREEING] = {
                        /* no transitions out of CPS_FREEING */
                },
        };
        ENTRY;

        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        for (; page != NULL; page = page->cp_child) {
                PASSERT(env, page, page->cp_state == old);
                PASSERT(env, page,
                        equi(state == CPS_OWNED, page->cp_owner != NULL));

                CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
                CS_PAGESTATE_INC(page->cp_obj, state);
                cl_page_state_set_trust(page, state);
        }
        EXIT;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}
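
/*
 * Illustrative state walk (added commentary): a typical cached write takes a
 * page through
 *
 *      CPS_CACHED  -> CPS_OWNED   (io takes ownership)
 *      CPS_OWNED   -> CPS_PAGEOUT (transfer starts)
 *      CPS_PAGEOUT -> CPS_CACHED  (io completion)
 *
 * and eviction eventually moves it CPS_CACHED -> CPS_FREEING; each hop is a
 * "1" entry in allowed_transitions[][] above.
 */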
/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller that already possesses a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        ENTRY;
        cl_page_get_trust(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_get);
/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * freed immediately.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       cfs_atomic_read(&page->cp_ref));

        if (cfs_atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_put);
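
/*
 * Added note: a CPT_CACHEABLE page starts life with two references, one for
 * the creator and one held on behalf of the radix tree (see the extra
 * cfs_atomic_inc() in cl_page_alloc()). The final cl_page_put() that actually
 * frees a cached page therefore normally runs only after cl_page_delete0()
 * has removed the page from the tree and dropped the tree's reference.
 */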
/**
 * Returns a VM page associated with a given cl_page.
 */
cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
{
        const struct cl_page_slice *slice;

        /*
         * Find the uppermost layer with the ->cpo_vmpage() method, and
         * return its result.
         */
        page = cl_page_top(page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_ops->cpo_vmpage != NULL)
                                RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
                }
                page = page->cp_child;
        } while (page != NULL);
        LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
}
EXPORT_SYMBOL(cl_page_vmpage);
/**
 * Returns the cl_page associated with a VM page and a given cl_object.
 */
struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
{
        struct cl_page *top;
        struct cl_page *page;
        ENTRY;

        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         * lock on a "vmpage". That works because object destruction has
         * bottom-to-top pass.
         */

        /*
         * This loop assumes that ->private points to the top-most page. This
         * can be rectified easily.
         */
        top = (struct cl_page *)vmpage->private;
        if (top == NULL)
                RETURN(NULL);

        for (page = top; page != NULL; page = page->cp_child) {
                if (cl_object_same(page->cp_obj, obj)) {
                        cl_page_get_trust(page);
                        break;
                }
        }
        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
/**
 * Returns the top-page for a given page.
 *
 * \see cl_object_top(), cl_io_top()
 */
struct cl_page *cl_page_top(struct cl_page *page)
{
        return cl_page_top_trusted(page);
}
EXPORT_SYMBOL(cl_page_top);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);
#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL) {                         \
                                __result = (*__method)(__env, __scan,   \
                                                       ## __VA_ARGS__); \
                                if (__result != 0)                      \
                                        break;                          \
                        }                                               \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL && __result == 0);                      \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL)                           \
                                (*__method)(__env, __scan,              \
                                            ## __VA_ARGS__);            \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL);                                       \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        /* get to the bottom page. */                                   \
        while (__page->cp_child != NULL)                                \
                __page = __page->cp_child;                              \
        do {                                                            \
                cfs_list_for_each_entry_reverse(__scan,                 \
                                                &__page->cp_layers,     \
                                                cpl_linkage) {          \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL)                           \
                                (*__method)(__env, __scan,              \
                                            ## __VA_ARGS__);            \
                }                                                       \
                __page = __page->cp_parent;                             \
        } while (__page != NULL);                                       \
} while (0)
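
/*
 * Added note: CL_PAGE_OP() turns a method name into a byte offset within
 * struct cl_page_operations, so an invocation such as
 *
 *      CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
 *                     (const struct lu_env *,
 *                      const struct cl_page_slice *, struct cl_io *, int),
 *                     io, nonblock);
 *
 * loads the function pointer stored at that offset in each slice's cpl_ops
 * and calls every non-NULL implementation top-to-bottom, stopping at the
 * first non-zero result.
 */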
static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io));
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
        EXIT;
}
static void cl_page_owner_clear(struct cl_page *page)
{
        ENTRY;
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                if (page->cp_owner != NULL) {
                        LASSERT(page->cp_owner->ci_owned_nr > 0);
                        page->cp_owner->ci_owned_nr--;
                        page->cp_owner = NULL;
                        page->cp_task = NULL;
                }
        }
        EXIT;
}

static void cl_page_owner_set(struct cl_page *page)
{
        ENTRY;
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                LASSERT(page->cp_owner != NULL);
                page->cp_owner->ci_owned_nr++;
        }
        EXIT;
}
void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        ENTRY;
        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * the uppermost layer (llite), responsible for the VFS/VM
         * interaction, runs last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}
/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
}
EXPORT_SYMBOL(cl_page_is_owned);
/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of
 *             cl_page_state::CPS_CACHED), or the page was owned by another
 *             thread, or in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, pg->cp_owner == NULL);
                        PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = io;
                        pg->cp_task = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}
/**
 * Own a page; may block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblock version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
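
/*
 * Usage sketch (added commentary, not part of the original file): the
 * ownership protocol pairs cl_page_own()/cl_page_own_try() with
 * cl_page_disown():
 *
 *      if (cl_page_own(env, io, pg) == 0) {
 *              ... io now owns pg exclusively ...
 *              cl_page_disown(env, io, pg);
 *      } else {
 *              ... pg was freed or is owned elsewhere; skip it ...
 *      }
 */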
/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = io;
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
        EXIT;
}
EXPORT_SYMBOL(cl_page_assume);
/**
 * Releases page ownership without unlocking the page.
 *
 * Moves the page into cl_page_state::CPS_CACHED without releasing the lock
 * on the underlying VM page (as the VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);
/**
 * Releases page ownership.
 *
 * Moves the page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);
        cl_page_disown0(env, io, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_disown);
/**
 * Called when the page is to be removed from the object, e.g., as a result
 * of truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);
/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in the error handling path of
 * cl_page_find()->cl_page_delete0(). Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                            int radix)
{
        struct cl_page *tmp = pg;
        ENTRY;

        PASSERT(env, pg, pg == cl_page_top(pg));
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        /*
         * Unexport the page before freeing it, so that its content is
         * considered invalid. We have to do this because a CPS_FREEING
         * cl_page may be NOT under the protection of a cl_lock. If this page
         * is later found by other threads, they will be forced to re-read it.
         */
        cl_page_export(env, pg, 0);
        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
                       (const struct lu_env *, const struct cl_page_slice *));

        if (tmp->cp_type == CPT_CACHEABLE) {
                if (!radix)
                        /* !radix means that @pg is not yet in the radix tree,
                         * so skip removing it. */
                        tmp = pg->cp_child;
                for (; tmp != NULL; tmp = tmp->cp_child) {
                        void                    *value;
                        struct cl_object_header *hdr;

                        hdr = cl_object_header(tmp->cp_obj);
                        spin_lock(&hdr->coh_page_guard);
                        value = radix_tree_delete(&hdr->coh_tree,
                                                  tmp->cp_index);
                        PASSERT(env, tmp, value == tmp);
                        PASSERT(env, tmp, hdr->coh_pages > 0);
                        hdr->coh_pages--;
                        spin_unlock(&hdr->coh_page_guard);
                        cl_page_put(env, tmp);
                }
        }

        EXIT;
}
/**
 * Called when a decision is made to throw the page out of memory.
 *
 * Notifies all layers about page destruction by calling the
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves the page into cl_page_state::CPS_FREEING state (this is the only
 * place where a transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes the page from the radix trees,
 *
 *     - breaks the linkage from the VM page to the cl_page.
 *
 * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
 * will drain after some time, at which point the page will be recycled.
 *
 * \pre  pg == cl_page_top(pg)
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
        cl_page_delete0(env, pg, 1);
        EXIT;
}
EXPORT_SYMBOL(cl_page_delete);
/**
 * Unmaps the page from user virtual memory.
 *
 * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to unmap the page from user
 * space virtual memory.
 *
 * \see cl_page_operations::cpo_unmap()
 */
int cl_page_unmap(const struct lu_env *env,
                  struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
}
EXPORT_SYMBOL(cl_page_unmap);
/**
 * Marks the page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);
/**
 * Returns true iff \a pg is VM-locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        ENTRY;
        pg = cl_page_top_trusted((struct cl_page *)pg);
        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);
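
/*
 * Illustrative sketch (an assumption, not code from this file): a VM-level
 * implementation of ->cpo_is_vmlocked() would look roughly like
 *
 *      static int foo_page_is_vmlocked(const struct lu_env *env,
 *                                      const struct cl_page_slice *slice)
 *      {
 *              return PageLocked(cl_page_vmpage(env, slice->cpl_page)) ?
 *                      -EBUSY : -ENODATA;
 *      }
 *
 * i.e., -EBUSY means "locked" and -ENODATA means "not locked"; 0 is never
 * returned, which is exactly what the PASSERT() above verifies.
 */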
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        ENTRY;
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO; change its state.
         */
        ENTRY;
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
        EXIT;
}
/**
 * Prepares the page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                      equi(result == 0,
                           PageWriteback(cl_page_vmpage(env, pg)))));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);
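
/*
 * Added note: -EALREADY from a layer's ->io[crt].cpo_prep() means "this page
 * is already being transferred; omit it". CL_PAGE_INVOKE() stops at the first
 * non-zero result, so cl_page_prep() propagates -EALREADY to its caller,
 * which must skip the page rather than treat the return as a hard error.
 */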
/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer of which this page is a part has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, pg->cp_req == NULL);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor != NULL) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
                cl_sync_io_note(anchor, ioret);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        ENTRY;
        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);
/**
 * Notify layers that a high level io decided to place this page into a cache
 * for future transfer.
 *
 * The layer implementing the transfer engine (osc) has to register this page
 * in its queues.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_cache_add()
 */
int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *pg, enum cl_req_type crt)
{
        const struct cl_page_slice *scan;
        int result = 0;

        PINVRNT(env, pg, crt < CRT_NR);
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
                if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
                        continue;

                result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
                if (result != 0)
                        break;
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_cache_add);
/**
 * Called when a page is being written back at the kernel's initiative.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);
/**
 * Checks whether the page is protected by an extent lock of at least the
 * required mode.
 *
 * \return the same as the cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        ENTRY;
        rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                            (const struct lu_env *,
                             const struct cl_page_slice *, struct cl_io *),
                            io);
        PASSERT(env, page, rc != 0);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_page_is_under_lock);
static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
                         struct cl_page *page, void *cbdata)
{
        cl_page_own(env, io, page);
        cl_page_unmap(env, io, page);
        cl_page_discard(env, io, page);
        cl_page_disown(env, io, page);
        return CLP_GANG_OKAY;
}
/**
 * Purges all cached pages belonging to the object \a clobj.
 */
int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
{
        struct cl_thread_info *info;
        struct cl_object      *obj = cl_object_top(clobj);
        struct cl_io          *io;
        int                    result;

        ENTRY;
        info = cl_env_info(env);
        io   = &info->clt_io;

        /*
         * Initialize the io. This is ugly since we never do IO in this
         * function; we just make the cl_page_list functions happy. -jay
         */
        io->ci_obj = obj;
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, obj);
        if (result != 0) {
                cl_io_fini(env, io);
                RETURN(io->ci_result);
        }

        do {
                result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
                                             page_prune_cb, NULL);
                if (result == CLP_GANG_RESCHED)
                        cfs_cond_resched();
        } while (result != CLP_GANG_OKAY);

        cl_io_fini(env, io);
        RETURN(result);
}
EXPORT_SYMBOL(cl_pages_prune);
/**
 * Tells the transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
/**
 * Prints a human readable representation of \a pg through \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_index, pg->cp_parent, pg->cp_child,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);
/**
 * Prints a human readable representation of \a pg and all of its slices
 * through \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        struct cl_page *scan;

        for (scan = cl_page_top((struct cl_page *)pg);
             scan != NULL; scan = scan->cp_child)
                cl_page_header_print(env, cookie, printer, scan);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);
/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);
/**
 * Converts a page index into a byte offset within object \a obj.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        /*
         * XXX for now.
         */
        return (loff_t)idx << CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        /*
         * XXX for now.
         */
        return offset >> CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

int cl_page_size(const struct cl_object *obj)
{
        return 1 << CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
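
/*
 * Worked example (added, assuming CFS_PAGE_SHIFT == 12, i.e. 4096-byte
 * pages): cl_offset(obj, 3) == 12288, cl_index(obj, 12288) == 3, and
 * cl_index(obj, 12289) == 3 as well, since cl_index() rounds down to the
 * index of the containing page.
 */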
/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. The new state is added at the end of the
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj,
                       const struct cl_page_operations *ops)
{
        ENTRY;
        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
        EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
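
/*
 * Illustrative sketch (hypothetical names, not code from this file): a
 * layer's cl_object_operations::coo_page_init() method typically embeds its
 * slice in a per-layer page structure and registers it here. Note that in
 * this version coo_page_init() returns NULL on success and an error pointer
 * on failure (see cl_page_alloc() above):
 *
 *      static struct cl_page *foo_page_init(const struct lu_env *env,
 *                                           struct cl_object *obj,
 *                                           struct cl_page *page,
 *                                           cfs_page_t *vmpage)
 *      {
 *              struct foo_page *fp = ...allocate per-layer state...;
 *
 *              if (fp == NULL)
 *                      return ERR_PTR(-ENOMEM);
 *              cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
 *              return NULL;
 *      }
 */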
int cl_page_init(void)
{
        return lu_kmem_init(cl_page_caches);
}

void cl_page_fini(void)
{
        lu_kmem_fini(cl_page_caches);
}