 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <libcfs/list.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
do {                                                                    \
        if (unlikely(!(expr))) {                                        \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");      \
                LBUG();                                                 \
        }                                                               \
} while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr)                                       \
do {                                                                    \
        if (unlikely(!(expr))) {                                        \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");      \
                LINVRNT(0);                                             \
        }                                                               \
} while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
/* Disable page statistics by default due to the huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
        cfs_atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
        cfs_atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
        cfs_atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
        cfs_atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
#define CS_PAGESTATE_INC(o, state)
#define CS_PAGESTATE_DEC(o, state)
#endif
/**
 * Internal version of cl_page_top(). It may only be called on a page that is
 * known not to be freed, e.g., while the page is referenced, the radix tree
 * lock is held, or the page is owned.
 */
static struct cl_page *cl_page_top_trusted(struct cl_page *page)
{
        while (page->cp_parent != NULL)
                page = page->cp_parent;
        return page;
}
/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by holding a lock on the VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
        cfs_atomic_inc(&page->cp_ref);
}
/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;

        page = cl_page_top_trusted((struct cl_page *)page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                                return slice;
                }
                page = page->cp_child;
        } while (page != NULL);
        return NULL;
}
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;

        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_parent == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        while (!cfs_list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
                slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_FREE(page, pagesize);
}
/**
 * Helper function updating page state. This is the only place in the code
 * where cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}
struct cl_page *cl_page_alloc(const struct lu_env *env,
                struct cl_object *o, pgoff_t ind, struct page *vmpage,
                enum cl_page_type type)
{
        struct cl_page          *page;
        struct lu_object_header *head;

        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
                      __GFP_IO);
        if (page != NULL) {
                int result = 0;

                cfs_atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
                page->cp_index = ind;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o,
                                                                  page, vmpage);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_alloc);
/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In either case, an additional reference to the page is
 * acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page          *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
        might_sleep();

        hdr = cl_object_header(o);
        CS_PAGE_INC(o, lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 * - "vmpage" is locked (which prevents ->private from
                 *   concurrent updates), and
                 *
                 * - "o" cannot be destroyed while current thread holds a
                 *   reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        CS_PAGE_INC(o, hit);
                        RETURN(page);
                }
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_find);
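
/*
 * Illustrative sketch (not part of the original file): a typical caller-side
 * sequence over the interface above -- look the page up (taking a reference),
 * try to own it for IO, and drop the reference when done.  The helper name
 * and its error handling are hypothetical; only the cl_page_* calls are from
 * this file, and the VM page must already be locked for CPT_CACHEABLE.
 */
#if 0
static int example_find_and_own(const struct lu_env *env, struct cl_io *io,
                                struct cl_object *obj, pgoff_t idx,
                                struct page *vmpage)
{
        struct cl_page *page;
        int rc;

        /* consult the per-object cache, or allocate a new cl_page */
        page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* move the page into CPS_OWNED so this io can manipulate it */
        rc = cl_page_own(env, io, page);
        if (rc == 0)
                cl_page_disown(env, io, page);

        /* drop the reference acquired by cl_page_find() */
        cl_page_put(env, page);
        return rc;
}
#endif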
static inline int cl_page_invariant(const struct cl_page *pg)
{
        struct cl_page *parent;
        struct cl_page *child;
        struct cl_io   *owner;

        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        parent = pg->cp_parent;
        child  = pg->cp_child;
        owner  = pg->cp_owner;

        return cl_page_in_use_noref(pg) &&
                ergo(parent != NULL, parent->cp_child == pg) &&
                ergo(child != NULL, child->cp_parent == pg) &&
                ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
                ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
                ergo(owner != NULL && parent != NULL,
                     parent->cp_owner == pg->cp_owner->ci_parent) &&
                ergo(owner != NULL && child != NULL,
                     child->cp_owner->ci_parent == owner);
}
static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.  Entries not listed are 0 (forbidden).
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on the memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_FREEING] = {
                        /* no transitions out of CPS_FREEING */
                },
        };

        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        for (; page != NULL; page = page->cp_child) {
                PASSERT(env, page, page->cp_state == old);
                PASSERT(env, page,
                        equi(state == CPS_OWNED, page->cp_owner != NULL));

                CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
                CS_PAGESTATE_INC(page->cp_obj, state);
                cl_page_state_set_trust(page, state);
        }
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}
/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller already possessing a reference to \a
 * page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);

        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       cfs_atomic_read(&page->cp_ref));

        if (cfs_atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }
}
EXPORT_SYMBOL(cl_page_put);
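
/*
 * Illustrative sketch (not part of the original file): cl_page_get() /
 * cl_page_put() pairing.  A thread that already holds a reference takes an
 * additional one before publishing the page on a queue, and the consumer
 * drops it when done.  The helper names and the queue are hypothetical; the
 * cp_batch linkage and the cl_page_* calls are from this file.
 */
#if 0
static void example_queue_page(struct cl_page *page, cfs_list_t *queue)
{
        /* caller already owns a reference, so cl_page_get() is legal here */
        cl_page_get(page);
        cfs_list_add_tail(&page->cp_batch, queue);
}

static void example_dequeue_page(const struct lu_env *env,
                                 struct cl_page *page)
{
        cfs_list_del_init(&page->cp_batch);
        /* drop the reference taken in example_queue_page() */
        cl_page_put(env, page);
}
#endif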
/**
 * Returns a VM page associated with a given cl_page.
 */
struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
{
        const struct cl_page_slice *slice;

        /*
         * Find uppermost layer with ->cpo_vmpage() method, and return its
         * result.
         */
        page = cl_page_top(page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_ops->cpo_vmpage != NULL)
                                RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
                }
                page = page->cp_child;
        } while (page != NULL);
        LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
}
EXPORT_SYMBOL(cl_page_vmpage);
/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *top;
        struct cl_page *page;

        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */

        /*
         * This loop assumes that ->private points to the top-most page. This
         * can be rectified easily.
         */
        top = (struct cl_page *)vmpage->private;
        if (top == NULL)
                RETURN(NULL);

        for (page = top; page != NULL; page = page->cp_child) {
                if (cl_object_same(page->cp_obj, obj)) {
                        cl_page_get_trust(page);
                        break;
                }
        }
        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
/**
 * Returns the top-page for a given page.
 *
 * \see cl_object_top(), cl_io_top()
 */
struct cl_page *cl_page_top(struct cl_page *page)
{
        return cl_page_top_trusted(page);
}
EXPORT_SYMBOL(cl_page_top);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);
#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL) {                         \
                                __result = (*__method)(__env, __scan,   \
                                                       ## __VA_ARGS__); \
                                if (__result != 0)                      \
                                        break;                          \
                        }                                               \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL && __result == 0);                      \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL)                           \
                                (*__method)(__env, __scan,              \
                                            ## __VA_ARGS__);            \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL);                                       \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)           \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        /* get to the bottom page. */                                   \
        while (__page->cp_child != NULL)                                \
                __page = __page->cp_child;                              \
        do {                                                            \
                cfs_list_for_each_entry_reverse(__scan,                 \
                                                &__page->cp_layers,     \
                                                cpl_linkage) {          \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL)                           \
                                (*__method)(__env, __scan,              \
                                            ## __VA_ARGS__);            \
                }                                                       \
                __page = __page->cp_parent;                             \
        } while (__page != NULL);                                       \
} while (0)
static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        RETURN(CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io));
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
}
static void cl_page_owner_clear(struct cl_page *page)
{
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                if (page->cp_owner != NULL) {
                        LASSERT(page->cp_owner->ci_owned_nr > 0);
                        page->cp_owner->ci_owned_nr--;
                        page->cp_owner = NULL;
                        page->cp_task = NULL;
                }
        }
}

static void cl_page_owner_set(struct cl_page *page)
{
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                LASSERT(page->cp_owner != NULL);
                page->cp_owner->ci_owned_nr++;
        }
}
void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * the uppermost layer (llite), responsible for VFS/VM interaction,
         * runs last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
}
EXPORT_SYMBOL(cl_page_is_owned);
/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page was owned by another thread, or is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        pg = cl_page_top(pg);

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, pg->cp_owner == NULL);
                        PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = io;
                        pg->cp_task = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}
/**
 * Owns a page; may block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
/**
 * Assume page ownership.
 *
 * Called when page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        pg = cl_page_top(pg);

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = io;
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);
/**
 * Releases page ownership without unlocking the page.
 *
 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
 * underlying VM page (as VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        pg = cl_page_top(pg);

        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
EXPORT_SYMBOL(cl_page_unassume);
/**
 * Releases page ownership.
 *
 * Moves page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        pg = cl_page_top(pg);

        cl_page_disown0(env, io, pg);
}
EXPORT_SYMBOL(cl_page_disown);
/**
 * Called when page is to be removed from the object, e.g., as a result of
 * truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);
/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error-handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
        PASSERT(env, pg, pg == cl_page_top(pg));
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
                               (const struct lu_env *,
                                const struct cl_page_slice *));
}
/**
 * Called when a decision is made to throw page out of memory.
 *
 * Notifies all layers about page destruction by calling
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
 * where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
 * drain after some time, at which point page will be recycled.
 *
 * \pre  pg == cl_page_top(pg)
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
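
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * eviction path.  With the VM page locked and a reference held on the top
 * page, the page is moved to CPS_FREEING and the reference dropped; per the
 * comment above, the page is recycled once all remaining references drain.
 * The helper name is made up; only the cl_page_* calls are from this file.
 */
#if 0
static void example_evict_page(const struct lu_env *env, struct cl_page *page)
{
        /* caller holds the VM page lock and a reference on the page */
        cl_page_delete(env, cl_page_top(page));
        /* once all outstanding references drain, the page is freed */
        cl_page_put(env, page);
}
#endif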
/**
 * Marks page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);
/**
 * Returns true iff \a pg is VM-locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        pg = cl_page_top_trusted((struct cl_page *)pg);
        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
}
/**
 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                      equi(result == 0,
                           PageWriteback(cl_page_vmpage(env, pg)))));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_prep);
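
/*
 * Illustrative sketch (not part of the original file): per the comment above,
 * every layer either accepts the page (0) or asks to skip it (-EALREADY).
 * The helper name and the "treat -EALREADY as nothing-to-send" policy are
 * hypothetical; only cl_page_prep() and CRT_WRITE are from this file.
 */
#if 0
static int example_submit_one(const struct lu_env *env, struct cl_io *io,
                              struct cl_page *pg)
{
        int rc = cl_page_prep(env, io, pg, CRT_WRITE);

        if (rc == -EALREADY)
                rc = 0;  /* some layer decided this page need not be sent */
        return rc;        /* on 0 the page is now in CPS_PAGEOUT */
}
#endif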
/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, pg->cp_req == NULL);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor != NULL) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
        }
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, pg);

        if (anchor != NULL)
                cl_sync_io_note(anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
/**
 * Notify layers that transfer formation engine decided to yank this page from
 * the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);
/**
 * Notify layers that high level io decided to place this page into a cache
 * for future transfer.
 *
 * The layer implementing transfer engine (osc) has to register this page in
 * its queues.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_cache_add()
 */
int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *pg, enum cl_req_type crt)
{
        const struct cl_page_slice *scan;
        int result = 0;

        PINVRNT(env, pg, crt < CRT_NR);
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
                if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
                        continue;

                result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
                if (result != 0)
                        break;
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_cache_add);
/**
 * Called when a page is being written back on the kernel's initiative.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);
/**
 * Checks whether the page is protected by any extent lock in at least the
 * required mode.
 *
 * \return the same as in cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                            (const struct lu_env *,
                             const struct cl_page_slice *, struct cl_io *),
                            io);
        PASSERT(env, page, rc != 0);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_page_is_under_lock);
/**
 * Tells transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
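
/*
 * Illustrative sketch (not part of the original file): clipping a partially
 * valid page so that only the meaningful byte range is transmitted.  The
 * helper and the byte count are hypothetical; only cl_page_clip() itself is
 * from this file.
 */
#if 0
static void example_clip_tail_page(const struct lu_env *env,
                                   struct cl_page *pg, int valid_bytes)
{
        /* transmit only bytes [0, valid_bytes) of this page, e.g. a short
         * tail page of a file, instead of the whole page */
        cl_page_clip(env, pg, 0, valid_bytes);
}
#endif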
/**
 * Prints a human-readable representation of the \a pg header through
 * \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_index, pg->cp_parent, pg->cp_child,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human-readable representation of \a pg through \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        struct cl_page *scan;

        for (scan = cl_page_top((struct cl_page *)pg);
             scan != NULL; scan = scan->cp_child)
                cl_page_header_print(env, cookie, printer, scan);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);
/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);
/**
 * Converts a page index into a byte offset within object \a obj.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

int cl_page_size(const struct cl_object *obj)
{
        return 1 << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
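
/*
 * Illustrative examples (not part of the original file): with 4KiB pages
 * (PAGE_CACHE_SHIFT == 12) the conversions above behave as follows.  The
 * helper only sketches the arithmetic.
 */
#if 0
static void example_offset_index_arithmetic(const struct cl_object *obj)
{
        LASSERT(cl_offset(obj, 2) == 8192);   /* page 2 starts at byte 8192 */
        LASSERT(cl_index(obj, 8192) == 2);    /* byte 8192 lies in page 2 */
        LASSERT(cl_index(obj, 8191) == 1);    /* byte 8191 is still in page 1 */
        LASSERT(cl_page_size(obj) == 4096);
}
#endif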
/**
 * Adds page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj,
                       const struct cl_page_operations *ops)
{
        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
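
/*
 * Illustrative sketch (not part of the original file): how a layer's
 * cl_object_operations::coo_page_init() method typically attaches its slice.
 * The example layer ("foo"), its structures, and the exact coo_page_init()
 * prototype are hypothetical; only cl_page_slice_add() and OBD_ALLOC_PTR()
 * are assumed from this code base.
 */
#if 0
struct foo_page {
        struct cl_page_slice fp_cl;   /* embedded generic slice */
        /* ... per-layer state ... */
};

static const struct cl_page_operations foo_page_ops; /* layer's methods */

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
                         struct cl_page *page, struct page *vmpage)
{
        struct foo_page *fp;

        OBD_ALLOC_PTR(fp);
        if (fp == NULL)
                return -ENOMEM;
        /* link this layer's state at the bottom of page->cp_layers */
        cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
        return 0;
}
#endif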
int cl_page_init(void)
{
        return 0;
}

void cl_page_fini(void)
{
}