/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <libcfs/list.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr) \
do { \
        if (unlikely(!(expr))) { \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
                LBUG(); \
        } \
} while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr) \
do { \
        if (unlikely(!(expr))) { \
                CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
                LINVRNT(0); \
        } \
} while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
/* Disable page statistics by default due to a huge performance penalty. */
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
#define CS_PAGE_INC(o, item) \
        atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGE_DEC(o, item) \
        atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
#define CS_PAGESTATE_INC(o, state) \
        atomic_inc(&cl_object_site(o)->cs_pages_state[state])
#define CS_PAGESTATE_DEC(o, state) \
        atomic_dec(&cl_object_site(o)->cs_pages_state[state])
#else
#define CS_PAGE_INC(o, item)
#define CS_PAGE_DEC(o, item)
#define CS_PAGESTATE_INC(o, state)
#define CS_PAGESTATE_DEC(o, state)
#endif /* CONFIG_DEBUG_PAGESTATE_TRACKING */
/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on a VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}
/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;

        cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }
        return NULL;
}
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj = page->cp_obj;
        int pagesize = cl_object_header(obj)->coh_page_bufsize;

        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        while (!cfs_list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
                if (unlikely(slice->cpl_ops->cpo_fini != NULL))
                        slice->cpl_ops->cpo_fini(env, slice);
        }
        CS_PAGE_DEC(obj, total);
        CS_PAGESTATE_DEC(obj, page->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_FREE(page, pagesize);
}
/**
 * Helper function updating page state. This is the only place in the code
 * where the cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}
struct cl_page *cl_page_alloc(const struct lu_env *env,
                struct cl_object *o, pgoff_t ind, struct page *vmpage,
                enum cl_page_type type)
{
        struct cl_page          *page;
        struct lu_object_header *head;

        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
                      GFP_NOFS);
        if (page != NULL) {
                int result = 0;

                atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
                                     page);
                page->cp_vmpage = vmpage;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o, page,
                                                                  ind);
                                if (result != 0) {
                                        cl_page_delete0(env, page);
                                        cl_page_free(env, page);
                                        page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        CS_PAGE_INC(o, total);
                        CS_PAGE_INC(o, create);
                        CS_PAGESTATE_DEC(o, CPS_CACHED);
                }
        } else {
                page = ERR_PTR(-ENOMEM);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_alloc);
/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In any case, an additional reference to the page is acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page          *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);

        hdr = cl_object_header(o);
        CS_PAGE_INC(o, lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship */
                KLASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while current thread holds a
                 *       reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        CS_PAGE_INC(o, hit);
                        RETURN(page);
                }
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        RETURN(page);
}
EXPORT_SYMBOL(cl_page_find);
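
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * code): how a caller that already holds a locked VM page might look up or
 * create the corresponding cl_page. The variables env, obj, idx and vmpage
 * stand for caller-provided values and are assumptions of this example.
 *
 * \code
 *        struct cl_page *page;
 *
 *        page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
 *        if (IS_ERR(page))
 *                return PTR_ERR(page);
 *        ... use the page; an additional reference is held on it ...
 *        cl_page_put(env, page);
 * \endcode
 */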
static inline int cl_page_invariant(const struct cl_page *pg)
{
        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        return cl_page_in_use_noref(pg);
}
static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking.
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on the memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
        };

        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        PASSERT(env, page, page->cp_state == old);
        PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));

        CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
        CS_PAGESTATE_INC(page->cp_obj, state);
        cl_page_state_set_trust(page, state);
}
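
/*
 * Illustrative note (added for exposition, not in the original source): the
 * transition matrix above encodes the typical page lifecycles, e.g. for a
 * cached page that gets written out and finally evicted:
 *
 *        CPS_CACHED  --cl_page_own()---------> CPS_OWNED
 *        CPS_OWNED   --cl_page_prep()--------> CPS_PAGEOUT
 *        CPS_PAGEOUT --cl_page_completion()--> CPS_CACHED
 *        CPS_CACHED  --cl_page_delete()------> CPS_FREEING
 *
 * Every arrow corresponds to a "1" entry in allowed_transitions[][]; any
 * other transition trips the PASSERT() in cl_page_state_set0().
 */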
static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}
/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller already possessing a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);
/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page);
        }
}
EXPORT_SYMBOL(cl_page_put);
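
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * code): reference counting is strictly paired. A thread that received a
 * referenced page (e.g. from cl_page_find()) may take extra short-term
 * references:
 *
 * \code
 *        cl_page_get(page);
 *        ... short-term use of the page ...
 *        cl_page_put(env, page);
 * \endcode
 *
 * The last cl_page_put() on a page that has reached CPS_FREEING ends up in
 * cl_page_free(), as can be seen above.
 */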
/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */
        page = (struct cl_page *)vmpage->private;
        if (page != NULL) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);
#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
({ \
        const struct lu_env        *__env  = (_env); \
        struct cl_page             *__page = (_page); \
        const struct cl_page_slice *__scan; \
        int                         __result; \
        ptrdiff_t                   __op   = (_op); \
        int                       (*__method)_proto; \
        __result = 0; \
        cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
                __method = *(void **)((char *)__scan->cpl_ops + __op); \
                if (__method != NULL) { \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0) \
                                break; \
                } \
        } \
        if (__result > 0) \
                __result = 0; \
        __result; \
})
#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
({ \
        const struct lu_env        *__env  = (_env); \
        struct cl_page             *__page = (_page); \
        const struct cl_page_slice *__scan; \
        int                         __result; \
        ptrdiff_t                   __op   = (_op); \
        int                       (*__method)_proto; \
        __result = 0; \
        list_for_each_entry_reverse(__scan, &__page->cp_layers, \
                                    cpl_linkage) { \
                __method = *(void **)((char *)__scan->cpl_ops + __op); \
                if (__method != NULL) { \
                        __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
                        if (__result != 0) \
                                break; \
                } \
        } \
        if (__result > 0) \
                __result = 0; \
        __result; \
})
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
        const struct lu_env        *__env  = (_env); \
        struct cl_page             *__page = (_page); \
        const struct cl_page_slice *__scan; \
        ptrdiff_t                   __op   = (_op); \
        void                      (*__method)_proto; \
        cfs_list_for_each_entry(__scan, &__page->cp_layers, \
                                cpl_linkage) { \
                __method = *(void **)((char *)__scan->cpl_ops + __op); \
                if (__method != NULL) \
                        (*__method)(__env, __scan, ## __VA_ARGS__); \
        } \
} while (0)
#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
do { \
        const struct lu_env        *__env  = (_env); \
        struct cl_page             *__page = (_page); \
        const struct cl_page_slice *__scan; \
        ptrdiff_t                   __op   = (_op); \
        void                      (*__method)_proto; \
        /* get to the bottom page. */ \
        cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
                                        cpl_linkage) { \
                __method = *(void **)((char *)__scan->cpl_ops + __op); \
                if (__method != NULL) \
                        (*__method)(__env, __scan, ## __VA_ARGS__); \
        } \
} while (0)
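
/*
 * Illustrative note (added for exposition, not in the original source):
 * CL_PAGE_OP() turns an operation name into its byte offset inside
 * struct cl_page_operations, and the CL_PAGE_INVOKE*()/CL_PAGE_INVOID*()
 * macros walk cl_page::cp_layers, fetching each layer's method at that
 * offset and calling it with the given prototype. A typical call site,
 * taken from cl_page_own0() below, looks like:
 *
 * \code
 *        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
 *                                (const struct lu_env *,
 *                                 const struct cl_page_slice *,
 *                                 struct cl_io *, int),
 *                                io, nonblock);
 * \endcode
 *
 * The _proto argument must match the method's signature exactly, since the
 * function pointer is recovered from a raw byte offset.
 */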
static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        RETURN(CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io));
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
}
static void cl_page_owner_clear(struct cl_page *page)
{
        if (page->cp_owner != NULL) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
                page->cp_task = NULL;
        }
}

static void cl_page_owner_set(struct cl_page *page)
{
        LASSERT(page->cp_owner != NULL);
        page->cp_owner->ci_owned_nr++;
}
void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * uppermost layer (llite), responsible for VFS/VM interaction, runs
         * last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
/**
 * Returns true, iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);

        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
}
EXPORT_SYMBOL(cl_page_is_owned);
/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page was owned by another thread, or is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, pg->cp_owner == NULL);
                        PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = cl_io_top(io);
                        pg->cp_task = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}
/**
 * Own a page. Might block until the page becomes available.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);
/**
 * Nonblocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
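
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * code): a typical own/operate/disown cycle for an io that needs exclusive
 * access to a page. The variables env, io and pg are assumed to be provided
 * by the caller.
 *
 * \code
 *        if (cl_page_own(env, io, pg) == 0) {
 *                ... page is in CPS_OWNED and cl_page_is_owned(pg, io) ...
 *                cl_page_disown(env, io, pg);
 *        }
 * \endcode
 *
 * cl_page_own_try() behaves the same way but fails instead of blocking when
 * the page cannot be owned immediately.
 */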
/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        PASSERT(env, pg, pg->cp_owner == NULL);
        pg->cp_owner = cl_io_top(io);
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);
/**
 * Releases page ownership without unlocking the page.
 *
 * Moves the page into cl_page_state::CPS_CACHED without releasing a lock on
 * the underlying VM page (as the VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
}
EXPORT_SYMBOL(cl_page_unassume);
/**
 * Releases page ownership.
 *
 * Moves the page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                         pg->cp_state == CPS_FREEING);

        cl_page_disown0(env, io, pg);
}
EXPORT_SYMBOL(cl_page_disown);
/**
 * Called when the page is to be removed from the object, e.g., as a result
 * of truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);
/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        cl_page_state_set0(env, pg, CPS_FREEING);

        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
                               (const struct lu_env *,
                                const struct cl_page_slice *));
}
/**
 * Called when a decision is made to throw the page out of memory.
 *
 * Notifies all layers about page destruction by calling the
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves the page into cl_page_state::CPS_FREEING state (this is the only
 * place where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes page from the radix trees,
 *
 *     - breaks linkage from VM page to cl_page.
 *
 * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
 * will drain after some time, at which point the page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
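
/*
 * Illustrative note (added for exposition, not in the original source): a
 * minimal teardown sequence for a page whose VM page is locked, following
 * the pre- and post-conditions documented above.
 *
 * \code
 *        cl_page_delete(env, pg);
 *        cl_page_put(env, pg);
 * \endcode
 *
 * After cl_page_delete() the page is in CPS_FREEING and new lookups can no
 * longer return it; the remaining references drain, and the last
 * cl_page_put() calls cl_page_free().
 */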
/**
 * Marks the page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date by the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);
/**
 * Returns true, iff \a pg is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
}
/**
 * Prepares the page for immediate transfer. cl_page_operations::cpo_prep() is
 * called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                      equi(result == 0,
                           PageWriteback(cl_page_vmpage(pg)))));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_prep);
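
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * code): how transfer code might submit an owned page for write-out, using
 * only the cl_page_prep() semantics documented above.
 *
 * \code
 *        result = cl_page_prep(env, io, pg, CRT_WRITE);
 *        if (result == 0) {
 *                ... pg is now in CPS_PAGEOUT; add it to the outgoing RPC ...
 *        } else if (result == -EALREADY) {
 *                ... some layer asked to skip this page ...
 *        }
 * \endcode
 */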
/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, pg->cp_req == NULL);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor != NULL) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
        }
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, pg);

        if (anchor != NULL)
                cl_sync_io_note(anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
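
/*
 * Illustrative note (added for exposition, not in the original source): the
 * transfer code that queued a page with cl_page_prep() or
 * cl_page_make_ready() reports the outcome of the transfer here, e.g.:
 *
 * \code
 *        cl_page_completion(env, pg, CRT_READ, ioret);
 * \endcode
 *
 * After the call the page is back in CPS_CACHED, the per-layer
 * cpo_completion() methods have run bottom-to-top, and any cl_sync_io
 * waiter attached via cp_sync_io has been notified with \a ioret.
 */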
/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and to make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);
/**
 * Called when a page is being written back at the kernel's initiative.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *pg)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_flush);
/**
 * Checks whether the page is protected by any extent lock of at least the
 * required mode.
 *
 * \return the same as in cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page, pgoff_t *max_index)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                                    (const struct lu_env *,
                                     const struct cl_page_slice *,
                                     struct cl_io *, pgoff_t *),
                                    io, max_index);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_page_is_under_lock);
/**
 * Tells the transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);
/**
 * Prints a human readable representation of \a pg via the given printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %d %p %p %#x]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);
/**
 * Prints a human readable representation of \a pg and all its slices via the
 * given printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        cl_page_header_print(env, cookie, printer, pg);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);
/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);
/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

int cl_page_size(const struct cl_object *obj)
{
        return 1 << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
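
/*
 * Worked example (added for exposition, not in the original source),
 * assuming 4 KiB pages, i.e. PAGE_CACHE_SHIFT == 12:
 *
 *        cl_offset(obj, 3)    == 3 << 12     == 12288
 *        cl_index(obj, 12288) == 12288 >> 12 == 3
 *        cl_page_size(obj)    == 1 << 12     == 4096
 *
 * i.e. page index 3 starts at byte offset 12288 within the object.
 */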
/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of the
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj, pgoff_t index,
                       const struct cl_page_operations *ops)
{
        list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj   = obj;
        slice->cpl_index = index;
        slice->cpl_ops   = ops;
        slice->cpl_page  = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
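
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * code): a layer's coo_page_init() method typically embeds its slice in the
 * per-page buffer and registers it here. The names "foo_page", "fp_cl" and
 * "foo_page_ops", and the way the slice storage is obtained, are hypothetical
 * and used only for this example; the method signature matches the
 * coo_page_init() call in cl_page_alloc() above.
 *
 * \code
 *        static int foo_page_init(const struct lu_env *env,
 *                                 struct cl_object *obj,
 *                                 struct cl_page *page, pgoff_t index)
 *        {
 *                struct foo_page *fp = ...;
 *
 *                cl_page_slice_add(page, &fp->fp_cl, obj, index,
 *                                  &foo_page_ops);
 *                return 0;
 *        }
 * \endcode
 */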
int cl_page_init(void)
{
        return 0;
}

void cl_page_fini(void)
{
}