4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 * Author: Nikita Danilov <nikita.danilov@sun.com>
35 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
38 #define DEBUG_SUBSYSTEM S_CLASS
40 #include <linux/list.h>
41 #include <libcfs/libcfs.h>
42 #include <obd_class.h>
43 #include <obd_support.h>
45 #include <cl_object.h>
46 #include "cl_internal.h"
48 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
51 # define PASSERT(env, page, expr) \
53 if (unlikely(!(expr))) { \
54 CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
58 #else /* !LIBCFS_DEBUG */
59 # define PASSERT(env, page, exp) \
60 ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
61 #endif /* !LIBCFS_DEBUG */
63 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
64 # define PINVRNT(env, page, expr) \
66 if (unlikely(!(expr))) { \
67 CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
71 #else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
72 # define PINVRNT(env, page, exp) \
73 ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
74 #endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
76 /* Disable page statistics by default due to the huge performance penalty. */
77 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
78 #define CS_PAGE_INC(o, item) \
79 atomic_inc(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
80 #define CS_PAGE_DEC(o, item) \
81 atomic_dec(&cl_object_site(o)->cs_pages.cs_stats[CS_##item])
82 #define CS_PAGESTATE_INC(o, state) \
83 atomic_inc(&cl_object_site(o)->cs_pages_state[state])
84 #define CS_PAGESTATE_DEC(o, state) \
85 atomic_dec(&cl_object_site(o)->cs_pages_state[state])
87 #define CS_PAGE_INC(o, item)
88 #define CS_PAGE_DEC(o, item)
89 #define CS_PAGESTATE_INC(o, state)
90 #define CS_PAGESTATE_DEC(o, state)
94 * Internal version of cl_page_get().
96 * This function can be used to obtain an initial reference to a previously
97 * unreferenced cached object. It can be called only if concurrent page
98 * reclamation is somehow prevented, e.g., by keeping a lock on the VM page
99 * associated with \a page.
101 * Use with care! Not exported.
103 static void cl_page_get_trust(struct cl_page *page)
105 LASSERT(atomic_read(&page->cp_ref) > 0);
106 atomic_inc(&page->cp_ref);
110 * Returns a slice within a page, corresponding to the given layer in the
115 static const struct cl_page_slice *
116 cl_page_at_trusted(const struct cl_page *page,
117 const struct lu_device_type *dtype)
119 const struct cl_page_slice *slice;
122 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
123 if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
129 static void cl_page_free(const struct lu_env *env, struct cl_page *page)
131 struct cl_object *obj = page->cp_obj;
132 int pagesize = cl_object_header(obj)->coh_page_bufsize;
134 PASSERT(env, page, list_empty(&page->cp_batch));
135 PASSERT(env, page, page->cp_owner == NULL);
136 PASSERT(env, page, page->cp_state == CPS_FREEING);
139 while (!list_empty(&page->cp_layers)) {
140 struct cl_page_slice *slice;
142 slice = list_entry(page->cp_layers.next,
143 struct cl_page_slice, cpl_linkage);
144 list_del_init(page->cp_layers.next);
145 if (unlikely(slice->cpl_ops->cpo_fini != NULL))
146 slice->cpl_ops->cpo_fini(env, slice);
148 CS_PAGE_DEC(obj, total);
149 CS_PAGESTATE_DEC(obj, page->cp_state);
150 lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
151 cl_object_put(env, obj);
152 lu_ref_fini(&page->cp_reference);
153 OBD_FREE(page, pagesize);
158 * Helper function updating the page state. This is the only place in the code
159 * where the cl_page::cp_state field is mutated.
161 static inline void cl_page_state_set_trust(struct cl_page *page,
162 enum cl_page_state state)
165 *(enum cl_page_state *)&page->cp_state = state;
168 struct cl_page *cl_page_alloc(const struct lu_env *env,
169 struct cl_object *o, pgoff_t ind, struct page *vmpage,
170 enum cl_page_type type)
172 struct cl_page *page;
173 struct lu_object_header *head;
176 OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
180 atomic_set(&page->cp_ref, 1);
183 lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
185 page->cp_vmpage = vmpage;
186 cl_page_state_set_trust(page, CPS_CACHED);
187 page->cp_type = type;
188 INIT_LIST_HEAD(&page->cp_layers);
189 INIT_LIST_HEAD(&page->cp_batch);
190 lu_ref_init(&page->cp_reference);
191 head = o->co_lu.lo_header;
192 list_for_each_entry(o, &head->loh_layers,
194 if (o->co_ops->coo_page_init != NULL) {
195 result = o->co_ops->coo_page_init(env, o, page,
198 cl_page_delete0(env, page);
199 cl_page_free(env, page);
200 page = ERR_PTR(result);
206 CS_PAGE_INC(o, total);
207 CS_PAGE_INC(o, create);
208 CS_PAGESTATE_DEC(o, CPS_CACHED);
211 page = ERR_PTR(-ENOMEM);
217 * Returns a cl_page with index \a idx at the object \a o, and associated with
218 * the VM page \a vmpage.
220 * This is the main entry point into the cl_page caching interface. First, a
221 * cache (implemented as a per-object radix tree) is consulted. If the page is
222 * found there, it is returned immediately. Otherwise a new page is allocated
223 * and returned. In any case, an additional reference to the page is acquired.
225 * \see cl_object_find(), cl_lock_find()
227 struct cl_page *cl_page_find(const struct lu_env *env,
229 pgoff_t idx, struct page *vmpage,
230 enum cl_page_type type)
232 struct cl_page *page = NULL;
233 struct cl_object_header *hdr;
235 LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
240 hdr = cl_object_header(o);
241 CS_PAGE_INC(o, lookup);
243 CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
244 idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
246 if (type == CPT_CACHEABLE) {
247 /* vmpage lock is used to protect the child/parent
249 KLASSERT(PageLocked(vmpage));
251 * cl_vmpage_page() can be called here without any locks as
253 * - "vmpage" is locked (which prevents ->private from
254 * concurrent updates), and
256 * - "o" cannot be destroyed while current thread holds a
259 page = cl_vmpage_page(vmpage, o);
266 /* allocate and initialize cl_page */
267 page = cl_page_alloc(env, o, idx, vmpage, type);
270 EXPORT_SYMBOL(cl_page_find);
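/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that looks up or creates the cl_page backing an already locked VM page and
 * drops the reference when it is done.  The function name and the obj/env/idx
 * parameters below are assumptions made for the example only.
 */
#if 0
static int example_touch_page(const struct lu_env *env, struct cl_object *obj,
                              pgoff_t idx, struct page *vmpage)
{
        struct cl_page *pg;

        /* For CPT_CACHEABLE pages the VM page must already be locked. */
        pg = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
        if (IS_ERR(pg))
                return PTR_ERR(pg);

        /* ... use the page ... */

        cl_page_put(env, pg); /* drop the reference taken by cl_page_find() */
        return 0;
}
#endif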
272 static inline int cl_page_invariant(const struct cl_page *pg)
274 return cl_page_in_use_noref(pg);
277 static void cl_page_state_set0(const struct lu_env *env,
278 struct cl_page *page, enum cl_page_state state)
280 enum cl_page_state old;
283 * Matrix of allowed state transitions [old][new], for sanity
286 static const int allowed_transitions[CPS_NR][CPS_NR] = {
289 [CPS_OWNED] = 1, /* io finds existing cached page */
291 [CPS_PAGEOUT] = 1, /* write-out from the cache */
292 [CPS_FREEING] = 1, /* eviction on the memory pressure */
295 [CPS_CACHED] = 1, /* release to the cache */
297 [CPS_PAGEIN] = 1, /* start read immediately */
298 [CPS_PAGEOUT] = 1, /* start write immediately */
299 [CPS_FREEING] = 1, /* lock invalidation or truncate */
302 [CPS_CACHED] = 1, /* io completion */
309 [CPS_CACHED] = 1, /* io completion */
325 old = page->cp_state;
326 PASSERT(env, page, allowed_transitions[old][state]);
327 CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
328 PASSERT(env, page, page->cp_state == old);
329 PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));
331 CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
332 CS_PAGESTATE_INC(page->cp_obj, state);
333 cl_page_state_set_trust(page, state);
337 static void cl_page_state_set(const struct lu_env *env,
338 struct cl_page *page, enum cl_page_state state)
340 cl_page_state_set0(env, page, state);
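/*
 * Illustrative lifecycle (not part of the original file) implied by the
 * transition matrix above, assuming the common cacheable write-out path:
 *
 *	CPS_CACHED  -> CPS_OWNED	cl_page_own() / cl_page_assume()
 *	CPS_OWNED   -> CPS_PAGEOUT	cl_page_prep(CRT_WRITE)
 *	CPS_PAGEOUT -> CPS_CACHED	cl_page_completion()
 *	CPS_CACHED  -> CPS_FREEING	cl_page_delete()
 *
 * After the initial CPS_CACHED assignment in cl_page_alloc(), every state
 * change funnels through cl_page_state_set0(), so each step is checked
 * against allowed_transitions[][].
 */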
344 * Acquires an additional reference to a page.
346 * This can be called only by caller already possessing a reference to \a
349 * \see cl_object_get(), cl_lock_get().
351 void cl_page_get(struct cl_page *page)
354 cl_page_get_trust(page);
357 EXPORT_SYMBOL(cl_page_get);
360 * Releases a reference to a page.
362 * When last reference is released, page is returned to the cache, unless it
363 * is in cl_page_state::CPS_FREEING state, in which case it is immediately
366 * \see cl_object_put(), cl_lock_put().
368 void cl_page_put(const struct lu_env *env, struct cl_page *page)
371 CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
372 atomic_read(&page->cp_ref));
374 if (atomic_dec_and_test(&page->cp_ref)) {
375 LASSERT(page->cp_state == CPS_FREEING);
377 LASSERT(atomic_read(&page->cp_ref) == 0);
378 PASSERT(env, page, page->cp_owner == NULL);
379 PASSERT(env, page, list_empty(&page->cp_batch));
381 * Page is no longer reachable by other threads. Tear
384 cl_page_free(env, page);
389 EXPORT_SYMBOL(cl_page_put);
392 * Returns a cl_page associated with a VM page, and given cl_object.
394 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
396 struct cl_page *page;
399 KLASSERT(PageLocked(vmpage));
402 * NOTE: absence of races and liveness of data are guaranteed by the page
403 * lock on the "vmpage". That works because object destruction proceeds
404 * bottom-to-top.
407 page = (struct cl_page *)vmpage->private;
409 cl_page_get_trust(page);
410 LASSERT(page->cp_type == CPT_CACHEABLE);
414 EXPORT_SYMBOL(cl_vmpage_page);
416 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
417 const struct lu_device_type *dtype)
419 return cl_page_at_trusted(page, dtype);
421 EXPORT_SYMBOL(cl_page_at);
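/*
 * Illustrative sketch (not part of the original file): a layer finds its own
 * slice by passing its lu_device_type to cl_page_at().  The names
 * "example_device_type", "struct example_page" and "ep_cl" are assumptions
 * made for the example only.
 */
#if 0
static struct example_page *example_page_slice(const struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, &example_device_type);
        if (slice == NULL)
                return NULL;
        return container_of(slice, struct example_page, ep_cl);
}
#endif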
423 #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
425 #define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
427 const struct lu_env *__env = (_env); \
428 struct cl_page *__page = (_page); \
429 const struct cl_page_slice *__scan; \
431 ptrdiff_t __op = (_op); \
432 int (*__method)_proto; \
435 list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
436 __method = *(void **)((char *)__scan->cpl_ops + __op); \
437 if (__method != NULL) { \
438 __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
448 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
450 const struct lu_env *__env = (_env); \
451 struct cl_page *__page = (_page); \
452 const struct cl_page_slice *__scan; \
453 ptrdiff_t __op = (_op); \
454 void (*__method)_proto; \
456 list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
457 __method = *(void **)((char *)__scan->cpl_ops + __op); \
458 if (__method != NULL) \
459 (*__method)(__env, __scan, ## __VA_ARGS__); \
463 #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
465 const struct lu_env *__env = (_env); \
466 struct cl_page *__page = (_page); \
467 const struct cl_page_slice *__scan; \
468 ptrdiff_t __op = (_op); \
469 void (*__method)_proto; \
471 /* get to the bottom page. */ \
472 list_for_each_entry_reverse(__scan, &__page->cp_layers, \
474 __method = *(void **)((char *)__scan->cpl_ops + __op); \
475 if (__method != NULL) \
476 (*__method)(__env, __scan, ## __VA_ARGS__); \
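/*
 * Illustrative expansion (not part of the original file): with
 * _op == CL_PAGE_OP(cpo_own), the method lookup performed by the iteration
 * macros above is roughly equivalent to
 *
 *	int (*method)(const struct lu_env *, const struct cl_page_slice *,
 *		      struct cl_io *, int);
 *
 *	method = *(void **)((char *)slice->cpl_ops +
 *			    offsetof(struct cl_page_operations, cpo_own));
 *
 * i.e. the per-layer method pointer is fetched by its byte offset within
 * struct cl_page_operations, which lets a single macro dispatch any
 * operation over all slices.
 */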
480 static int cl_page_invoke(const struct lu_env *env,
481 struct cl_io *io, struct cl_page *page, ptrdiff_t op)
484 PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
486 RETURN(CL_PAGE_INVOKE(env, page, op,
487 (const struct lu_env *,
488 const struct cl_page_slice *, struct cl_io *),
492 static void cl_page_invoid(const struct lu_env *env,
493 struct cl_io *io, struct cl_page *page, ptrdiff_t op)
496 PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
498 CL_PAGE_INVOID(env, page, op,
499 (const struct lu_env *,
500 const struct cl_page_slice *, struct cl_io *), io);
504 static void cl_page_owner_clear(struct cl_page *page)
507 if (page->cp_owner != NULL) {
508 LASSERT(page->cp_owner->ci_owned_nr > 0);
509 page->cp_owner->ci_owned_nr--;
510 page->cp_owner = NULL;
515 static void cl_page_owner_set(struct cl_page *page)
518 LASSERT(page->cp_owner != NULL);
519 page->cp_owner->ci_owned_nr++;
523 void cl_page_disown0(const struct lu_env *env,
524 struct cl_io *io, struct cl_page *pg)
526 enum cl_page_state state;
529 state = pg->cp_state;
530 PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
531 PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
532 cl_page_owner_clear(pg);
534 if (state == CPS_OWNED)
535 cl_page_state_set(env, pg, CPS_CACHED);
537 * Completion call-backs are executed in bottom-up order, so that the
538 * uppermost layer (llite), responsible for VFS/VM interaction, runs
539 * last and can release locks safely.
541 CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
542 (const struct lu_env *,
543 const struct cl_page_slice *, struct cl_io *),
549 * Returns true iff the page is owned by the given io.
551 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
553 struct cl_io *top = cl_io_top((struct cl_io *)io);
554 LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
556 RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
558 EXPORT_SYMBOL(cl_page_is_owned);
561 * Try to own a page by IO.
563 * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
564 * into cl_page_state::CPS_OWNED state.
566 * \pre !cl_page_is_owned(pg, io)
567 * \post result == 0 iff cl_page_is_owned(pg, io)
571 * \retval -ve failure, e.g., page was destroyed (and landed in
572 * cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
573 * or, page was owned by another thread, or in IO.
575 * \see cl_page_disown()
576 * \see cl_page_operations::cpo_own()
577 * \see cl_page_own_try()
580 static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
581 struct cl_page *pg, int nonblock)
585 PINVRNT(env, pg, !cl_page_is_owned(pg, io));
590 if (pg->cp_state == CPS_FREEING) {
593 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
594 (const struct lu_env *,
595 const struct cl_page_slice *,
596 struct cl_io *, int),
599 PASSERT(env, pg, pg->cp_owner == NULL);
600 pg->cp_owner = cl_io_top(io);
601 cl_page_owner_set(pg);
602 if (pg->cp_state != CPS_FREEING) {
603 cl_page_state_set(env, pg, CPS_OWNED);
605 cl_page_disown0(env, io, pg);
610 PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
615 * Own a page; this may block.
617 * \see cl_page_own0()
619 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
621 return cl_page_own0(env, io, pg, 0);
623 EXPORT_SYMBOL(cl_page_own);
626 * Non-blocking version of cl_page_own().
628 * \see cl_page_own0()
630 int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
633 return cl_page_own0(env, io, pg, 1);
635 EXPORT_SYMBOL(cl_page_own_try);
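/*
 * Illustrative usage (not part of the original file), assuming "env", "io"
 * and "pg" exist in the caller:
 *
 *	if (cl_page_own(env, io, pg) == 0) {		may block in cpo_own()
 *		... page is in CPS_OWNED and can be operated on ...
 *		cl_page_disown(env, io, pg);		back to CPS_CACHED
 *	}
 *
 * cl_page_own_try() follows the same pattern but returns a negative value
 * instead of waiting when the page cannot be owned immediately.
 */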
639 * Assume page ownership.
641 * Called when page is already locked by the hosting VM.
643 * \pre !cl_page_is_owned(pg, io)
644 * \post cl_page_is_owned(pg, io)
646 * \see cl_page_operations::cpo_assume()
648 void cl_page_assume(const struct lu_env *env,
649 struct cl_io *io, struct cl_page *pg)
651 PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
656 cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
657 PASSERT(env, pg, pg->cp_owner == NULL);
658 pg->cp_owner = cl_io_top(io);
659 cl_page_owner_set(pg);
660 cl_page_state_set(env, pg, CPS_OWNED);
663 EXPORT_SYMBOL(cl_page_assume);
666 * Releases page ownership without unlocking the page.
668 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
669 * underlying VM page (as VM is supposed to do this itself).
671 * \pre cl_page_is_owned(pg, io)
672 * \post !cl_page_is_owned(pg, io)
674 * \see cl_page_assume()
676 void cl_page_unassume(const struct lu_env *env,
677 struct cl_io *io, struct cl_page *pg)
679 PINVRNT(env, pg, cl_page_is_owned(pg, io));
680 PINVRNT(env, pg, cl_page_invariant(pg));
684 cl_page_owner_clear(pg);
685 cl_page_state_set(env, pg, CPS_CACHED);
686 CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
687 (const struct lu_env *,
688 const struct cl_page_slice *, struct cl_io *),
692 EXPORT_SYMBOL(cl_page_unassume);
695 * Releases page ownership.
697 * Moves page into cl_page_state::CPS_CACHED.
699 * \pre cl_page_is_owned(pg, io)
700 * \post !cl_page_is_owned(pg, io)
703 * \see cl_page_operations::cpo_disown()
705 void cl_page_disown(const struct lu_env *env,
706 struct cl_io *io, struct cl_page *pg)
708 PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
709 pg->cp_state == CPS_FREEING);
713 cl_page_disown0(env, io, pg);
716 EXPORT_SYMBOL(cl_page_disown);
719 * Called when page is to be removed from the object, e.g., as a result of
722 * Calls cl_page_operations::cpo_discard() top-to-bottom.
724 * \pre cl_page_is_owned(pg, io)
726 * \see cl_page_operations::cpo_discard()
728 void cl_page_discard(const struct lu_env *env,
729 struct cl_io *io, struct cl_page *pg)
731 PINVRNT(env, pg, cl_page_is_owned(pg, io));
732 PINVRNT(env, pg, cl_page_invariant(pg));
734 cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
736 EXPORT_SYMBOL(cl_page_discard);
739 * Version of cl_page_delete() that can be called for pages that are not fully
740 * constructed, e.g., in the error-handling cl_page_find()->cl_page_delete0()
741 * path. Doesn't check the page invariant.
743 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
747 PASSERT(env, pg, pg->cp_state != CPS_FREEING);
750 * Sever all ways to obtain new pointers to @pg.
752 cl_page_owner_clear(pg);
754 cl_page_state_set0(env, pg, CPS_FREEING);
756 CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
757 (const struct lu_env *, const struct cl_page_slice *));
763 * Called when a decision is made to throw page out of memory.
765 * Notifies all layers about page destruction by calling
766 * cl_page_operations::cpo_delete() method top-to-bottom.
768 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
769 * where transition to this state happens).
771 * Eliminates all venues through which new references to the page can be
774 * - removes page from the radix trees,
776 * - breaks linkage from VM page to cl_page.
778 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
779 * drain after some time, at which point page will be recycled.
781 * \pre VM page is locked
782 * \post pg->cp_state == CPS_FREEING
784 * \see cl_page_operations::cpo_delete()
786 void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
788 PINVRNT(env, pg, cl_page_invariant(pg));
790 cl_page_delete0(env, pg);
793 EXPORT_SYMBOL(cl_page_delete);
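/*
 * Illustrative sketch (not part of the original file): roughly what a
 * VM-level invalidation does, with the VM page already locked:
 *
 *	page = cl_vmpage_page(vmpage, obj);	takes a reference
 *	if (page != NULL) {
 *		cl_page_delete(env, page);	moves the page to CPS_FREEING
 *		cl_page_put(env, page);		page is freed once the last
 *						reference drains
 *	}
 */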
796 * Marks page up-to-date.
798 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom. The
799 * layer responsible for VM interaction has to mark/clear the page as up-to-date
800 * as directed by the \a uptodate argument.
802 * \see cl_page_operations::cpo_export()
804 void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
806 PINVRNT(env, pg, cl_page_invariant(pg));
807 CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
808 (const struct lu_env *,
809 const struct cl_page_slice *, int), uptodate);
811 EXPORT_SYMBOL(cl_page_export);
814 * Returns true iff \a pg is VM-locked in a suitable sense by the calling
817 int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
820 const struct cl_page_slice *slice;
823 slice = container_of(pg->cp_layers.next,
824 const struct cl_page_slice, cpl_linkage);
825 PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
827 * Call ->cpo_is_vmlocked() directly instead of going through
828 * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
829 * cl_page_invariant().
831 result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
832 PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
833 RETURN(result == -EBUSY);
835 EXPORT_SYMBOL(cl_page_is_vmlocked);
837 static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
840 RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
843 static void cl_page_io_start(const struct lu_env *env,
844 struct cl_page *pg, enum cl_req_type crt)
847 * Page is queued for IO, change its state.
850 cl_page_owner_clear(pg);
851 cl_page_state_set(env, pg, cl_req_type_state(crt));
856 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
857 * called top-to-bottom. Every layer either agrees to submit this page (by
858 * returning 0), or requests to omit this page (by returning -EALREADY). Layer
859 * handling interactions with the VM also has to inform VM that page is under
862 int cl_page_prep(const struct lu_env *env, struct cl_io *io,
863 struct cl_page *pg, enum cl_req_type crt)
867 PINVRNT(env, pg, cl_page_is_owned(pg, io));
868 PINVRNT(env, pg, cl_page_invariant(pg));
869 PINVRNT(env, pg, crt < CRT_NR);
872 * XXX this has to be called bottom-to-top, so that llite can set up
873 * PG_writeback without risking other layers deciding to skip this
878 result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
880 cl_page_io_start(env, pg, crt);
882 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
885 EXPORT_SYMBOL(cl_page_prep);
888 * Notify layers about transfer completion.
890 * Invoked by the transfer sub-system (which is part of osc) to notify layers
891 * that a transfer, of which this page is a part, has completed.
893 * Completion call-backs are executed in bottom-up order, so that the
894 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
895 * and can release locks safely.
897 * \pre pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
898 * \post pg->cp_state == CPS_CACHED
900 * \see cl_page_operations::cpo_completion()
902 void cl_page_completion(const struct lu_env *env,
903 struct cl_page *pg, enum cl_req_type crt, int ioret)
905 struct cl_sync_io *anchor = pg->cp_sync_io;
907 PASSERT(env, pg, crt < CRT_NR);
908 PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
911 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
912 cl_page_state_set(env, pg, CPS_CACHED);
915 CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
916 (const struct lu_env *,
917 const struct cl_page_slice *, int), ioret);
918 if (anchor != NULL) {
919 LASSERT(pg->cp_sync_io == anchor);
920 pg->cp_sync_io = NULL;
921 cl_sync_io_note(env, anchor, ioret);
925 EXPORT_SYMBOL(cl_page_completion);
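/*
 * Illustrative transfer flow (not part of the original file): for a
 * write-out started on an owned page, the submitter roughly does
 *
 *	cl_page_prep(env, io, pg, CRT_WRITE);	each layer returns 0 to submit
 *						or -EALREADY to skip; on success
 *						the page moves to CPS_PAGEOUT
 *	... the transfer engine (osc) builds and sends the RPC ...
 *	cl_page_completion(env, pg, CRT_WRITE, ioret);
 *						back to CPS_CACHED, layers are
 *						notified bottom-to-top and any
 *						cl_sync_io anchor is signalled
 */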
928 * Notify layers that the transfer formation engine decided to yank this page
929 * from the cache and make it part of a transfer.
931 * \pre pg->cp_state == CPS_CACHED
932 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
934 * \see cl_page_operations::cpo_make_ready()
936 int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
937 enum cl_req_type crt)
941 PINVRNT(env, pg, crt < CRT_NR);
946 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
947 (const struct lu_env *,
948 const struct cl_page_slice *));
950 PASSERT(env, pg, pg->cp_state == CPS_CACHED);
951 cl_page_io_start(env, pg, crt);
953 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
956 EXPORT_SYMBOL(cl_page_make_ready);
959 * Called when a page is being written back at the kernel's request.
961 * \pre cl_page_is_owned(pg, io)
962 * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
964 * \see cl_page_operations::cpo_flush()
966 int cl_page_flush(const struct lu_env *env, struct cl_io *io,
971 PINVRNT(env, pg, cl_page_is_owned(pg, io));
972 PINVRNT(env, pg, cl_page_invariant(pg));
976 result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
978 CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
981 EXPORT_SYMBOL(cl_page_flush);
984 * Tells the transfer engine that only part of a page is to be transmitted.
986 * \see cl_page_operations::cpo_clip()
988 void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
991 PINVRNT(env, pg, cl_page_invariant(pg));
993 CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
994 CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
995 (const struct lu_env *,
996 const struct cl_page_slice *, int, int),
999 EXPORT_SYMBOL(cl_page_clip);
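/*
 * Illustrative usage (not part of the original file): for a write that ends
 * in the middle of the last page of a file, only the valid bytes are sent:
 *
 *	cl_page_clip(env, pg, 0, file_size & ~PAGE_MASK);
 *
 * where "file_size" stands for the caller's end-of-file offset and is
 * assumed not to be page aligned.
 */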
1002 * Prints a human-readable representation of \a pg via \a printer.
1004 void cl_page_header_print(const struct lu_env *env, void *cookie,
1005 lu_printer_t printer, const struct cl_page *pg)
1007 (*printer)(env, cookie,
1008 "page@%p[%d %p %d %d %p]\n",
1009 pg, atomic_read(&pg->cp_ref), pg->cp_obj,
1010 pg->cp_state, pg->cp_type,
1013 EXPORT_SYMBOL(cl_page_header_print);
1016 * Prints a human-readable representation of \a pg, and of its layers, via \a printer.
1018 void cl_page_print(const struct lu_env *env, void *cookie,
1019 lu_printer_t printer, const struct cl_page *pg)
1021 cl_page_header_print(env, cookie, printer, pg);
1022 CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
1023 (const struct lu_env *env,
1024 const struct cl_page_slice *slice,
1025 void *cookie, lu_printer_t p), cookie, printer);
1026 (*printer)(env, cookie, "end page@%p\n", pg);
1028 EXPORT_SYMBOL(cl_page_print);
1031 * Cancel a page which is still in a transfer.
1033 int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
1035 return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
1036 (const struct lu_env *,
1037 const struct cl_page_slice *));
1041 * Converts a page index within object \a obj into a byte offset.
1043 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1045 return (loff_t)idx << PAGE_SHIFT;
1047 EXPORT_SYMBOL(cl_offset);
1050 * Converts a byte offset within object \a obj into a page index.
1052 pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1054 return offset >> PAGE_SHIFT;
1056 EXPORT_SYMBOL(cl_index);
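/*
 * Illustrative relation (not part of the original file): cl_offset() and
 * cl_index() are inverses on page-aligned values, so for any index "idx"
 *
 *	cl_index(obj, cl_offset(obj, idx)) == idx
 *
 * and a byte offset "off" falls on page cl_index(obj, off) at in-page
 * offset (off & ~PAGE_MASK).
 */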
1058 size_t cl_page_size(const struct cl_object *obj)
1060 return 1UL << PAGE_SHIFT;
1062 EXPORT_SYMBOL(cl_page_size);
1065 * Adds a page slice to the compound page.
1067 * This is called by cl_object_operations::coo_page_init() methods to add
1068 * per-layer state to the page. The new state is added at the end of the
1069 * cl_page::cp_layers list, that is, at the bottom of the stack.
1071 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
1073 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
1074 struct cl_object *obj, pgoff_t index,
1075 const struct cl_page_operations *ops)
1078 list_add_tail(&slice->cpl_linkage, &page->cp_layers);
1079 slice->cpl_obj = obj;
1080 slice->cpl_index = index;
1081 slice->cpl_ops = ops;
1082 slice->cpl_page = page;
1085 EXPORT_SYMBOL(cl_page_slice_add);
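/*
 * Illustrative sketch (not part of the original file): a layer's
 * coo_page_init() method registers its slice with cl_page_slice_add().  The
 * names "struct example_page", "ep_cl" and "example_page_ops" are
 * assumptions, and cl_object_page_slice() is assumed to return this layer's
 * portion of the page buffer sized by coh_page_bufsize.
 */
#if 0
static int example_page_init(const struct lu_env *env, struct cl_object *obj,
                             struct cl_page *page, pgoff_t index)
{
        struct example_page *ep = cl_object_page_slice(obj, page);

        cl_page_slice_add(page, &ep->ep_cl, obj, index, &example_page_ops);
        return 0;
}
#endif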
1088 * Allocate and initialize cl_cache, called by ll_init_sbi().
1090 struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
1092 struct cl_client_cache *cache = NULL;
1095 OBD_ALLOC(cache, sizeof(*cache));
1099 /* Initialize cache data */
1100 atomic_set(&cache->ccc_users, 1);
1101 cache->ccc_lru_max = lru_page_max;
1102 atomic_long_set(&cache->ccc_lru_left, lru_page_max);
1103 spin_lock_init(&cache->ccc_lru_lock);
1104 INIT_LIST_HEAD(&cache->ccc_lru);
1106 /* turn unstable check off by default as it impacts performance */
1107 cache->ccc_unstable_check = 0;
1108 atomic_long_set(&cache->ccc_unstable_nr, 0);
1109 init_waitqueue_head(&cache->ccc_unstable_waitq);
1113 EXPORT_SYMBOL(cl_cache_init);
1116 * Increase cl_cache refcount
1118 void cl_cache_incref(struct cl_client_cache *cache)
1120 atomic_inc(&cache->ccc_users);
1122 EXPORT_SYMBOL(cl_cache_incref);
1125 * Decrease the cl_cache refcount and free the cache once it drops to zero.
1126 * Since llite, lov and osc all hold a cl_cache reference,
1127 * the free cannot race with any user. (LU-6173)
1129 void cl_cache_decref(struct cl_client_cache *cache)
1131 if (atomic_dec_and_test(&cache->ccc_users))
1132 OBD_FREE(cache, sizeof(*cache));
1134 EXPORT_SYMBOL(cl_cache_decref);
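/*
 * Illustrative lifecycle (not part of the original file): the client cache
 * is created once per mount and shared by reference counting:
 *
 *	cache = cl_cache_init(lru_page_max);	refcount starts at 1 (llite)
 *	cl_cache_incref(cache);			e.g. when handing it to osc
 *	...
 *	cl_cache_decref(cache);			each user drops its reference
 *	cl_cache_decref(cache);			the last put frees the structure
 */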