/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
static DEFINE_MUTEX(cl_page_kmem_mutex);

/* Per-size slab caches for cl_page buffers, filled lazily by
 * __cl_page_alloc() below (16 entries assumed, matching the lookup there).
 */
static struct kmem_cache *cl_page_kmem_array[16];
static unsigned short cl_page_kmem_size_array[16];

#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                          \
        do {                                                               \
                if (unlikely(!(expr))) {                                   \
                        CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
                        LASSERT(0);                                        \
                }                                                          \
        } while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define PINVRNT(env, page, expr)                                          \
        do {                                                               \
                if (unlikely(!(expr))) {                                   \
                        CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
                        LASSERT(0);                                        \
                }                                                          \
        } while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */

/* Page statistics are disabled by default due to their huge performance
 * penalty.
 */
static void cs_page_inc(const struct cl_object *obj,
                        enum cache_stats_item item)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_inc(&cl_object_site(obj)->cs_pages.cs_stats[item]);
#endif
}

static void cs_page_dec(const struct cl_object *obj,
                        enum cache_stats_item item)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_dec(&cl_object_site(obj)->cs_pages.cs_stats[item]);
#endif
}

static void cs_pagestate_inc(const struct cl_object *obj,
                             enum cl_page_state state)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_inc(&cl_object_site(obj)->cs_pages_state[state]);
#endif
}

static void cs_pagestate_dec(const struct cl_object *obj,
                             enum cl_page_state state)
{
#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        atomic_dec(&cl_object_site(obj)->cs_pages_state[state]);
#endif
}

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by keeping a lock on the VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        LASSERT(atomic_read(&page->cp_ref) > 0);
        atomic_inc(&page->cp_ref);
}

static struct cl_page_slice *
cl_page_slice_get(const struct cl_page *cl_page, int index)
{
        if (index < 0 || index >= cl_page->cp_layer_count)
                return NULL;

        /* To keep the cp_layer_offset values under 256 bytes, they are
         * stored as offsets beyond the end of struct cl_page.
         */
        return (struct cl_page_slice *)((char *)cl_page + sizeof(*cl_page) +
                                        cl_page->cp_layer_offset[index]);
}

#define cl_page_slice_for_each(cl_page, slice, i)               \
        for (i = 0, slice = cl_page_slice_get(cl_page, 0);      \
             i < (cl_page)->cp_layer_count;                     \
             slice = cl_page_slice_get(cl_page, ++i))

#define cl_page_slice_for_each_reverse(cl_page, slice, i)       \
        for (i = (cl_page)->cp_layer_count - 1,                 \
             slice = cl_page_slice_get(cl_page, i); i >= 0;     \
             slice = cl_page_slice_get(cl_page, --i))

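/*
 * Usage sketch: the iterators above are how per-layer methods are dispatched
 * throughout this file. For example, cl_page_discard() below walks the layers
 * top-to-bottom like this:
 *
 *      const struct cl_page_slice *slice;
 *      int i;
 *
 *      cl_page_slice_for_each(cp, slice, i) {
 *              if (slice->cpl_ops->cpo_discard != NULL)
 *                      (*slice->cpl_ops->cpo_discard)(env, slice, io);
 *      }
 *
 * Completion callbacks use the _reverse variant so that the uppermost layer
 * (llite) runs last; see cl_page_completion().
 */
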
/**
 * Returns the slice within \a cl_page that corresponds to the given layer
 * in the device stack.
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *cl_page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;
        int i;

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }

        return NULL;
}

static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
{
        int index = cl_page->cp_kmem_index;

        if (index >= 0) {
                LASSERT(index < ARRAY_SIZE(cl_page_kmem_array));
                LASSERT(cl_page_kmem_size_array[index] == bufsize);
                OBD_SLAB_FREE(cl_page, cl_page_kmem_array[index], bufsize);
        } else {
                OBD_FREE(cl_page, bufsize);
        }
}

static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
                         struct pagevec *pvec)
{
        struct cl_object *obj = cp->cp_obj;
        unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
        struct page *vmpage;

        PASSERT(env, cp, list_empty(&cp->cp_batch));
        PASSERT(env, cp, cp->cp_owner == NULL);
        PASSERT(env, cp, cp->cp_state == CPS_FREEING);

        if (cp->cp_type == CPT_CACHEABLE) {
                /* vmpage->private was already cleared when the page was
                 * moved into CPS_FREEING state.
                 */
                vmpage = cp->cp_vmpage;
                LASSERT(vmpage != NULL);
                LASSERT((struct cl_page *)vmpage->private != cp);

                if (pvec != NULL) {
                        if (!pagevec_add(pvec, vmpage))
                                pagevec_release(pvec);
                } else {
                        put_page(vmpage);
                }
        }

        cp->cp_layer_count = 0;
        cs_page_dec(obj, CS_total);
        cs_pagestate_dec(obj, cp->cp_state);
        lu_object_ref_del_at(&obj->co_lu, &cp->cp_obj_ref, "cl_page", cp);
        if (cp->cp_type != CPT_TRANSIENT)
                cl_object_put(env, obj);
        lu_ref_fini(&cp->cp_reference);
        __cl_page_free(cp, bufsize);
}

static struct cl_page *__cl_page_alloc(struct cl_object *o)
{
        int i = 0;
        struct cl_page *cl_page = NULL;
        unsigned short bufsize = cl_object_header(o)->coh_page_bufsize;

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_PAGE_ALLOC))
                return NULL;
check:
        /* the number of entries in cl_page_kmem_array is expected to
         * only be 2-3 entries, so the lookup overhead should be low.
         */
        for ( ; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
                if (smp_load_acquire(&cl_page_kmem_size_array[i])
                    == bufsize) {
                        OBD_SLAB_ALLOC_GFP(cl_page, cl_page_kmem_array[i],
                                           bufsize, GFP_NOFS);
                        if (cl_page)
                                cl_page->cp_kmem_index = i;
                        return cl_page;
                }
                if (cl_page_kmem_size_array[i] == 0)
                        break;
        }

        if (i < ARRAY_SIZE(cl_page_kmem_array)) {
                char cache_name[32];

                mutex_lock(&cl_page_kmem_mutex);
                if (cl_page_kmem_size_array[i]) {
                        mutex_unlock(&cl_page_kmem_mutex);
                        goto check;
                }
                snprintf(cache_name, sizeof(cache_name),
                         "cl_page_kmem-%u", bufsize);
                cl_page_kmem_array[i] =
                        kmem_cache_create(cache_name, bufsize,
                                          0, 0, NULL);
                if (cl_page_kmem_array[i] == NULL) {
                        mutex_unlock(&cl_page_kmem_mutex);
                        return NULL;
                }
                smp_store_release(&cl_page_kmem_size_array[i],
                                  bufsize);
                mutex_unlock(&cl_page_kmem_mutex);
                goto check;
        } else {
                OBD_ALLOC_GFP(cl_page, bufsize, GFP_NOFS);
                if (cl_page)
                        cl_page->cp_kmem_index = -1;
        }

        return cl_page;
}

struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                              pgoff_t ind, struct page *vmpage,
                              enum cl_page_type type)
{
        struct cl_page *cl_page;
        struct cl_object *head;

        cl_page = __cl_page_alloc(o);
        if (cl_page != NULL) {
                int result = 0;

                /*
                 * Please fix cl_page:cp_state/type declaration if
                 * these assertions fail in the future.
                 */
                BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
                BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */

                atomic_set(&cl_page->cp_ref, 1);
                cl_page->cp_obj = o;
                if (type != CPT_TRANSIENT)
                        cl_object_get(o);
                lu_object_ref_add_at(&o->co_lu, &cl_page->cp_obj_ref,
                                     "cl_page", cl_page);
                cl_page->cp_vmpage = vmpage;
                cl_page->cp_state = CPS_CACHED;
                cl_page->cp_type = type;
                if (type == CPT_TRANSIENT)
                        /* ref to correct inode will be added
                         * in ll_direct_rw_pages
                         */
                        cl_page->cp_inode = NULL;
                else
                        cl_page->cp_inode = page2inode(vmpage);
                INIT_LIST_HEAD(&cl_page->cp_batch);
                lu_ref_init(&cl_page->cp_reference);
                head = o;
                cl_page->cp_page_index = ind;
                cl_object_for_each(o, head) {
                        if (o->co_ops->coo_page_init != NULL) {
                                result = o->co_ops->coo_page_init(env, o,
                                                                  cl_page, ind);
                                if (result != 0) {
                                        cl_page_delete0(env, cl_page);
                                        cl_page_free(env, cl_page, NULL);
                                        cl_page = ERR_PTR(result);
                                        break;
                                }
                        }
                }
                if (result == 0) {
                        cs_page_inc(o, CS_total);
                        cs_page_inc(o, CS_create);
                        cs_pagestate_dec(o, CPS_CACHED);
                }
        } else {
                cl_page = ERR_PTR(-ENOMEM);
        }

        return cl_page;
}

/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In either case, an additional reference on the page is
 * acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
struct cl_page *cl_page_find(const struct lu_env *env,
                             struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        struct cl_page *page = NULL;
        struct cl_object_header *hdr;

        LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);

        hdr = cl_object_header(o);
        cs_page_inc(o, CS_lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);

        if (type == CPT_CACHEABLE) {
                /* vmpage lock is used to protect the child/parent
                 * relationship
                 */
                LASSERT(PageLocked(vmpage));
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while the current thread
                 *       holds a reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                if (page != NULL) {
                        cs_page_inc(o, CS_hit);
                        return page;
                }
        }

        /* allocate and initialize cl_page */
        page = cl_page_alloc(env, o, idx, vmpage, type);
        return page;
}
EXPORT_SYMBOL(cl_page_find);

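/*
 * Typical lookup sequence for a cacheable page (a minimal sketch; the real
 * callers live in llite's IO paths, outside this file):
 *
 *      lock_page(vmpage);
 *      page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
 *      if (!IS_ERR(page)) {
 *              ... use page ...
 *              cl_page_put(env, page);
 *      }
 *      unlock_page(vmpage);
 *
 * The vmpage lock satisfies the PageLocked() assertion above and keeps
 * vmpage->private stable while cl_vmpage_page() consults it.
 */
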
static inline int cl_page_invariant(const struct cl_page *pg)
{
        return cl_page_in_use_noref(pg);
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *cl_page,
                               enum cl_page_state state)
{
        enum cl_page_state old;

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking. Entries not listed default to 0 (forbidden).
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1, /* eviction on memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1, /* io completion */
                },
                [CPS_FREEING] = {
                        /* no transitions out of CPS_FREEING */
                },
        };

        old = cl_page->cp_state;
        PASSERT(env, cl_page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d -> %d\n", old, state);
        PASSERT(env, cl_page, cl_page->cp_state == old);
        PASSERT(env, cl_page, equi(state == CPS_OWNED,
                                   cl_page->cp_owner != NULL));

        cs_pagestate_dec(cl_page->cp_obj, cl_page->cp_state);
        cs_pagestate_inc(cl_page->cp_obj, state);
        cl_page->cp_state = state;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        cl_page_state_set0(env, page, state);
}

/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller that already possesses a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        cl_page_get_trust(page);
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page, using the pagevec to release the pages
 * in batch if provided.
 *
 * Users need to do a final pagevec_release() to release any trailing pages.
 */
void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
                    struct pagevec *pvec)
{
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       atomic_read(&page->cp_ref));

        if (atomic_dec_and_test(&page->cp_ref)) {
                LASSERT(page->cp_state == CPS_FREEING);

                LASSERT(atomic_read(&page->cp_ref) == 0);
                PASSERT(env, page, page->cp_owner == NULL);
                PASSERT(env, page, list_empty(&page->cp_batch));
                /*
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
                cl_page_free(env, page, pvec);
        }
}
EXPORT_SYMBOL(cl_pagevec_put);

/**
 * Releases a reference to a page; a wrapper around cl_pagevec_put().
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * freed immediately.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        cl_pagevec_put(env, page, NULL);
}
EXPORT_SYMBOL(cl_page_put);

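/*
 * Reference handling sketch: every cl_page_get() (or the reference implicitly
 * returned by cl_page_find()) must be balanced by a cl_page_put(), e.g.
 *
 *      cl_page_get(page);
 *      ... hand the page to asynchronous IO ...
 *      cl_page_put(env, page);
 *
 * When releasing many pages, cl_pagevec_put() can be used instead, followed
 * by a final pagevec_release() on the caller's pagevec.
 */
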
/**
 * Returns the cl_page associated with a VM page, for the given cl_object.
 */
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
        struct cl_page *page;

        LASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */
        page = (struct cl_page *)vmpage->private;
        if (page != NULL) {
                cl_page_get_trust(page);
                LASSERT(page->cp_type == CPT_CACHEABLE);
        }

        return page;
}
EXPORT_SYMBOL(cl_vmpage_page);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

static void cl_page_owner_clear(struct cl_page *page)
{
        if (page->cp_owner != NULL) {
                LASSERT(page->cp_owner->ci_owned_nr > 0);
                page->cp_owner->ci_owned_nr--;
                page->cp_owner = NULL;
        }
}

static void cl_page_owner_set(struct cl_page *page)
{
        LASSERT(page->cp_owner != NULL);
        page->cp_owner->ci_owned_nr++;
}

void cl_page_disown0(const struct lu_env *env, struct cl_page *cp)
{
        struct page *vmpage;
        enum cl_page_state state;

        state = cp->cp_state;
        PINVRNT(env, cp, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, cp, cl_page_invariant(cp) || state == CPS_FREEING);
        cl_page_owner_clear(cp);

        if (state == CPS_OWNED)
                cl_page_state_set(env, cp, CPS_CACHED);

        if (cp->cp_type == CPT_CACHEABLE) {
                vmpage = cp->cp_vmpage;
                LASSERT(vmpage != NULL);
                LASSERT(PageLocked(vmpage));
                unlock_page(vmpage);
        }
}

/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        struct cl_io *top = cl_io_top((struct cl_io *)io);

        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == top);
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Try to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, then switches
 * it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(cl_page, io)
 * \post result == 0 iff cl_page_is_owned(cl_page, io)
 *
 * \retval -ve failure, e.g., cl_page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
 *             or the page was owned by another thread, or in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *cl_page, int nonblock)
{
        struct page *vmpage = cl_page->cp_vmpage;
        int result;

        PINVRNT(env, cl_page, !cl_page_is_owned(cl_page, io));

        if (cl_page->cp_state == CPS_FREEING) {
                result = -ENOENT;
                goto out;
        }

        LASSERT(vmpage != NULL);

        if (cl_page->cp_type == CPT_TRANSIENT) {
                /* nothing to lock for a transient page */
        } else if (nonblock) {
                if (!trylock_page(vmpage)) {
                        result = -EAGAIN;
                        goto out;
                }
                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        result = -EAGAIN;
                        goto out;
                }
        } else {
                lock_page(vmpage);
                wait_on_page_writeback(vmpage);
        }

        PASSERT(env, cl_page, cl_page->cp_owner == NULL);
        cl_page->cp_owner = cl_io_top(io);
        cl_page_owner_set(cl_page);

        if (cl_page->cp_state == CPS_FREEING) {
                cl_page_disown0(env, cl_page);
                result = -ENOENT;
                goto out;
        }

        cl_page_state_set(env, cl_page, CPS_OWNED);
        result = 0;
out:
        PINVRNT(env, cl_page, ergo(result == 0,
                                   cl_page_invariant(cl_page)));
        return result;
}

/**
 * Own a page, might be blocked.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblock version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);

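/*
 * Ownership sketch (assumes the caller already holds a reference on pg):
 *
 *      if (cl_page_own(env, io, pg) == 0) {
 *              ... pg is now in CPS_OWNED; do the IO ...
 *              cl_page_disown(env, io, pg);
 *      }
 *
 * cl_page_own_try() behaves the same but fails instead of blocking when the
 * underlying vmpage cannot be locked immediately.
 */
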
/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre  !cl_page_is_owned(cp, io)
 * \post cl_page_is_owned(cp, io)
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *cp)
{
        struct page *vmpage;

        PINVRNT(env, cp, cl_object_same(cp->cp_obj, io->ci_obj));

        if (cp->cp_type == CPT_CACHEABLE) {
                vmpage = cp->cp_vmpage;
                LASSERT(vmpage != NULL);
                LASSERT(PageLocked(vmpage));
                wait_on_page_writeback(vmpage);
        }

        PASSERT(env, cp, cp->cp_owner == NULL);
        cp->cp_owner = cl_io_top(io);
        cl_page_owner_set(cp);
        cl_page_state_set(env, cp, CPS_OWNED);
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves the cl_page into cl_page_state::CPS_CACHED without releasing the
 * lock on the underlying VM page (the VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(cp, io)
 * \post !cl_page_is_owned(cp, io)
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *cp)
{
        struct page *vmpage;

        PINVRNT(env, cp, cl_page_is_owned(cp, io));
        PINVRNT(env, cp, cl_page_invariant(cp));

        cl_page_owner_clear(cp);
        cl_page_state_set(env, cp, CPS_CACHED);

        if (cp->cp_type == CPT_CACHEABLE) {
                vmpage = cp->cp_vmpage;
                LASSERT(vmpage != NULL);
                LASSERT(PageLocked(vmpage));
        }
}
EXPORT_SYMBOL(cl_page_unassume);

/**
 * Releases page ownership.
 *
 * Moves the page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
                pg->cp_state == CPS_FREEING);

        cl_page_disown0(env, pg);
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when a cl_page is to be removed from the object, e.g.,
 * as a result of truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(cl_page, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *cp)
{
        struct page *vmpage;
        const struct cl_page_slice *slice;
        int i;

        PINVRNT(env, cp, cl_page_is_owned(cp, io));
        PINVRNT(env, cp, cl_page_invariant(cp));

        cl_page_slice_for_each(cp, slice, i) {
                if (slice->cpl_ops->cpo_discard != NULL)
                        (*slice->cpl_ops->cpo_discard)(env, slice, io);
        }

        if (cp->cp_type == CPT_CACHEABLE) {
                vmpage = cp->cp_vmpage;
                LASSERT(vmpage != NULL);
                LASSERT(PageLocked(vmpage));
                generic_error_remove_page(vmpage->mapping, vmpage);
        } else {
                cl_page_delete(env, cp);
        }
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * cl_pages, e.g. in the error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check the cl_page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *cp)
{
        struct page *vmpage;
        const struct cl_page_slice *slice;
        int refc;
        int i;

        PASSERT(env, cp, cp->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @cp.
         */
        cl_page_owner_clear(cp);
        cl_page_state_set0(env, cp, CPS_FREEING);

        cl_page_slice_for_each_reverse(cp, slice, i) {
                if (slice->cpl_ops->cpo_delete != NULL)
                        (*slice->cpl_ops->cpo_delete)(env, slice);
        }

        if (cp->cp_type == CPT_CACHEABLE) {
                vmpage = cp->cp_vmpage;
                LASSERT(PageLocked(vmpage));
                LASSERT((struct cl_page *)vmpage->private == cp);

                /* Drop the reference count held in vvp_page_init */
                refc = atomic_dec_return(&cp->cp_ref);
                LASSERTF(refc >= 1, "page = %p, refc = %d\n", cp, refc);

                ClearPagePrivate(vmpage);
                vmpage->private = 0;

                /*
                 * The reference from vmpage to cl_page is removed,
                 * but the reference back is still here. It is removed
                 * later in cl_page_free().
                 */
        }
}

/**
 * Called when a decision is made to throw the page out of memory.
 *
 * Notifies all layers about page destruction by calling the
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves the page into cl_page_state::CPS_FREEING state (this is the only
 * place where a transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes the page from the radix trees,
 *
 *     - breaks the linkage from the VM page to the cl_page.
 *
 * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
 * will drain after some time, at which point the page will be recycled.
 *
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);

void cl_page_touch(const struct lu_env *env,
                   const struct cl_page *cl_page, size_t to)
{
        const struct cl_page_slice *slice;
        int i;

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_page_touch != NULL)
                        (*slice->cpl_ops->cpo_page_touch)(env, slice, to);
        }
}
EXPORT_SYMBOL(cl_page_touch);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /* Page is queued for IO, change its state. */
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
}

/**
 * Prepares a page for immediate transfer. Returns -EALREADY if this page
 * should be omitted from the transfer.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *cp, enum cl_req_type crt)
{
        struct page *vmpage = cp->cp_vmpage;
        int rc = 0;

        PASSERT(env, cp, crt < CRT_NR);
        PINVRNT(env, cp, cl_page_is_owned(cp, io));
        PINVRNT(env, cp, cl_page_invariant(cp));

        if (cp->cp_type == CPT_TRANSIENT) {
                /* nothing to do for transient pages */
        } else if (crt == CRT_READ) {
                if (PageUptodate(vmpage))
                        GOTO(out, rc = -EALREADY);
        } else {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageDirty(vmpage));

                /* ll_writepage path is not a sync write, so need to
                 * set page writeback flag
                 */
                if (cp->cp_sync_io == NULL)
                        set_page_writeback(vmpage);
        }

        cl_page_io_start(env, cp, crt);
out:
        CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);

        return rc;
}
EXPORT_SYMBOL(cl_page_prep);

/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
 * \post cl_page->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *cl_page, enum cl_req_type crt,
                        int ioret)
{
        const struct cl_page_slice *slice;
        struct cl_sync_io *anchor = cl_page->cp_sync_io;
        int i;

        PASSERT(env, cl_page, crt < CRT_NR);
        PASSERT(env, cl_page, cl_page->cp_state == cl_req_type_state(crt));

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
        cl_page_state_set(env, cl_page, CPS_CACHED);

        cl_page_slice_for_each_reverse(cl_page, slice, i) {
                if (slice->cpl_ops->io[crt].cpo_completion != NULL)
                        (*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
                                                                  ioret);
        }

        if (anchor != NULL) {
                LASSERT(cl_page->cp_sync_io == anchor);
                cl_page->cp_sync_io = NULL;
                cl_sync_io_note(env, anchor, ioret);
        }
}
EXPORT_SYMBOL(cl_page_completion);

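/*
 * Transfer state sketch, following the transition matrix above:
 *
 *      CPS_OWNED  --cl_page_prep()-------> CPS_PAGEIN / CPS_PAGEOUT
 *      CPS_PAGEIN / CPS_PAGEOUT --cl_page_completion()--> CPS_CACHED
 *
 * so a write-out path looks roughly like (minimal sketch, error handling and
 * RPC submission omitted):
 *
 *      rc = cl_page_prep(env, io, cp, CRT_WRITE);
 *      if (rc == 0) {
 *              ... submit the transfer; when it finishes, osc calls ...
 *              cl_page_completion(env, cp, CRT_WRITE, ioret);
 *      }
 */
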
/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and to make it a part of a transfer.
 *
 * \pre  cl_page->cp_state == CPS_CACHED
 * \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *cp,
                       enum cl_req_type crt)
{
        struct page *vmpage = cp->cp_vmpage;
        int rc = 0;

        PASSERT(env, cp, crt == CRT_WRITE);

        if (cp->cp_type == CPT_TRANSIENT)
                GOTO(out, rc = 0);

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(cp->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the
                 * radix tree.
                 */
                set_page_writeback(vmpage);
                CL_PAGE_HEADER(D_PAGE, env, cp, "readied\n");
        } else if (cp->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page()
                 * to already make it ready?
                 */
                rc = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, cp,
                              "unexpecting page state %d\n",
                              cp->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
out:
        if (rc == 0) {
                PASSERT(env, cp, cp->cp_state == CPS_CACHED);
                cl_page_io_start(env, cp, crt);
        }

        CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);

        return rc;
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Called when a page is being written back at the kernel's request.
 *
 * \pre  cl_page_is_owned(cl_page, io)
 * \post ergo(result == 0, cl_page->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_flush()
 */
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
                  struct cl_page *cl_page)
{
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_flush != NULL)
                        result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
                if (result != 0)
                        break;
        }
        if (result > 0)
                result = 0;

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d\n", result);
        return result;
}
EXPORT_SYMBOL(cl_page_flush);

/**
 * Tells the transfer engine that only part of a page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *cl_page,
                  int from, int to)
{
        const struct cl_page_slice *slice;
        int i;

        PINVRNT(env, cl_page, cl_page_invariant(cl_page));

        CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", from, to);
        cl_page_slice_for_each(cl_page, slice, i) {
                if (slice->cpl_ops->cpo_clip != NULL)
                        (*slice->cpl_ops->cpo_clip)(env, slice, from, to);
        }
}
EXPORT_SYMBOL(cl_page_clip);

/**
 * Prints a human readable representation of \a pg to \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p %d %d %p]\n",
                   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_state, pg->cp_type,
                   pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human readable representation of \a cp to \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *cp)
{
        struct page *vmpage = cp->cp_vmpage;
        const struct cl_page_slice *slice;
        int result = 0;
        int i;

        cl_page_header_print(env, cookie, printer, cp);

        (*printer)(env, cookie, "vmpage @%p", vmpage);

        if (vmpage != NULL) {
                (*printer)(env, cookie, " %lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        cl_page_slice_for_each(cp, slice, i) {
                if (slice->cpl_ops->cpo_print != NULL)
                        result = (*slice->cpl_ops->cpo_print)(env, slice,
                                                              cookie, printer);
                if (result != 0)
                        break;
        }

        (*printer)(env, cookie, "end page@%p\n", cp);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

size_t cl_page_size(const struct cl_object *obj)
{
        return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);

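/*
 * Worked example of the conversions above with 4KiB pages (PAGE_SHIFT == 12):
 * cl_offset(obj, 3) == 12288, cl_index(obj, 12288) == 3, and
 * cl_page_size(obj) == 4096. Byte offsets that are not page aligned are
 * rounded down by cl_index(), e.g. cl_index(obj, 12289) == 3.
 */
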
/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
                       struct cl_object *obj,
                       const struct cl_page_operations *ops)
{
        unsigned int offset = (char *)slice -
                        ((char *)cl_page + sizeof(*cl_page));

        LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
        LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
        cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
        slice->cpl_obj = obj;
        slice->cpl_ops = ops;
        slice->cpl_page = cl_page;
}
EXPORT_SYMBOL(cl_page_slice_add);

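/*
 * Sketch of a layer's coo_page_init() method using cl_page_slice_add().
 * The structure and operation names here are illustrative only; real
 * implementations are vvp_page_init(), lov_page_init() and osc_page_init()
 * in their respective layers.
 *
 *      struct my_page {
 *              struct cl_page_slice mp_cl;
 *      };
 *
 *      static int my_page_init(const struct lu_env *env,
 *                              struct cl_object *obj,
 *                              struct cl_page *cl_page, pgoff_t index)
 *      {
 *              struct my_page *mp = cl_object_page_slice(obj, cl_page);
 *
 *              cl_page_slice_add(cl_page, &mp->mp_cl, obj, &my_page_ops);
 *              return 0;
 *      }
 */
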
/**
 * Allocate and initialize the cl_cache; called by ll_init_sbi().
 */
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
{
        struct cl_client_cache *cache = NULL;

        OBD_ALLOC(cache, sizeof(*cache));
        if (cache == NULL)
                return NULL;

        /* Initialize cache data */
        atomic_set(&cache->ccc_users, 1);
        cache->ccc_lru_max = lru_page_max;
        atomic_long_set(&cache->ccc_lru_left, lru_page_max);
        spin_lock_init(&cache->ccc_lru_lock);
        INIT_LIST_HEAD(&cache->ccc_lru);

        /* turn unstable check off by default as it impacts performance */
        cache->ccc_unstable_check = 0;
        atomic_long_set(&cache->ccc_unstable_nr, 0);
        init_waitqueue_head(&cache->ccc_unstable_waitq);
        mutex_init(&cache->ccc_max_cache_mb_lock);

        return cache;
}
EXPORT_SYMBOL(cl_cache_init);

/**
 * Increase the cl_cache refcount.
 */
void cl_cache_incref(struct cl_client_cache *cache)
{
        atomic_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);

/**
 * Decrease the cl_cache refcount and free the cache if the refcount reaches
 * zero. Since llite, lov and osc all hold a cl_cache refcount, the free will
 * not cause a race. (LU-6173)
 */
void cl_cache_decref(struct cl_client_cache *cache)
{
        if (atomic_dec_and_test(&cache->ccc_users))
                OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(cl_cache_decref);
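
/*
 * Lifecycle sketch for the client cache descriptor (minimal, as used from
 * llite at mount time; error handling omitted):
 *
 *      struct cl_client_cache *cache;
 *
 *      cache = cl_cache_init(lru_page_max);    refcount starts at 1
 *      cl_cache_incref(cache);                 extra user, e.g. osc
 *      ...
 *      cl_cache_decref(cache);                 drop the extra user
 *      cl_cache_decref(cache);                 last reference frees the cache
 */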