/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Page.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <libcfs/list.h>

#include <cl_object.h>
#include "cl_internal.h"

static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                            int radix);

static cfs_mem_cache_t *cl_page_kmem = NULL;

static struct lu_kmem_descr cl_page_caches[] = {
        {
                .ckd_cache = &cl_page_kmem,
                .ckd_name  = "cl_page_kmem",
                .ckd_size  = sizeof (struct cl_page)
        },
        {
                .ckd_cache = NULL
        }
};

#ifdef LIBCFS_DEBUG
# define PASSERT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LASSERT(0);                                           \
          }                                                             \
  } while (0)
#else /* !LIBCFS_DEBUG */
# define PASSERT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !LIBCFS_DEBUG */

#ifdef INVARIANT_CHECK
# define PINVRNT(env, page, expr)                                       \
  do {                                                                  \
          if (unlikely(!(expr))) {                                      \
                  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
                  LINVRNT(0);                                           \
          }                                                             \
  } while (0)
#else /* !INVARIANT_CHECK */
# define PINVRNT(env, page, exp) \
        ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
#endif /* !INVARIANT_CHECK */

/**
 * Internal version of cl_page_top(); must be called with the page
 * referenced, or with coh_page_guard held.
 */
static struct cl_page *cl_page_top_trusted(struct cl_page *page)
{
        while (page->cp_parent != NULL)
                page = page->cp_parent;
        return page;
}

/**
 * Internal version of cl_page_get().
 *
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
 * reclamation is somehow prevented, e.g., by locking the page radix-tree
 * (cl_object_header::coh_page_guard), or by keeping a lock on the VM page
 * associated with \a page.
 *
 * Use with care! Not exported.
 */
static void cl_page_get_trust(struct cl_page *page)
{
        /*
         * Checkless version for trusted users.
         */
        if (cfs_atomic_inc_return(&page->cp_ref) == 1)
                cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
}

/**
 * Returns a slice within a page, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_lock_at()
 */
static const struct cl_page_slice *
cl_page_at_trusted(const struct cl_page *page,
                   const struct lu_device_type *dtype)
{
        const struct cl_page_slice *slice;

#ifdef INVARIANT_CHECK
        struct cl_object_header *ch = cl_object_header(page->cp_obj);

        if (!cfs_atomic_read(&page->cp_ref))
                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
#endif
        ENTRY;

        page = cl_page_top_trusted((struct cl_page *)page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
                                RETURN(slice);
                }
                page = page->cp_child;
        } while (page != NULL);
        RETURN(NULL);
}

/**
 * Returns a page with given index in the given object, or NULL if no page is
 * found. Acquires a reference on \a page.
 *
 * Locking: called under the cl_object_header::coh_page_guard spin-lock.
 */
struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
{
        struct cl_page *page;

        LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);

        page = radix_tree_lookup(&hdr->coh_tree, index);
        if (page != NULL)
                cl_page_get_trust(page);
        return page;
}
EXPORT_SYMBOL(cl_page_lookup);

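/*
 * Usage sketch (illustrative, not part of the original source): the caller
 * must hold coh_page_guard across the lookup, and is responsible for the
 * acquired reference afterwards:
 *
 *         cfs_spin_lock(&hdr->coh_page_guard);
 *         page = cl_page_lookup(hdr, index);
 *         cfs_spin_unlock(&hdr->coh_page_guard);
 *         if (page != NULL) {
 *                 ... use the page, then drop the reference ...
 *                 cl_page_put(env, page);
 *         }
 */
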
/**
 * Returns a list of pages by a given [start, end] of \a obj.
 *
 * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
 * crucial in the face of [offset, EOF] locks.
 *
 * At least one page is returned in \a queue unless there is no covered
 * page. Rather than hogging the CPU for too long, the function may stop
 * early and return CLP_GANG_RESCHED, in which case the caller should
 * implement retry logic (see the usage sketch following this function).
 */
int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                        struct cl_io *io, pgoff_t start, pgoff_t end,
                        struct cl_page_list *queue)
{
        struct cl_object_header *hdr;
        struct cl_page          *page;
        struct cl_page         **pvec;
        const struct cl_page_slice  *slice;
        const struct lu_device_type *dtype;
        pgoff_t                  idx;
        unsigned int             nr;
        unsigned int             i;
        unsigned int             j;
        int                      res = CLP_GANG_OKAY;
        int                      tree_lock = 1;
        ENTRY;

        idx = start;
        hdr = cl_object_header(obj);
        pvec = cl_env_info(env)->clt_pvec;
        dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
        cfs_spin_lock(&hdr->coh_page_guard);
        while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                            idx, CLT_PVEC_SIZE)) > 0) {
                int end_of_region = 0;
                idx = pvec[nr - 1]->cp_index + 1;
                for (i = 0, j = 0; i < nr; ++i) {
                        page = pvec[i];
                        pvec[i] = NULL;

                        LASSERT(page->cp_type == CPT_CACHEABLE);
                        if (page->cp_index > end) {
                                end_of_region = 1;
                                break;
                        }
                        if (page->cp_state == CPS_FREEING)
                                continue;

                        slice = cl_page_at_trusted(page, dtype);
                        /*
                         * Pages of an lsm-less file have no underlying
                         * sub-page for osc, in case of ...
                         */
                        PASSERT(env, page, slice != NULL);

                        page = slice->cpl_page;
                        /*
                         * Can safely call cl_page_get_trust() under the
                         * radix-tree spin-lock.
                         *
                         * XXX not true, because @page is from an object other
                         * than @hdr and protected by a different tree lock.
                         */
                        cl_page_get_trust(page);
                        lu_ref_add_atomic(&page->cp_reference,
                                          "page_list", cfs_current());
                        pvec[j++] = page;
                }

                /*
                 * Here a delicate locking dance is performed. The current
                 * thread holds a reference to a page, but has to own it
                 * before it can be placed into queue. Owning implies waiting,
                 * so the radix-tree lock is to be released. After a wait one
                 * has to check that pages weren't truncated (cl_page_own()
                 * returns an error in the latter case).
                 */
                cfs_spin_unlock(&hdr->coh_page_guard);
                tree_lock = 0;

                for (i = 0; i < j; ++i) {
                        page = pvec[i];
                        if (res == CLP_GANG_OKAY) {
                                typeof(cl_page_own) *page_own;

                                page_own = queue->pl_nr ?
                                           cl_page_own_try : cl_page_own;
                                if (page_own(env, io, page) == 0) {
                                        cl_page_list_add(queue, page);
                                } else if (page->cp_state != CPS_FREEING) {
                                        /* cl_page_own() won't fail unless
                                         * the page is being freed. */
                                        LASSERT(queue->pl_nr != 0);
                                        res = CLP_GANG_AGAIN;
                                }
                        }
                        lu_ref_del(&page->cp_reference,
                                   "page_list", cfs_current());
                        cl_page_put(env, page);
                }
                if (nr < CLT_PVEC_SIZE || end_of_region)
                        break;

                /* Only ask for a reschedule if the queue is non-empty;
                 * returning CLP_GANG_RESCHED with zero pages would mislead
                 * the caller into thinking there are no pages left. */
                if (queue->pl_nr && cfs_need_resched())
                        res = CLP_GANG_RESCHED;
                if (res != CLP_GANG_OKAY)
                        break;

                cfs_spin_lock(&hdr->coh_page_guard);
                tree_lock = 1;
        }
        if (tree_lock)
                cfs_spin_unlock(&hdr->coh_page_guard);
        RETURN(res);
}
EXPORT_SYMBOL(cl_page_gang_lookup);

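/*
 * Usage sketch (illustrative, not part of the original source): callers are
 * expected to loop on the CLP_GANG_* result, yielding the CPU on
 * CLP_GANG_RESCHED, as cl_pages_prune() below does:
 *
 *         do {
 *                 cl_page_list_init(plist);
 *                 res = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
 *                                           plist);
 *                 ... process and disown the pages in plist ...
 *                 cl_page_list_fini(env, plist);
 *                 if (res == CLP_GANG_RESCHED)
 *                         cfs_cond_resched();
 *         } while (res != CLP_GANG_OKAY);
 */
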
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object *obj  = page->cp_obj;
        struct cl_site   *site = cl_object_site(obj);

        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        PASSERT(env, page, page->cp_parent == NULL);
        PASSERT(env, page, page->cp_state == CPS_FREEING);

        ENTRY;
        cfs_might_sleep();
        while (!cfs_list_empty(&page->cp_layers)) {
                struct cl_page_slice *slice;

                slice = cfs_list_entry(page->cp_layers.next,
                                       struct cl_page_slice, cpl_linkage);
                cfs_list_del_init(page->cp_layers.next);
                slice->cpl_ops->cpo_fini(env, slice);
        }
        cfs_atomic_dec(&site->cs_pages.cs_total);

#ifdef LUSTRE_PAGESTATE_TRACKING
        cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
#endif
        lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
        cl_object_put(env, obj);
        lu_ref_fini(&page->cp_reference);
        OBD_SLAB_FREE_PTR(page, cl_page_kmem);
        EXIT;
}

/**
 * Helper function updating page state. This is the only place in the code
 * where the cl_page::cp_state field is mutated.
 */
static inline void cl_page_state_set_trust(struct cl_page *page,
                                           enum cl_page_state state)
{
        /* bypass const. */
        *(enum cl_page_state *)&page->cp_state = state;
}

static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                         pgoff_t ind, struct page *vmpage,
                         enum cl_page_type type, struct cl_page **out)
{
        struct cl_page          *page;
        struct cl_page          *err  = NULL;
        struct lu_object_header *head;
        struct cl_site          *site = cl_object_site(o);
        int                      result;

        ENTRY;
        result = +1;
        OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
        if (page != NULL) {
                cfs_atomic_set(&page->cp_ref, 1);
                page->cp_obj = o;
                cl_object_get(o);
                page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
                                                     "cl_page", page);
                page->cp_index = ind;
                cl_page_state_set_trust(page, CPS_CACHED);
                page->cp_type = type;
                CFS_INIT_LIST_HEAD(&page->cp_layers);
                CFS_INIT_LIST_HEAD(&page->cp_batch);
                CFS_INIT_LIST_HEAD(&page->cp_flight);
                cfs_mutex_init(&page->cp_mutex);
                lu_ref_init(&page->cp_reference);
                head = o->co_lu.lo_header;
                cfs_list_for_each_entry(o, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        if (o->co_ops->coo_page_init != NULL) {
                                err = o->co_ops->coo_page_init(env, o,
                                                               page, vmpage);
                                if (err != NULL) {
                                        cl_page_delete0(env, page, 0);
                                        cl_page_free(env, page);
                                        page = err;
                                        break;
                                }
                        }
                }
                if (err == NULL) {
                        cfs_atomic_inc(&site->cs_pages.cs_busy);
                        cfs_atomic_inc(&site->cs_pages.cs_total);

#ifdef LUSTRE_PAGESTATE_TRACKING
                        cfs_atomic_inc(&site->cs_pages_state[CPS_CACHED]);
#endif
                        cfs_atomic_inc(&site->cs_pages.cs_created);
                        result = 0;
                }
        } else
                page = ERR_PTR(-ENOMEM);
        *out = page;
        RETURN(result);
}

/**
 * Returns a cl_page with index \a idx at the object \a o, and associated with
 * the VM page \a vmpage.
 *
 * This is the main entry point into the cl_page caching interface. First, a
 * cache (implemented as a per-object radix tree) is consulted. If the page is
 * found there, it is returned immediately. Otherwise a new page is allocated
 * and returned. In either case, an additional reference to the page is
 * acquired.
 *
 * \see cl_object_find(), cl_lock_find()
 */
static struct cl_page *cl_page_find0(const struct lu_env *env,
                                     struct cl_object *o,
                                     pgoff_t idx, struct page *vmpage,
                                     enum cl_page_type type,
                                     struct cl_page *parent)
{
        struct cl_page          *page = NULL;
        struct cl_page          *ghost = NULL;
        struct cl_object_header *hdr;
        struct cl_site          *site = cl_object_site(o);
        int err;

        LINVRNT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
        cfs_might_sleep();

        ENTRY;

        hdr = cl_object_header(o);
        cfs_atomic_inc(&site->cs_pages.cs_lookup);

        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
               idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
        /* fast path. */
        if (type == CPT_CACHEABLE) {
                /*
                 * cl_vmpage_page() can be called here without any locks as
                 *
                 *     - "vmpage" is locked (which prevents ->private from
                 *       concurrent updates), and
                 *
                 *     - "o" cannot be destroyed while current thread holds a
                 *       reference on it.
                 */
                page = cl_vmpage_page(vmpage, o);
                PINVRNT(env, page,
                        ergo(page != NULL,
                             cl_page_vmpage(env, page) == vmpage &&
                             (void *)radix_tree_lookup(&hdr->coh_tree,
                                                       idx) == page));
        }

        if (page != NULL) {
                cfs_atomic_inc(&site->cs_pages.cs_hit);
                RETURN(page);
        }

        /* allocate and initialize cl_page */
        err = cl_page_alloc(env, o, idx, vmpage, type, &page);
        if (err != 0)
                RETURN(page);

        if (type == CPT_TRANSIENT) {
                if (parent) {
                        LASSERT(page->cp_parent == NULL);
                        page->cp_parent = parent;
                        parent->cp_child = page;
                }
                RETURN(page);
        }

        /*
         * XXX optimization: use radix_tree_preload() here, and change tree
         * gfp mask to GFP_KERNEL in cl_object_header_init().
         */
        cfs_spin_lock(&hdr->coh_page_guard);
        err = radix_tree_insert(&hdr->coh_tree, idx, page);
        if (err != 0) {
                ghost = page;
                /*
                 * Noted by Jay: a lock on \a vmpage protects cl_page_find()
                 * from this race, but
                 *
                 *     0. it's better to have the cl_page interface "locally
                 *     consistent" so that its correctness can be reasoned
                 *     about without appealing to the (obscure world of) VM
                 *     locking.
                 *
                 *     1. handling this race allows ->coh_tree to remain
                 *     consistent even when VM locking is somehow busted,
                 *     which is very useful during diagnosing and debugging.
                 */
                page = ERR_PTR(err);
                CL_PAGE_DEBUG(D_ERROR, env, ghost,
                              "fail to insert into radix tree: %d\n", err);
        } else {
                if (parent) {
                        LASSERT(page->cp_parent == NULL);
                        page->cp_parent = parent;
                        parent->cp_child = page;
                }
                hdr->coh_pages++;
        }
        cfs_spin_unlock(&hdr->coh_page_guard);

        if (unlikely(ghost != NULL)) {
                cfs_atomic_dec(&site->cs_pages.cs_busy);
                cl_page_delete0(env, ghost, 0);
                cl_page_free(env, ghost);
        }
        RETURN(page);
}

struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
                             pgoff_t idx, struct page *vmpage,
                             enum cl_page_type type)
{
        return cl_page_find0(env, o, idx, vmpage, type, NULL);
}
EXPORT_SYMBOL(cl_page_find);

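/*
 * Usage sketch (illustrative, not part of the original source): with the VM
 * page locked, look up or create the matching cl_page and release the
 * reference when done:
 *
 *         struct cl_page *page;
 *
 *         page = cl_page_find(env, obj, vmpage->index, vmpage,
 *                             CPT_CACHEABLE);
 *         if (!IS_ERR(page)) {
 *                 ... use the page ...
 *                 cl_page_put(env, page);
 *         }
 */
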
struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
                                 pgoff_t idx, struct page *vmpage,
                                 struct cl_page *parent)
{
        return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
}
EXPORT_SYMBOL(cl_page_find_sub);

static inline int cl_page_invariant(const struct cl_page *pg)
{
        struct cl_object_header *header;
        struct cl_page          *parent;
        struct cl_page          *child;
        struct cl_io            *owner;

        /*
         * Page invariant is protected by a VM lock.
         */
        LINVRNT(cl_page_is_vmlocked(NULL, pg));

        header = cl_object_header(pg->cp_obj);
        parent = pg->cp_parent;
        child  = pg->cp_child;
        owner  = pg->cp_owner;

        return cfs_atomic_read(&pg->cp_ref) > 0 &&
                ergo(parent != NULL, parent->cp_child == pg) &&
                ergo(child != NULL, child->cp_parent == pg) &&
                ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
                ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
                ergo(owner != NULL && parent != NULL,
                     parent->cp_owner == pg->cp_owner->ci_parent) &&
                ergo(owner != NULL && child != NULL,
                     child->cp_owner->ci_parent == owner) &&
                /*
                 * Either page is early in initialization (has neither child
                 * nor parent yet), or it is in the object radix tree.
                 */
                ergo(pg->cp_state < CPS_FREEING,
                     (void *)radix_tree_lookup(&header->coh_tree,
                                               pg->cp_index) == pg ||
                     (child == NULL && parent == NULL));
}

static void cl_page_state_set0(const struct lu_env *env,
                               struct cl_page *page, enum cl_page_state state)
{
        enum cl_page_state old;
#ifdef LUSTRE_PAGESTATE_TRACKING
        struct cl_site *site = cl_object_site(page->cp_obj);
#endif

        /*
         * Matrix of allowed state transitions [old][new], for sanity
         * checking. Omitted entries default to 0 (transition forbidden).
         */
        static const int allowed_transitions[CPS_NR][CPS_NR] = {
                [CPS_CACHED] = {
                        [CPS_OWNED]   = 1, /* io finds existing cached page */
                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
                        [CPS_FREEING] = 1  /* eviction on memory pressure */
                },
                [CPS_OWNED] = {
                        [CPS_CACHED]  = 1, /* release to the cache */
                        [CPS_PAGEIN]  = 1, /* start read immediately */
                        [CPS_PAGEOUT] = 1, /* start write immediately */
                        [CPS_FREEING] = 1  /* lock invalidation or truncate */
                },
                [CPS_PAGEIN] = {
                        [CPS_CACHED]  = 1  /* io completion */
                },
                [CPS_PAGEOUT] = {
                        [CPS_CACHED]  = 1  /* io completion */
                },
                [CPS_FREEING] = {
                        0                  /* no transitions out of FREEING */
                }
        };
        ENTRY;

        old = page->cp_state;
        PASSERT(env, page, allowed_transitions[old][state]);
        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
        for (; page != NULL; page = page->cp_child) {
                PASSERT(env, page, page->cp_state == old);
                PASSERT(env, page,
                        equi(state == CPS_OWNED, page->cp_owner != NULL));

#ifdef LUSTRE_PAGESTATE_TRACKING
                cfs_atomic_dec(&site->cs_pages_state[page->cp_state]);
                cfs_atomic_inc(&site->cs_pages_state[state]);
#endif
                cl_page_state_set_trust(page, state);
        }
        EXIT;
}

static void cl_page_state_set(const struct lu_env *env,
                              struct cl_page *page, enum cl_page_state state)
{
        PINVRNT(env, page, cl_page_invariant(page));
        cl_page_state_set0(env, page, state);
}

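/*
 * For example, a typical write-back pass drives a page through
 * CPS_CACHED -> CPS_OWNED -> CPS_PAGEOUT -> CPS_CACHED; each step is
 * checked against the allowed_transitions[][] matrix above.
 */
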
/**
 * Acquires an additional reference to a page.
 *
 * This can be called only by a caller already possessing a reference to
 * \a page.
 *
 * \see cl_object_get(), cl_lock_get().
 */
void cl_page_get(struct cl_page *page)
{
        ENTRY;
        LASSERT(page->cp_state != CPS_FREEING);
        cl_page_get_trust(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_get);

/**
 * Releases a reference to a page.
 *
 * When the last reference is released, the page is returned to the cache,
 * unless it is in cl_page_state::CPS_FREEING state, in which case it is
 * immediately destroyed.
 *
 * \see cl_object_put(), cl_lock_put().
 */
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
        struct cl_object_header *hdr;
        struct cl_site *site = cl_object_site(page->cp_obj);

        PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                       cfs_atomic_read(&page->cp_ref));

        hdr = cl_object_header(cl_object_top(page->cp_obj));
        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
                cfs_atomic_dec(&site->cs_pages.cs_busy);
                /* We're going to access the page w/o a reference, but it's
                 * ok because we have grabbed the lock coh_page_guard, which
                 * means nobody is able to free this page behind us.
                 */
                if (page->cp_state == CPS_FREEING) {
                        /* We drop the page reference and check the page state
                         * inside the coh_page_guard, so that if it gets here,
                         * it is the REALLY last reference to this page.
                         */
                        cfs_spin_unlock(&hdr->coh_page_guard);

                        LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                        PASSERT(env, page, page->cp_owner == NULL);
                        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
                        /*
                         * Page is no longer reachable by other threads. Tear
                         * it down.
                         */
                        cl_page_free(env, page);

                        EXIT;
                        return;
                }
                cfs_spin_unlock(&hdr->coh_page_guard);
        }

        EXIT;
}
EXPORT_SYMBOL(cl_page_put);

/**
 * Returns a VM page associated with a given cl_page.
 */
cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
{
        const struct cl_page_slice *slice;

        /*
         * Find the uppermost layer with the ->cpo_vmpage() method, and return
         * its result.
         */
        page = cl_page_top(page);
        do {
                cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        if (slice->cpl_ops->cpo_vmpage != NULL)
                                RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
                }
                page = page->cp_child;
        } while (page != NULL);
        LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
}
EXPORT_SYMBOL(cl_page_vmpage);

/**
 * Returns a cl_page associated with a VM page, and given cl_object.
 */
struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
{
        struct cl_page *page;
        struct cl_object_header *hdr;

        ENTRY;
        KLASSERT(PageLocked(vmpage));

        /*
         * NOTE: absence of races and liveness of data are guaranteed by page
         *       lock on a "vmpage". That works because object destruction has
         *       bottom-to-top pass.
         */

        /*
         * This loop assumes that ->private points to the top-most page. This
         * can be rectified easily.
         */
        hdr = cl_object_header(cl_object_top(obj));
        cfs_spin_lock(&hdr->coh_page_guard);
        for (page = (void *)vmpage->private;
             page != NULL; page = page->cp_child) {
                if (cl_object_same(page->cp_obj, obj)) {
                        cl_page_get_trust(page);
                        break;
                }
        }
        cfs_spin_unlock(&hdr->coh_page_guard);
        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);

/**
 * Returns the top-page for a given page.
 *
 * \see cl_object_top(), cl_io_top()
 */
struct cl_page *cl_page_top(struct cl_page *page)
{
        return cl_page_top_trusted(page);
}
EXPORT_SYMBOL(cl_page_top);

const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{
        return cl_page_at_trusted(page, dtype);
}
EXPORT_SYMBOL(cl_page_at);

#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)

#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...)                   \
({                                                                      \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        int                         __result;                           \
        ptrdiff_t                   __op   = (_op);                     \
        int                       (*__method)_proto;                    \
                                                                        \
        __result = 0;                                                   \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL) {                         \
                                __result = (*__method)(__env, __scan,   \
                                                       ## __VA_ARGS__); \
                                if (__result != 0)                      \
                                        break;                          \
                        }                                               \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL && __result == 0);                      \
        if (__result > 0)                                               \
                __result = 0;                                           \
        __result;                                                       \
})

#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)                   \
do {                                                                    \
        const struct lu_env        *__env  = (_env);                    \
        struct cl_page             *__page = (_page);                   \
        const struct cl_page_slice *__scan;                             \
        ptrdiff_t                   __op   = (_op);                     \
        void                      (*__method)_proto;                    \
                                                                        \
        __page = cl_page_top(__page);                                   \
        do {                                                            \
                cfs_list_for_each_entry(__scan, &__page->cp_layers,     \
                                        cpl_linkage) {                  \
                        __method = *(void **)((char *)__scan->cpl_ops + \
                                              __op);                    \
                        if (__method != NULL)                           \
                                (*__method)(__env, __scan,              \
                                            ## __VA_ARGS__);            \
                }                                                       \
                __page = __page->cp_child;                              \
        } while (__page != NULL);                                       \
} while (0)

#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...)               \
do {                                                                        \
        const struct lu_env        *__env  = (_env);                        \
        struct cl_page             *__page = (_page);                       \
        const struct cl_page_slice *__scan;                                 \
        ptrdiff_t                   __op   = (_op);                         \
        void                      (*__method)_proto;                        \
                                                                            \
        /* get to the bottom page. */                                       \
        while (__page->cp_child != NULL)                                    \
                __page = __page->cp_child;                                  \
        do {                                                                \
                cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \
                                                cpl_linkage) {              \
                        __method = *(void **)((char *)__scan->cpl_ops +     \
                                              __op);                        \
                        if (__method != NULL)                               \
                                (*__method)(__env, __scan,                  \
                                            ## __VA_ARGS__);                \
                }                                                           \
                __page = __page->cp_parent;                                 \
        } while (__page != NULL);                                           \
} while (0)

static int cl_page_invoke(const struct lu_env *env,
                          struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(CL_PAGE_INVOKE(env, page, op,
                              (const struct lu_env *,
                               const struct cl_page_slice *, struct cl_io *),
                              io));
}

static void cl_page_invoid(const struct lu_env *env,
                           struct cl_io *io, struct cl_page *page, ptrdiff_t op)
{
        PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
        ENTRY;
        CL_PAGE_INVOID(env, page, op,
                       (const struct lu_env *,
                        const struct cl_page_slice *, struct cl_io *), io);
        EXIT;
}

static void cl_page_owner_clear(struct cl_page *page)
{
        ENTRY;
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                if (page->cp_owner != NULL) {
                        LASSERT(page->cp_owner->ci_owned_nr > 0);
                        page->cp_owner->ci_owned_nr--;
                        page->cp_owner = NULL;
                        page->cp_task = NULL;
                }
        }
        EXIT;
}

static void cl_page_owner_set(struct cl_page *page)
{
        ENTRY;
        for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
                LASSERT(page->cp_owner != NULL);
                page->cp_owner->ci_owned_nr++;
        }
        EXIT;
}

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        enum cl_page_state state;

        ENTRY;
        state = pg->cp_state;
        PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
        PINVRNT(env, pg, cl_page_invariant(pg));
        cl_page_owner_clear(pg);

        if (state == CPS_OWNED)
                cl_page_state_set(env, pg, CPS_CACHED);
        /*
         * Completion call-backs are executed in the bottom-up order, so that
         * the uppermost layer (llite), responsible for VFS/VM interaction,
         * runs last and can release locks safely.
         */
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}

/**
 * Returns true iff the page is owned by the given io.
 */
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
        LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
        ENTRY;
        RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
}
EXPORT_SYMBOL(cl_page_is_owned);

/**
 * Tries to own a page by IO.
 *
 * Waits until the page is in cl_page_state::CPS_CACHED state, and then
 * switches it into cl_page_state::CPS_OWNED state.
 *
 * \pre  !cl_page_is_owned(pg, io)
 * \post result == 0 iff cl_page_is_owned(pg, io)
 *
 * \retval 0   success
 *
 * \retval -ve failure, e.g., the page was destroyed (and landed in
 *             cl_page_state::CPS_FREEING instead of
 *             cl_page_state::CPS_CACHED), was owned by another thread, or
 *             is in IO.
 *
 * \see cl_page_disown()
 * \see cl_page_operations::cpo_own()
 * \see cl_page_own_try()
 */
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *pg, int nonblock)
{
        int result;

        PINVRNT(env, pg, !cl_page_is_owned(pg, io));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);

        if (pg->cp_state == CPS_FREEING) {
                result = -ENOENT;
        } else {
                result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
                                        (const struct lu_env *,
                                         const struct cl_page_slice *,
                                         struct cl_io *, int),
                                        io, nonblock);
                if (result == 0) {
                        PASSERT(env, pg, pg->cp_owner == NULL);
                        PASSERT(env, pg, pg->cp_req == NULL);
                        pg->cp_owner = io;
                        pg->cp_task = current;
                        cl_page_owner_set(pg);
                        if (pg->cp_state != CPS_FREEING) {
                                cl_page_state_set(env, pg, CPS_OWNED);
                        } else {
                                cl_page_disown0(env, io, pg);
                                result = -ENOENT;
                        }
                }
        }
        PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
        RETURN(result);
}

/**
 * Owns a page; may block.
 *
 * \see cl_page_own0()
 */
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);

/**
 * Nonblocking version of cl_page_own().
 *
 * \see cl_page_own0()
 */
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *pg)
{
        return cl_page_own0(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);

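/*
 * Usage sketch (illustrative, not part of the original source): the typical
 * ownership protocol around a page operation, e.g., discarding a page on
 * truncate. A non-zero result means the page is being freed (CPS_FREEING)
 * and can simply be skipped:
 *
 *         if (cl_page_own(env, io, pg) == 0) {
 *                 cl_page_discard(env, io, pg);
 *                 cl_page_disown(env, io, pg);
 *         }
 */
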
/**
 * Assume page ownership.
 *
 * Called when the page is already locked by the hosting VM.
 *
 * \pre !cl_page_is_owned(pg, io)
 * \post cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_assume()
 */
void cl_page_assume(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PASSERT(env, pg, pg->cp_state < CPS_OWNED);
        PASSERT(env, pg, pg->cp_owner == NULL);
        PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
        pg->cp_owner = io;
        pg->cp_task = current;
        cl_page_owner_set(pg);
        cl_page_state_set(env, pg, CPS_OWNED);
        EXIT;
}
EXPORT_SYMBOL(cl_page_assume);

/**
 * Releases page ownership without unlocking the page.
 *
 * Moves the page into cl_page_state::CPS_CACHED without releasing a lock on
 * the underlying VM page (as the VM is supposed to do this itself).
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_assume()
 */
void cl_page_unassume(const struct lu_env *env,
                      struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
                               (const struct lu_env *,
                                const struct cl_page_slice *, struct cl_io *),
                               io);
        EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);

/**
 * Releases page ownership.
 *
 * Moves the page into cl_page_state::CPS_CACHED.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post !cl_page_is_owned(pg, io)
 *
 * \see cl_page_own()
 * \see cl_page_operations::cpo_disown()
 */
void cl_page_disown(const struct lu_env *env,
                    struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));

        ENTRY;
        pg = cl_page_top(pg);
        io = cl_io_top(io);
        cl_page_disown0(env, io, pg);
        EXIT;
}
EXPORT_SYMBOL(cl_page_disown);

/**
 * Called when the page is to be removed from the object, e.g., as a result
 * of truncate.
 *
 * Calls cl_page_operations::cpo_discard() top-to-bottom.
 *
 * \pre cl_page_is_owned(pg, io)
 *
 * \see cl_page_operations::cpo_discard()
 */
void cl_page_discard(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
}
EXPORT_SYMBOL(cl_page_discard);

/**
 * Version of cl_page_delete() that can be called for not fully constructed
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
 * path. Doesn't check the page invariant.
 */
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                            int radix)
{
        struct cl_page *tmp = pg;
        ENTRY;

        PASSERT(env, pg, pg == cl_page_top(pg));
        PASSERT(env, pg, pg->cp_state != CPS_FREEING);

        /*
         * Sever all ways to obtain new pointers to @pg.
         */
        cl_page_owner_clear(pg);

        /*
         * Unexport the page before freeing it, so that its content is
         * considered invalid. We have to do this because a CPS_FREEING
         * cl_page may be NOT under the protection of a cl_lock.
         * Afterwards, if this page is found by other threads, it will be
         * forced to be reread.
         */
        cl_page_export(env, pg, 0);
        cl_page_state_set0(env, pg, CPS_FREEING);

        if (tmp->cp_type == CPT_CACHEABLE) {
                if (!radix)
                        /* !radix means that @pg is not yet in the radix tree,
                         * so skip removing it.
                         */
                        tmp = pg->cp_child;
                for (; tmp != NULL; tmp = tmp->cp_child) {
                        void                    *value;
                        struct cl_object_header *hdr;

                        hdr = cl_object_header(tmp->cp_obj);
                        cfs_spin_lock(&hdr->coh_page_guard);
                        value = radix_tree_delete(&hdr->coh_tree,
                                                  tmp->cp_index);
                        PASSERT(env, tmp, value == tmp);
                        PASSERT(env, tmp, hdr->coh_pages > 0);
                        hdr->coh_pages--;
                        cfs_spin_unlock(&hdr->coh_page_guard);
                }
        }

        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
                       (const struct lu_env *, const struct cl_page_slice *));
        EXIT;
}

/**
 * Called when a decision is made to throw the page out of memory.
 *
 * Notifies all layers about page destruction by calling the
 * cl_page_operations::cpo_delete() method top-to-bottom.
 *
 * Moves the page into cl_page_state::CPS_FREEING state (this is the only
 * place where transition to this state happens).
 *
 * Eliminates all venues through which new references to the page can be
 * obtained:
 *
 *     - removes the page from the radix trees,
 *
 *     - breaks linkage from the VM page to cl_page.
 *
 * Once the page reaches cl_page_state::CPS_FREEING, all remaining references
 * will drain after some time, at which point the page will be recycled.
 *
 * \pre  pg == cl_page_top(pg)
 * \pre  VM page is locked
 * \post pg->cp_state == CPS_FREEING
 *
 * \see cl_page_operations::cpo_delete()
 */
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        ENTRY;
        cl_page_delete0(env, pg, 1);
        EXIT;
}
EXPORT_SYMBOL(cl_page_delete);

/**
 * Unmaps the page from user virtual memory.
 *
 * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
 * layer responsible for VM interaction has to unmap the page from user space
 * virtual memory.
 *
 * \see cl_page_operations::cpo_unmap()
 */
int cl_page_unmap(const struct lu_env *env,
                  struct cl_io *io, struct cl_page *pg)
{
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
}
EXPORT_SYMBOL(cl_page_unmap);

/**
 * Marks the page up-to-date.
 *
 * Calls cl_page_operations::cpo_export() through all layers top-to-bottom.
 * The layer responsible for VM interaction has to mark/clear the page as
 * up-to-date according to the \a uptodate argument.
 *
 * \see cl_page_operations::cpo_export()
 */
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
        PINVRNT(env, pg, cl_page_invariant(pg));
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int), uptodate);
}
EXPORT_SYMBOL(cl_page_export);

/**
 * Returns true iff \a pg is VM locked in a suitable sense by the calling
 * thread.
 */
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
        int result;
        const struct cl_page_slice *slice;

        ENTRY;
        pg = cl_page_top_trusted((struct cl_page *)pg);
        slice = container_of(pg->cp_layers.next,
                             const struct cl_page_slice, cpl_linkage);
        PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
        /*
         * Call ->cpo_is_vmlocked() directly instead of going through
         * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
         * cl_page_invariant().
         */
        result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
        PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
        RETURN(result == -EBUSY);
}
EXPORT_SYMBOL(cl_page_is_vmlocked);

static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
        ENTRY;
        RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
}

static void cl_page_io_start(const struct lu_env *env,
                             struct cl_page *pg, enum cl_req_type crt)
{
        /*
         * Page is queued for IO, change its state.
         */
        ENTRY;
        cl_page_owner_clear(pg);
        cl_page_state_set(env, pg, cl_req_type_state(crt));
        EXIT;
}

/**
 * Prepares the page for immediate transfer. cl_page_operations::cpo_prep()
 * is called top-to-bottom. Every layer either agrees to submit this page (by
 * returning 0), or requests to omit this page (by returning -EALREADY). The
 * layer handling interactions with the VM also has to inform the VM that the
 * page is under transfer now.
 */
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
                 struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));
        PINVRNT(env, pg, crt < CRT_NR);

        /*
         * XXX this has to be called bottom-to-top, so that llite can set up
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);

        KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
                      equi(result == 0,
                           PageWriteback(cl_page_vmpage(env, pg)))));
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        return result;
}
EXPORT_SYMBOL(cl_page_prep);

/**
 * Notify layers about transfer completion.
 *
 * Invoked by the transfer sub-system (which is a part of osc) to notify
 * layers that a transfer, of which this page is a part, has completed.
 *
 * Completion call-backs are executed in the bottom-up order, so that the
 * uppermost layer (llite), responsible for the VFS/VM interaction, runs last
 * and can release locks safely.
 *
 * \pre  pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 * \post pg->cp_state == CPS_CACHED
 *
 * \see cl_page_operations::cpo_completion()
 */
void cl_page_completion(const struct lu_env *env,
                        struct cl_page *pg, enum cl_req_type crt, int ioret)
{
        struct cl_sync_io *anchor = pg->cp_sync_io;

        PASSERT(env, pg, crt < CRT_NR);
        /* cl_page::cp_req already cleared by the caller (osc_completion()) */
        PASSERT(env, pg, pg->cp_req == NULL);
        PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
        if (crt == CRT_READ && ioret == 0) {
                PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
                pg->cp_flags |= CPF_READ_COMPLETED;
        }

        cl_page_state_set(env, pg, CPS_CACHED);
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor) {
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
                cl_sync_io_note(anchor, ioret);
        }

        /* Don't assert the page writeback bit here, because the Lustre file
         * may be used as a backing store for swap space; in that case the
         * writeback bit is set by the VM, and we obviously must not clear
         * it. Fortunately, pages of this kind are all TRANSIENT pages. */
        KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
                      !PageWriteback(cl_page_vmpage(env, pg))));
        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);

/**
 * Notify layers that the transfer formation engine decided to yank this page
 * from the cache and make it a part of a transfer.
 *
 * \pre  pg->cp_state == CPS_CACHED
 * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
 *
 * \see cl_page_operations::cpo_make_ready()
 */
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
                       enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);

        ENTRY;
        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        if (result == 0) {
                PASSERT(env, pg, pg->cp_state == CPS_CACHED);
                cl_page_io_start(env, pg, crt);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_make_ready);

/**
 * Notify layers that a high level io decided to place this page into a cache
 * for future transfer.
 *
 * The layer implementing the transfer engine (osc) has to register this page
 * in its queues.
 *
 * \pre  cl_page_is_owned(pg, io)
 * \post ergo(result == 0,
 *            pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT)
 *
 * \see cl_page_operations::cpo_cache_add()
 */
int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *pg, enum cl_req_type crt)
{
        int result;

        PINVRNT(env, pg, crt < CRT_NR);
        PINVRNT(env, pg, cl_page_is_owned(pg, io));
        PINVRNT(env, pg, cl_page_invariant(pg));

        ENTRY;
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_cache_add));
        if (result == 0) {
                cl_page_owner_clear(pg);
                cl_page_state_set(env, pg, CPS_CACHED);
        }
        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_cache_add);

/**
 * Checks whether the page is protected by an extent lock of at least the
 * required mode.
 *
 * \return the same as the cl_page_operations::cpo_is_under_lock() method.
 * \see cl_page_operations::cpo_is_under_lock()
 */
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page)
{
        int rc;

        PINVRNT(env, page, cl_page_invariant(page));

        ENTRY;
        rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
                            (const struct lu_env *,
                             const struct cl_page_slice *, struct cl_io *),
                            io);
        PASSERT(env, page, rc != 0);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_page_is_under_lock);

/**
 * Purges all cached pages belonging to the object \a clobj.
 */
int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
{
        struct cl_thread_info   *info;
        struct cl_object        *obj = cl_object_top(clobj);
        struct cl_io            *io;
        struct cl_page_list     *plist;
        int result;

        ENTRY;
        info  = cl_env_info(env);
        plist = &info->clt_list;
        io    = &info->clt_io;

        /*
         * initialize the io. This is ugly since we never do IO in this
         * function, we just make cl_page_list functions happy. -jay
         */
        io->ci_obj = obj;
        result = cl_io_init(env, io, CIT_MISC, obj);
        if (result != 0) {
                cl_io_fini(env, io);
                RETURN(io->ci_result);
        }

        do {
                cl_page_list_init(plist);
                result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
                                             plist);
                /*
                 * Since we're purging the pages of an object, we don't care
                 * about the possible outcomes of the following functions.
                 */
                cl_page_list_unmap(env, io, plist);
                cl_page_list_discard(env, io, plist);
                cl_page_list_disown(env, io, plist);
                cl_page_list_fini(env, plist);

                if (result == CLP_GANG_RESCHED)
                        cfs_cond_resched();
        } while (result != CLP_GANG_OKAY);

        cl_io_fini(env, io);
        RETURN(result);
}
EXPORT_SYMBOL(cl_pages_prune);

/**
 * Tells the transfer engine that only part of the page is to be transmitted.
 *
 * \see cl_page_operations::cpo_clip()
 */
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
                  int from, int to)
{
        PINVRNT(env, pg, cl_page_invariant(pg));

        CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
                       (const struct lu_env *,
                        const struct cl_page_slice *, int, int),
                       from, to);
}
EXPORT_SYMBOL(cl_page_clip);

/**
 * Prints a human readable representation of \a pg via \a printer.
 */
void cl_page_header_print(const struct lu_env *env, void *cookie,
                          lu_printer_t printer, const struct cl_page *pg)
{
        (*printer)(env, cookie,
                   "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
                   pg, cfs_atomic_read(&pg->cp_ref), pg->cp_obj,
                   pg->cp_index, pg->cp_parent, pg->cp_child,
                   pg->cp_state, pg->cp_error, pg->cp_type,
                   pg->cp_owner, pg->cp_req, pg->cp_flags);
}
EXPORT_SYMBOL(cl_page_header_print);

/**
 * Prints a human readable representation of \a pg and its slices via
 * \a printer.
 */
void cl_page_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_page *pg)
{
        struct cl_page *scan;

        for (scan = cl_page_top((struct cl_page *)pg);
             scan != NULL; scan = scan->cp_child)
                cl_page_header_print(env, cookie, printer, scan);
        CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
                       (const struct lu_env *env,
                        const struct cl_page_slice *slice,
                        void *cookie, lu_printer_t p), cookie, printer);
        (*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);

/**
 * Cancel a page which is still in a transfer.
 */
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
        return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
                              (const struct lu_env *,
                               const struct cl_page_slice *));
}
EXPORT_SYMBOL(cl_page_cancel);

/**
 * Converts a page index within object \a obj into a byte offset.
 */
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
        /*
         * XXX for now.
         */
        return (loff_t)idx << CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);

/**
 * Converts a byte offset within object \a obj into a page index.
 */
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
        /*
         * XXX for now.
         */
        return offset >> CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);

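/*
 * For example, with 4KiB pages (CFS_PAGE_SHIFT == 12):
 * cl_offset(obj, 3) == 12288, and cl_index(obj, 12288 + 100) == 3.
 */
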
int cl_page_size(const struct cl_object *obj)
{
        return 1 << CFS_PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);

/**
 * Adds a page slice to the compound page.
 *
 * This is called by cl_object_operations::coo_page_init() methods to add a
 * per-layer state to the page. New state is added at the end of the
 * cl_page::cp_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
 */
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
                       struct cl_object *obj,
                       const struct cl_page_operations *ops)
{
        ENTRY;
        cfs_list_add_tail(&slice->cpl_linkage, &page->cp_layers);
        slice->cpl_obj  = obj;
        slice->cpl_ops  = ops;
        slice->cpl_page = page;
        EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);

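/*
 * Usage sketch (illustrative, not part of the original source; "my_page" and
 * "my_page_ops" are hypothetical names): a layer's
 * cl_object_operations::coo_page_init() method typically embeds its
 * per-layer slice into the compound page and returns NULL on success:
 *
 *         struct my_page *mp = ...;   (slice allocated by the layer)
 *
 *         cl_page_slice_add(page, &mp->mp_cl, obj, &my_page_ops);
 *         return NULL;
 */
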
int cl_page_init(void)
{
        return lu_kmem_init(cl_page_caches);
}

void cl_page_fini(void)
{
        lu_kmem_fini(cl_page_caches);
}