X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_page.c;h=7fb019cb4dc467f094f03dbd253ee461493a3df1;hp=bc0e2b1d91974cea1b2df434da8cfea04669abef;hb=6bce536725efd166d2772f13fe954f271f9c53b8;hpb=e3a7c58aebafce40323db54bf6056029e5af4a70

diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index bc0e2b1..7fb019c 100644
--- a/lustre/llite/vvp_page.c
+++ b/lustre/llite/vvp_page.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -36,17 +32,20 @@
  * Implementation of cl_page for VVP layer.
  *
  * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
  */

 #define DEBUG_SUBSYSTEM S_LLITE

-#ifndef __KERNEL__
-# error This file is kernel only.
-#endif
-
-#include <obd.h>
-#include <lustre_lite.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>

+#include <libcfs/libcfs.h>
+#include "llite_internal.h"
 #include "vvp_internal.h"

 /*****************************************************************************
@@ -55,155 +54,145 @@
  *
  */

-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg, struct pagevec *pvec)
 {
-        cfs_page_t *vmpage = cp->cpg_page;
-
-        LASSERT(vmpage != NULL);
-        page_cache_release(vmpage);
-        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
+	struct page *vmpage = vpg->vpg_page;
+
+	LASSERT(vmpage != NULL);
+	if (pvec) {
+		if (!pagevec_add(pvec, vmpage))
+			pagevec_release(pvec);
+	} else {
+		put_page(vmpage);
+	}
 }

 static void vvp_page_fini(const struct lu_env *env,
-                          struct cl_page_slice *slice)
+			  struct cl_page_slice *slice,
+			  struct pagevec *pvec)
 {
-        struct ccc_page *cp = cl2ccc_page(slice);
-        cfs_page_t *vmpage  = cp->cpg_page;
-
-        /*
-         * vmpage->private was already cleared when page was moved into
-         * VPG_FREEING state.
-         */
-        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
-        vvp_page_fini_common(cp);
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;
+
+	/*
+	 * vmpage->private was already cleared when page was moved into
+	 * VPG_FREEING state.
+	 */
+	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
+	vvp_page_fini_common(vpg, pvec);
 }

 static int vvp_page_own(const struct lu_env *env,
-                        const struct cl_page_slice *slice, struct cl_io *io,
-                        int nonblock)
+			const struct cl_page_slice *slice, struct cl_io *io,
+			int nonblock)
 {
-        struct ccc_page *vpg    = cl2ccc_page(slice);
-        cfs_page_t      *vmpage = vpg->cpg_page;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;

-        LASSERT(vmpage != NULL);
-        if (nonblock) {
-                if (TestSetPageLocked(vmpage))
-                        return -EAGAIN;
+	LASSERT(vmpage != NULL);
+	if (nonblock) {
+		if (!trylock_page(vmpage))
+			return -EAGAIN;

-                if (unlikely(PageWriteback(vmpage))) {
-                        unlock_page(vmpage);
-                        return -EAGAIN;
-                }
+		if (unlikely(PageWriteback(vmpage))) {
+			unlock_page(vmpage);
+			return -EAGAIN;
+		}

-                return 0;
-        }
+		return 0;
+	}

-        lock_page(vmpage);
-        wait_on_page_writeback(vmpage);
-        return 0;
+	lock_page(vmpage);
+	wait_on_page_writeback(vmpage);
+
+	return 0;
 }

 static void vvp_page_assume(const struct lu_env *env,
-                            const struct cl_page_slice *slice,
-                            struct cl_io *unused)
+			    const struct cl_page_slice *slice,
+			    struct cl_io *unused)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
+	struct page *vmpage = cl2vm_page(slice);

-        LASSERT(vmpage != NULL);
-        LASSERT(PageLocked(vmpage));
-        wait_on_page_writeback(vmpage);
+	LASSERT(vmpage != NULL);
+	LASSERT(PageLocked(vmpage));
+	wait_on_page_writeback(vmpage);
 }

 static void vvp_page_unassume(const struct lu_env *env,
-                              const struct cl_page_slice *slice,
-                              struct cl_io *unused)
+			      const struct cl_page_slice *slice,
+			      struct cl_io *unused)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
+	struct page *vmpage = cl2vm_page(slice);

-        LASSERT(vmpage != NULL);
-        LASSERT(PageLocked(vmpage));
+	LASSERT(vmpage != NULL);
+	LASSERT(PageLocked(vmpage));
 }

 static void vvp_page_disown(const struct lu_env *env,
-                            const struct cl_page_slice *slice, struct cl_io *io)
+			    const struct cl_page_slice *slice, struct cl_io *io)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
+	struct page *vmpage = cl2vm_page(slice);

-        LASSERT(vmpage != NULL);
-        LASSERT(PageLocked(vmpage));
+	LASSERT(vmpage != NULL);
+	LASSERT(PageLocked(vmpage));

-        unlock_page(cl2vm_page(slice));
+	unlock_page(cl2vm_page(slice));
 }

 static void vvp_page_discard(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *unused)
+			     const struct cl_page_slice *slice,
+			     struct cl_io *unused)
 {
-        cfs_page_t           *vmpage  = cl2vm_page(slice);
-        struct address_space *mapping = vmpage->mapping;
-        struct ccc_page      *cpg     = cl2ccc_page(slice);
-
-        LASSERT(vmpage != NULL);
-        LASSERT(PageLocked(vmpage));
+	struct page *vmpage = cl2vm_page(slice);
+	struct vvp_page *vpg = cl2vvp_page(slice);

-        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
-                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+	LASSERT(vmpage != NULL);
+	LASSERT(PageLocked(vmpage));

-        /*
-         * truncate_complete_page() calls
-         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
-         */
-        truncate_complete_page(mapping, vmpage);
-}
-
-static int vvp_page_unmap(const struct lu_env *env,
-                          const struct cl_page_slice *slice,
-                          struct cl_io *unused)
-{
-        cfs_page_t *vmpage = cl2vm_page(slice);
-        __u64       offset = vmpage->index << CFS_PAGE_SHIFT;
+	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
+		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

-        LASSERT(vmpage != NULL);
-        LASSERT(PageLocked(vmpage));
-        /*
-         * XXX is it safe to call this with the page lock held?
-         */
-        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
-        return 0;
+	generic_error_remove_page(vmpage->mapping, vmpage);
 }

 static void vvp_page_delete(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+			    const struct cl_page_slice *slice)
 {
-        cfs_page_t       *vmpage = cl2vm_page(slice);
-        struct inode     *inode  = vmpage->mapping->host;
-        struct cl_object *obj    = slice->cpl_obj;
-
-        LASSERT(PageLocked(vmpage));
-        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
-        LASSERT(inode == ccc_object_inode(obj));
-
-        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
-        ClearPagePrivate(vmpage);
-        vmpage->private = 0;
-        /*
-         * Reference from vmpage to cl_page is removed, but the reference back
-         * is still here. It is removed later in vvp_page_fini().
-         */
+	struct page *vmpage = cl2vm_page(slice);
+	struct inode *inode = vmpage->mapping->host;
+	struct cl_object *obj = slice->cpl_obj;
+	struct cl_page *page = slice->cpl_page;
+	int refc;
+
+	LASSERT(PageLocked(vmpage));
+	LASSERT((struct cl_page *)vmpage->private == page);
+	LASSERT(inode == vvp_object_inode(obj));
+
+	/* Drop the reference count held in vvp_page_init */
+	refc = atomic_dec_return(&page->cp_ref);
+	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
+
+	ClearPagePrivate(vmpage);
+	vmpage->private = 0;
+	/*
+	 * Reference from vmpage to cl_page is removed, but the reference back
+	 * is still here. It is removed later in vvp_page_fini().
+	 */
 }

 static void vvp_page_export(const struct lu_env *env,
-                            const struct cl_page_slice *slice,
-                            int uptodate)
+			    const struct cl_page_slice *slice,
+			    int uptodate)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
-
-        LASSERT(vmpage != NULL);
-        LASSERT(PageLocked(vmpage));
-        if (uptodate)
-                SetPageUptodate(vmpage);
-        else
-                ClearPageUptodate(vmpage);
+	struct page *vmpage = cl2vm_page(slice);
+
+	LASSERT(vmpage != NULL);
+	LASSERT(PageLocked(vmpage));
+	if (uptodate)
+		SetPageUptodate(vmpage);
+	else
+		ClearPageUptodate(vmpage);
 }

 static int vvp_page_is_vmlocked(const struct lu_env *env,
@@ -225,21 +214,18 @@ static int vvp_page_prep_write(const struct lu_env *env,
                                const struct cl_page_slice *slice,
                                struct cl_io *unused)
 {
-        struct cl_page *cp     = slice->cpl_page;
-        cfs_page_t     *vmpage = cl2vm_page(slice);
-        int result;
-
-        if (clear_page_dirty_for_io(vmpage)) {
-                set_page_writeback(vmpage);
-                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
-                result = 0;
-
-                /* only turn on writeback for async write. */
-                if (cp->cp_sync_io == NULL)
-                        unlock_page(vmpage);
-        } else
-                result = -EALREADY;
-        return result;
+	struct page *vmpage = cl2vm_page(slice);
+	struct cl_page *pg = slice->cpl_page;
+
+	LASSERT(PageLocked(vmpage));
+	LASSERT(!PageDirty(vmpage));
+
+	/* ll_writepage path is not a sync write, so need to set page writeback
+	 * flag */
+	if (pg->cp_sync_io == NULL)
+		set_page_writeback(vmpage);
+
+	return 0;
 }

 /**
@@ -248,83 +234,88 @@ static int vvp_page_prep_write(const struct lu_env *env,
  * This takes inode as a separate argument, because inode on which error is to
  * be set can be different from \a vmpage inode in case of direct-io.
  */
-static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
+static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
 {
-        if (ioret == 0)
-                ClearPageError(vmpage);
-        else if (ioret != -EINTR) {
-                SetPageError(vmpage);
-                if (ioret == -ENOSPC)
-                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
-                else
-                        set_bit(AS_EIO, &inode->i_mapping->flags);
-        }
+	struct vvp_object *obj = cl_inode2vvp(inode);
+
+	if (ioret == 0) {
+		ClearPageError(vmpage);
+		obj->vob_discard_page_warned = 0;
+	} else {
+		SetPageError(vmpage);
+		if (ioret == -ENOSPC)
+			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
+		else
+			set_bit(AS_EIO, &inode->i_mapping->flags);
+
+		if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
+		     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
+			obj->vob_discard_page_warned = 1;
+			ll_dirty_page_discard_warn(vmpage, ioret);
+		}
+	}
 }

 static void vvp_page_completion_read(const struct lu_env *env,
-                                     const struct cl_page_slice *slice,
-                                     int ioret)
+				     const struct cl_page_slice *slice,
+				     int ioret)
 {
-        struct ccc_page *cp     = cl2ccc_page(slice);
-        cfs_page_t      *vmpage = cp->cpg_page;
-        struct cl_page  *page   = cl_page_top(slice->cpl_page);
-        struct inode    *inode  = ccc_object_inode(page->cp_obj);
-        ENTRY;
-
-        LASSERT(PageLocked(vmpage));
-        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
-
-        if (cp->cpg_defer_uptodate)
-                ll_ra_count_put(ll_i2sbi(inode), 1);
-
-        if (ioret == 0)  {
-                if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page, 1);
-        } else
-                cp->cpg_defer_uptodate = 0;
-
-        if (page->cp_sync_io == NULL)
-                unlock_page(vmpage);
-
-        EXIT;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;
+	struct cl_page *page = slice->cpl_page;
+	struct inode *inode = vvp_object_inode(page->cp_obj);
+	ENTRY;
+
+	LASSERT(PageLocked(vmpage));
+	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
+
+	if (vpg->vpg_defer_uptodate)
+		ll_ra_count_put(ll_i2sbi(inode), 1);
+
+	if (ioret == 0) {
+		if (!vpg->vpg_defer_uptodate)
+			cl_page_export(env, page, 1);
+	} else if (vpg->vpg_defer_uptodate) {
+		vpg->vpg_defer_uptodate = 0;
+		if (ioret == -EWOULDBLOCK) {
+			/* mirror read failed, it needs to destroy the page
+			 * because subpage would be from wrong osc when trying
+			 * to read from a new mirror */
+			generic_error_remove_page(vmpage->mapping, vmpage);
+		}
+	}
+
+	if (page->cp_sync_io == NULL)
+		unlock_page(vmpage);
+
+	EXIT;
 }

 static void vvp_page_completion_write(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      int ioret)
+				      const struct cl_page_slice *slice,
+				      int ioret)
 {
-        struct ccc_page *cp     = cl2ccc_page(slice);
-        struct cl_page  *pg     = slice->cpl_page;
-        cfs_page_t      *vmpage = cp->cpg_page;
-        ENTRY;
-
-        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
-        LASSERT(PageWriteback(vmpage));
-
-        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
-
-        /*
-         * TODO: Actually it makes sense to add the page into oap pending
-         * list again and so that we don't need to take the page out from
-         * SoM write pending list, if we just meet a recoverable error,
-         * -ENOMEM, etc.
-         * To implement this, we just need to return a non zero value in
-         * ->cpo_completion method. The underlying transfer should be notified
-         * and then re-add the page into pending transfer queue. -jay
-         */
-
-        cp->cpg_write_queued = 0;
-        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
-
-        /*
-         * Only mark the page error only when it's an async write because
-         * applications won't wait for IO to finish.
-         */
-        if (pg->cp_sync_io == NULL)
-                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
-
-        end_page_writeback(vmpage);
-        EXIT;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct cl_page *pg = slice->cpl_page;
+	struct page *vmpage = vpg->vpg_page;
+	ENTRY;
+
+	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+
+	if (pg->cp_sync_io != NULL) {
+		LASSERT(PageLocked(vmpage));
+		LASSERT(!PageWriteback(vmpage));
+	} else {
+		LASSERT(PageWriteback(vmpage));
+		/*
+		 * Only mark the page error only when it's an async write
+		 * because applications won't wait for IO to finish.
+		 */
+		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
+
+		end_page_writeback(vmpage);
+	}
+	EXIT;
 }

 /**
@@ -342,125 +333,91 @@ static void vvp_page_completion_write(const struct lu_env *env,
  * truncated. Skip it.
  */
 static int vvp_page_make_ready(const struct lu_env *env,
-                               const struct cl_page_slice *slice)
+			       const struct cl_page_slice *slice)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
-        struct cl_page *pg = slice->cpl_page;
-        int result;
-
-        result = -EAGAIN;
-        /* we're trying to write, but the page is locked.. come back later */
-        if (!TestSetPageLocked(vmpage)) {
-                if (pg->cp_state == CPS_CACHED) {
-                        /*
-                         * We can cancel IO if page wasn't dirty after all.
-                         */
-                        clear_page_dirty_for_io(vmpage);
-                        /*
-                         * This actually clears the dirty bit in the radix
-                         * tree.
-                         */
-                        set_page_writeback(vmpage);
-                        vvp_write_pending(cl2ccc(slice->cpl_obj),
-                                          cl2ccc_page(slice));
-                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
-                        result = 0;
-                } else
-                        /*
-                         * Page was concurrently truncated.
-                         */
-                        LASSERT(pg->cp_state == CPS_FREEING);
-                unlock_page(vmpage);
-        }
-        RETURN(result);
+	struct page *vmpage = cl2vm_page(slice);
+	struct cl_page *pg = slice->cpl_page;
+	int result = 0;
+
+	lock_page(vmpage);
+	if (clear_page_dirty_for_io(vmpage)) {
+		LASSERT(pg->cp_state == CPS_CACHED);
+		/* This actually clears the dirty bit in the radix
+		 * tree. */
+		set_page_writeback(vmpage);
+		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
+	} else if (pg->cp_state == CPS_PAGEOUT) {
+		/* is it possible for osc_flush_async_page() to already
+		 * make it ready? */
+		result = -EALREADY;
+	} else {
+		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
+			      pg->cp_state);
+		LBUG();
+	}
+	unlock_page(vmpage);
+	RETURN(result);
 }

 static int vvp_page_print(const struct lu_env *env,
-                          const struct cl_page_slice *slice,
-                          void *cookie, lu_printer_t printer)
+			  const struct cl_page_slice *slice,
+			  void *cookie, lu_printer_t printer)
 {
-        struct ccc_page *vp     = cl2ccc_page(slice);
-        cfs_page_t      *vmpage = vp->cpg_page;
-
-        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
-                   "vm@%p ",
-                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
-                   vp->cpg_write_queued, vmpage);
-        if (vmpage != NULL) {
-                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
-                           (long)vmpage->flags, page_count(vmpage),
-                           page_mapcount(vmpage), vmpage->private,
-                           page_index(vmpage),
-                           list_empty(&vmpage->lru) ? "not-" : "");
"not-" : ""); - } - (*printer)(env, cookie, "\n"); - return 0; -} + struct vvp_page *vpg = cl2vvp_page(slice); + struct page *vmpage = vpg->vpg_page; -static const struct cl_page_operations vvp_page_ops = { - .cpo_own = vvp_page_own, - .cpo_assume = vvp_page_assume, - .cpo_unassume = vvp_page_unassume, - .cpo_disown = vvp_page_disown, - .cpo_vmpage = ccc_page_vmpage, - .cpo_discard = vvp_page_discard, - .cpo_delete = vvp_page_delete, - .cpo_unmap = vvp_page_unmap, - .cpo_export = vvp_page_export, - .cpo_is_vmlocked = vvp_page_is_vmlocked, - .cpo_fini = vvp_page_fini, - .cpo_print = vvp_page_print, - .cpo_is_under_lock = ccc_page_is_under_lock, - .io = { - [CRT_READ] = { - .cpo_prep = vvp_page_prep_read, - .cpo_completion = vvp_page_completion_read, - .cpo_make_ready = ccc_fail, - }, - [CRT_WRITE] = { - .cpo_prep = vvp_page_prep_write, - .cpo_completion = vvp_page_completion_write, - .cpo_make_ready = vvp_page_make_ready, - } - } -}; + (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d) " + "vm@%p ", + vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage); -static void vvp_transient_page_verify(const struct cl_page *page) -{ - struct inode *inode = ccc_object_inode(page->cp_obj); + if (vmpage != NULL) { + (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", + (long)vmpage->flags, page_count(vmpage), + page_mapcount(vmpage), vmpage->private, + page_index(vmpage), + list_empty(&vmpage->lru) ? "not-" : ""); + } - LASSERT(!TRYLOCK_INODE_MUTEX(inode)); - /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */ -} + (*printer)(env, cookie, "\n"); -static int vvp_transient_page_own(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused, int nonblock) -{ - vvp_transient_page_verify(slice->cpl_page); - return 0; + return 0; } -static void vvp_transient_page_assume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) +static int vvp_page_fail(const struct lu_env *env, + const struct cl_page_slice *slice) { - vvp_transient_page_verify(slice->cpl_page); -} + /* + * Cached read? + */ + LBUG(); -static void vvp_transient_page_unassume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - vvp_transient_page_verify(slice->cpl_page); + return 0; } -static void vvp_transient_page_disown(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) -{ - vvp_transient_page_verify(slice->cpl_page); -} +static const struct cl_page_operations vvp_page_ops = { + .cpo_own = vvp_page_own, + .cpo_assume = vvp_page_assume, + .cpo_unassume = vvp_page_unassume, + .cpo_disown = vvp_page_disown, + .cpo_discard = vvp_page_discard, + .cpo_delete = vvp_page_delete, + .cpo_export = vvp_page_export, + .cpo_is_vmlocked = vvp_page_is_vmlocked, + .cpo_fini = vvp_page_fini, + .cpo_print = vvp_page_print, + .io = { + [CRT_READ] = { + .cpo_prep = vvp_page_prep_read, + .cpo_completion = vvp_page_completion_read, + .cpo_make_ready = vvp_page_fail, + }, + [CRT_WRITE] = { + .cpo_prep = vvp_page_prep_write, + .cpo_completion = vvp_page_completion_write, + .cpo_make_ready = vvp_page_make_ready, + }, + }, +}; static void vvp_transient_page_discard(const struct lu_env *env, const struct cl_page_slice *slice, @@ -468,8 +425,6 @@ static void vvp_transient_page_discard(const struct lu_env *env, { struct cl_page *page = slice->cpl_page; - vvp_transient_page_verify(slice->cpl_page); - /* * For transient pages, remove it from the radix tree. 
	 */
@@ -477,90 +432,53 @@
 }

 static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
-                                          const struct cl_page_slice *slice)
+					  const struct cl_page_slice *slice)
 {
-        struct inode *inode = ccc_object_inode(slice->cpl_obj);
-        int locked;
-
-        locked = !TRYLOCK_INODE_MUTEX(inode);
-        if (!locked)
-                UNLOCK_INODE_MUTEX(inode);
-        return locked ? -EBUSY : -ENODATA;
-}
-
-static void
-vvp_transient_page_completion(const struct lu_env *env,
-                              const struct cl_page_slice *slice,
-                              int ioret)
-{
-        vvp_transient_page_verify(slice->cpl_page);
+	return -EBUSY;
 }

 static void vvp_transient_page_fini(const struct lu_env *env,
-                                    struct cl_page_slice *slice)
+				    struct cl_page_slice *slice,
+				    struct pagevec *pvec)
 {
-        struct ccc_page *cp = cl2ccc_page(slice);
-        struct cl_page *clp = slice->cpl_page;
-        struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct vvp_object *clobj = cl2vvp(slice->cpl_obj);

-        vvp_page_fini_common(cp);
-        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
-        clobj->cob_transient_pages--;
+	vvp_page_fini_common(vpg, pvec);
+	atomic_dec(&clobj->vob_transient_pages);
 }

 static const struct cl_page_operations vvp_transient_page_ops = {
-        .cpo_own           = vvp_transient_page_own,
-        .cpo_assume        = vvp_transient_page_assume,
-        .cpo_unassume      = vvp_transient_page_unassume,
-        .cpo_disown        = vvp_transient_page_disown,
-        .cpo_discard       = vvp_transient_page_discard,
-        .cpo_vmpage        = ccc_page_vmpage,
-        .cpo_fini          = vvp_transient_page_fini,
-        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
-        .cpo_print         = vvp_page_print,
-        .cpo_is_under_lock = ccc_page_is_under_lock,
-        .io = {
-                [CRT_READ] = {
-                        .cpo_prep        = ccc_transient_page_prep,
-                        .cpo_completion  = vvp_transient_page_completion,
-                },
-                [CRT_WRITE] = {
-                        .cpo_prep        = ccc_transient_page_prep,
-                        .cpo_completion  = vvp_transient_page_completion,
-                }
-        }
+	.cpo_discard		= vvp_transient_page_discard,
+	.cpo_fini		= vvp_transient_page_fini,
+	.cpo_is_vmlocked	= vvp_transient_page_is_vmlocked,
+	.cpo_print		= vvp_page_print,
 };

-struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
-                              struct cl_page *page, cfs_page_t *vmpage)
+int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
+		  struct cl_page *page, pgoff_t index)
 {
-        struct ccc_page *cpg;
-        int result;
-
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
-        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
-        if (cpg != NULL) {
-                cpg->cpg_page = vmpage;
-                page_cache_get(vmpage);
-
-                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
-                if (page->cp_type == CPT_CACHEABLE) {
-                        SetPagePrivate(vmpage);
-                        vmpage->private = (unsigned long)page;
-                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                          &vvp_page_ops);
-                } else {
-                        struct ccc_object *clobj = cl2ccc(obj);
-
-                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
-                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                          &vvp_transient_page_ops);
-                        clobj->cob_transient_pages++;
-                }
-                result = 0;
-        } else
-                result = -ENOMEM;
-        return ERR_PTR(result);
+	struct vvp_page *vpg = cl_object_page_slice(obj, page);
+	struct page *vmpage = page->cp_vmpage;
+
+	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+	vpg->vpg_page = vmpage;
+	get_page(vmpage);
+
+	if (page->cp_type == CPT_CACHEABLE) {
+		/* in cache, decref in vvp_page_delete */
+		atomic_inc(&page->cp_ref);
+		SetPagePrivate(vmpage);
+		vmpage->private = (unsigned long)page;
+		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+				  &vvp_page_ops);
+	} else {
+		struct vvp_object *clobj = cl2vvp(obj);
+
+		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+				  &vvp_transient_page_ops);
+		atomic_inc(&clobj->vob_transient_pages);
+	}
+	return 0;
 }
-
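
The pagevec argument threaded through ->cpo_fini() above lets the caller batch
the final put_page() calls and hand whole batches to pagevec_release(), which
is cheaper than dropping pages one at a time. A minimal caller-side sketch of
the same pattern follows; the function and variable names are hypothetical and
not part of this patch, and on kernels before 4.15 pagevec_init() takes an
extra "cold" argument:

#include <linux/mm.h>
#include <linux/pagevec.h>

/* Hypothetical helper: release an array of VM pages the way
 * vvp_page_fini_common() does when it is handed a pagevec. */
static void example_put_pages(struct page **pages, int npages)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		/* pagevec_add() returns the free slots left; 0 means the
		 * vector is now full, so release the whole batch at once. */
		if (!pagevec_add(&pvec, pages[i]))
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);	/* drop any pages still batched */
}

The companion change in this patch is the explicit reference pairing for
cacheable pages: vvp_page_init() takes an extra reference on the cl_page
(atomic_inc(&page->cp_ref)) when it stores the cl_page pointer in
vmpage->private, and vvp_page_delete() drops it with atomic_dec_return()
when that back-pointer is cleared, so the vmpage-to-cl_page link can never
outlive the cl_page itself.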