X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_page.c;h=e2afea40f2334f2986d2992f0cb600b4533e20d1;hp=2b384d81dc63e35e323f3af253da72569950a592;hb=e3d507eec50fc1ff79acf2a9f93d52d698c887d7;hpb=d758bfd1f6ac16bce930033bf14e4bf7b5567972

diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index 2b384d8..e2afea4 100644
--- a/lustre/llite/vvp_page.c
+++ b/lustre/llite/vvp_page.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -41,8 +41,15 @@
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
 
-#include <lustre_lite.h>
+#include <libcfs/libcfs.h>
+#include "llite_internal.h"
 #include "vvp_internal.h"
 
 /*****************************************************************************
@@ -51,9 +58,9 @@
  *
  */
 
-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg)
 {
-	struct page *vmpage = cp->cpg_page;
+	struct page *vmpage = vpg->vpg_page;
 
 	LASSERT(vmpage != NULL);
 	page_cache_release(vmpage);
@@ -62,40 +69,41 @@ static void vvp_page_fini_common(struct ccc_page *cp)
 static void vvp_page_fini(const struct lu_env *env,
 			  struct cl_page_slice *slice)
 {
-	struct ccc_page *cp = cl2ccc_page(slice);
-	struct page *vmpage = cp->cpg_page;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;
 
 	/*
 	 * vmpage->private was already cleared when page was moved into
 	 * VPG_FREEING state.
 	 */
 	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
-	vvp_page_fini_common(cp);
+	vvp_page_fini_common(vpg);
 }
 
 static int vvp_page_own(const struct lu_env *env,
-                        const struct cl_page_slice *slice, struct cl_io *io,
-                        int nonblock)
+			const struct cl_page_slice *slice, struct cl_io *io,
+			int nonblock)
 {
-	struct ccc_page *vpg = cl2ccc_page(slice);
-	struct page *vmpage = vpg->cpg_page;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;
 
 	LASSERT(vmpage != NULL);
 	if (nonblock) {
 		if (!trylock_page(vmpage))
 			return -EAGAIN;
 
-                if (unlikely(PageWriteback(vmpage))) {
-                        unlock_page(vmpage);
-                        return -EAGAIN;
-                }
+		if (unlikely(PageWriteback(vmpage))) {
+			unlock_page(vmpage);
+			return -EAGAIN;
+		}
 
-                return 0;
-        }
+		return 0;
+	}
 
-        lock_page(vmpage);
-        wait_on_page_writeback(vmpage);
-        return 0;
+	lock_page(vmpage);
+	wait_on_page_writeback(vmpage);
+
+	return 0;
 }
 
 static void vvp_page_assume(const struct lu_env *env,
@@ -131,23 +139,23 @@ static void vvp_page_disown(const struct lu_env *env,
 }
 
 static void vvp_page_discard(const struct lu_env *env,
-                             const struct cl_page_slice *slice,
-                             struct cl_io *unused)
+			     const struct cl_page_slice *slice,
+			     struct cl_io *unused)
 {
 	struct page	*vmpage = cl2vm_page(slice);
-	struct ccc_page *cpg = cl2ccc_page(slice);
+	struct vvp_page *vpg = cl2vvp_page(slice);
 
 	LASSERT(vmpage != NULL);
 	LASSERT(PageLocked(vmpage));
 
-	if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
+	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
 		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
 
 	ll_invalidate_page(vmpage);
 }
 
 static void vvp_page_delete(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+			    const struct cl_page_slice *slice)
 {
 	struct page	*vmpage = cl2vm_page(slice);
 	struct inode	*inode = vmpage->mapping->host;
@@ -157,9 +165,7 @@ static void vvp_page_delete(const struct lu_env *env,
 
 	LASSERT(PageLocked(vmpage));
 	LASSERT((struct cl_page *)vmpage->private == page);
-	LASSERT(inode == ccc_object_inode(obj));
-
-	vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
+	LASSERT(inode == vvp_object_inode(obj));
 
 	/* Drop the reference count held in vvp_page_init */
 	refc = atomic_dec_return(&page->cp_ref);
@@ -213,7 +219,6 @@ static int vvp_page_prep_write(const struct lu_env *env,
 	LASSERT(!PageDirty(vmpage));
 
 	set_page_writeback(vmpage);
-	vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
 
 	return 0;
 }
@@ -226,11 +231,11 @@ static int vvp_page_prep_write(const struct lu_env *env,
  */
 static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
 {
-	struct ccc_object *obj = cl_inode2ccc(inode);
+	struct vvp_object *obj = cl_inode2vvp(inode);
 
 	if (ioret == 0) {
 		ClearPageError(vmpage);
-		obj->cob_discard_page_warned = 0;
+		obj->vob_discard_page_warned = 0;
 	} else {
 		SetPageError(vmpage);
 		if (ioret == -ENOSPC)
@@ -239,77 +244,65 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
 			set_bit(AS_EIO, &inode->i_mapping->flags);
 
 		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
-		     obj->cob_discard_page_warned == 0) {
-			obj->cob_discard_page_warned = 1;
+		     obj->vob_discard_page_warned == 0) {
+			obj->vob_discard_page_warned = 1;
 			ll_dirty_page_discard_warn(vmpage, ioret);
 		}
 	}
 }
 
 static void vvp_page_completion_read(const struct lu_env *env,
-                                     const struct cl_page_slice *slice,
-                                     int ioret)
+				     const struct cl_page_slice *slice,
+				     int ioret)
 {
-	struct ccc_page *cp = cl2ccc_page(slice);
-	struct page *vmpage = cp->cpg_page;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;
 	struct cl_page *page = slice->cpl_page;
-	struct inode *inode = ccc_object_inode(page->cp_obj);
-        ENTRY;
+	struct inode *inode = vvp_object_inode(page->cp_obj);
+	ENTRY;
 
-        LASSERT(PageLocked(vmpage));
-        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
+	LASSERT(PageLocked(vmpage));
+	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
 
-        if (cp->cpg_defer_uptodate)
-                ll_ra_count_put(ll_i2sbi(inode), 1);
+	if (vpg->vpg_defer_uptodate)
+		ll_ra_count_put(ll_i2sbi(inode), 1);
 
-        if (ioret == 0) {
-                if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page, 1);
-        } else
-                cp->cpg_defer_uptodate = 0;
+	if (ioret == 0) {
+		if (!vpg->vpg_defer_uptodate)
+			cl_page_export(env, page, 1);
+	} else {
+		vpg->vpg_defer_uptodate = 0;
+	}
 
-        if (page->cp_sync_io == NULL)
-                unlock_page(vmpage);
+	if (page->cp_sync_io == NULL)
+		unlock_page(vmpage);
 
-        EXIT;
+	EXIT;
 }
 
 static void vvp_page_completion_write(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      int ioret)
+				      const struct cl_page_slice *slice,
+				      int ioret)
 {
-	struct ccc_page *cp     = cl2ccc_page(slice);
+	struct vvp_page *vpg    = cl2vvp_page(slice);
 	struct cl_page  *pg     = slice->cpl_page;
-	struct page     *vmpage = cp->cpg_page;
+	struct page     *vmpage = vpg->vpg_page;
 	ENTRY;
 
-        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
-        LASSERT(PageWriteback(vmpage));
-
-        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
-
-        /*
-         * TODO: Actually it makes sense to add the page into oap pending
-         * list again and so that we don't need to take the page out from
-         * SoM write pending list, if we just meet a recoverable error,
-         * -ENOMEM, etc.
-         * To implement this, we just need to return a non zero value in
-         * ->cpo_completion method. The underlying transfer should be notified
-         * and then re-add the page into pending transfer queue. -jay
-         */
+	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
+	LASSERT(PageWriteback(vmpage));
 
-        cp->cpg_write_queued = 0;
-        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
-        /*
-         * Only mark the page error only when it's an async write because
-         * applications won't wait for IO to finish.
-         */
-        if (pg->cp_sync_io == NULL)
-                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
+	/*
+	 * Only mark the page error only when it's an async write because
+	 * applications won't wait for IO to finish.
+	 */
+	if (pg->cp_sync_io == NULL)
+		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
 
-        end_page_writeback(vmpage);
-        EXIT;
+	end_page_writeback(vmpage);
+	EXIT;
 }
 
 /**
@@ -339,8 +332,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
 		/* This actually clears the dirty bit in the radix
 		 * tree. */
 		set_page_writeback(vmpage);
-		vvp_write_pending(cl2ccc(slice->cpl_obj),
-				  cl2ccc_page(slice));
 		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
 	} else if (pg->cp_state == CPS_PAGEOUT) {
 		/* is it possible for osc_flush_async_page() to already
@@ -355,71 +346,75 @@
 	RETURN(result);
 }
 
-static int vvp_page_is_under_lock(const struct lu_env *env,
-				  const struct cl_page_slice *slice,
-				  struct cl_io *io, pgoff_t *max_index)
+static int vvp_page_print(const struct lu_env *env,
+			  const struct cl_page_slice *slice,
+			  void *cookie, lu_printer_t printer)
 {
-	ENTRY;
+	struct vvp_page *vpg = cl2vvp_page(slice);
+	struct page *vmpage = vpg->vpg_page;
+
+	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d) "
+		   "vm@%p ",
+		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
+
+	if (vmpage != NULL) {
+		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
+			   (long)vmpage->flags, page_count(vmpage),
+			   page_mapcount(vmpage), vmpage->private,
+			   page_index(vmpage),
+			   list_empty(&vmpage->lru) ? "not-" : "");
+	}
 
-	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
-	    io->ci_type == CIT_FAULT) {
-		struct ccc_io *cio = ccc_env_io(env);
+	(*printer)(env, cookie, "\n");
 
-		if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
-			*max_index = CL_PAGE_EOF;
-	}
-	RETURN(0);
+	return 0;
 }
 
-
-static int vvp_page_print(const struct lu_env *env,
-			  const struct cl_page_slice *slice,
-			  void *cookie, lu_printer_t printer)
+static int vvp_page_fail(const struct lu_env *env,
+			 const struct cl_page_slice *slice)
 {
-	struct ccc_page *vp = cl2ccc_page(slice);
-	struct page *vmpage = vp->cpg_page;
-
-	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
-		   "vm@%p ",
-		   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
-		   vp->cpg_write_queued, vmpage);
-	if (vmpage != NULL) {
-		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
-			   (long)vmpage->flags, page_count(vmpage),
-			   page_mapcount(vmpage), vmpage->private,
-			   page_index(vmpage),
-			   list_empty(&vmpage->lru) ? "not-" : "");
-	}
-	(*printer)(env, cookie, "\n");
-	return 0;
+	/*
+	 * Cached read?
+	 */
+	LBUG();
+
+	return 0;
 }
 
 static const struct cl_page_operations vvp_page_ops = {
-	.cpo_own           = vvp_page_own,
-	.cpo_assume        = vvp_page_assume,
-	.cpo_unassume      = vvp_page_unassume,
-	.cpo_disown        = vvp_page_disown,
-	.cpo_discard       = vvp_page_discard,
-	.cpo_delete        = vvp_page_delete,
-	.cpo_export        = vvp_page_export,
-	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
-	.cpo_fini          = vvp_page_fini,
-	.cpo_print         = vvp_page_print,
-	.cpo_is_under_lock = vvp_page_is_under_lock,
-	.io = {
-		[CRT_READ] = {
-			.cpo_prep        = vvp_page_prep_read,
-			.cpo_completion  = vvp_page_completion_read,
-			.cpo_make_ready  = ccc_fail,
-		},
-		[CRT_WRITE] = {
-			.cpo_prep        = vvp_page_prep_write,
-			.cpo_completion  = vvp_page_completion_write,
-			.cpo_make_ready  = vvp_page_make_ready,
-		}
-	}
+	.cpo_own = vvp_page_own,
+	.cpo_assume = vvp_page_assume,
+	.cpo_unassume = vvp_page_unassume,
+	.cpo_disown = vvp_page_disown,
+	.cpo_discard = vvp_page_discard,
+	.cpo_delete = vvp_page_delete,
+	.cpo_export = vvp_page_export,
+	.cpo_is_vmlocked = vvp_page_is_vmlocked,
+	.cpo_fini = vvp_page_fini,
+	.cpo_print = vvp_page_print,
+	.io = {
+		[CRT_READ] = {
+			.cpo_prep = vvp_page_prep_read,
+			.cpo_completion = vvp_page_completion_read,
+			.cpo_make_ready = vvp_page_fail,
+		},
+		[CRT_WRITE] = {
+			.cpo_prep = vvp_page_prep_write,
+			.cpo_completion = vvp_page_completion_write,
+			.cpo_make_ready = vvp_page_make_ready,
+		},
+	},
 };
 
+static int vvp_transient_page_prep(const struct lu_env *env,
+				   const struct cl_page_slice *slice,
+				   struct cl_io *unused)
+{
+	ENTRY;
+	/* transient page should always be sent. */
+	RETURN(0);
+}
+
 static void vvp_transient_page_verify(const struct cl_page *page)
 {
 }
@@ -470,12 +465,12 @@ static void vvp_transient_page_discard(const struct lu_env *env,
 static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
 					  const struct cl_page_slice *slice)
 {
-	struct inode *inode = ccc_object_inode(slice->cpl_obj);
+	struct inode *inode = vvp_object_inode(slice->cpl_obj);
 	int locked;
 
-	locked = !mutex_trylock(&inode->i_mutex);
+	locked = !inode_trylock(inode);
 	if (!locked)
-		mutex_unlock(&inode->i_mutex);
+		inode_unlock(inode);
 
 	return locked ? -EBUSY : -ENODATA;
 }
@@ -490,12 +485,12 @@ vvp_transient_page_completion(const struct lu_env *env,
 
 static void vvp_transient_page_fini(const struct lu_env *env,
 				    struct cl_page_slice *slice)
 {
-	struct ccc_page *cp = cl2ccc_page(slice);
+	struct vvp_page *vpg = cl2vvp_page(slice);
 	struct cl_page *clp = slice->cpl_page;
-	struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+	struct vvp_object *clobj = cl2vvp(clp->cp_obj);
 
-	vvp_page_fini_common(cp);
-	atomic_dec(&clobj->cob_transient_pages);
+	vvp_page_fini_common(vpg);
+	atomic_dec(&clobj->vob_transient_pages);
 }
 
@@ -507,14 +502,13 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 	.cpo_fini = vvp_transient_page_fini,
 	.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
 	.cpo_print = vvp_page_print,
-	.cpo_is_under_lock = vvp_page_is_under_lock,
 	.io = {
 		[CRT_READ] = {
-			.cpo_prep = ccc_transient_page_prep,
+			.cpo_prep = vvp_transient_page_prep,
 			.cpo_completion = vvp_transient_page_completion,
 		},
 		[CRT_WRITE] = {
-			.cpo_prep = ccc_transient_page_prep,
+			.cpo_prep = vvp_transient_page_prep,
 			.cpo_completion = vvp_transient_page_completion,
 		}
 	}
@@ -523,29 +517,27 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 		  struct cl_page *page, pgoff_t index)
 {
-	struct ccc_page *cpg = cl_object_page_slice(obj, page);
+	struct vvp_page *vpg = cl_object_page_slice(obj, page);
 	struct page     *vmpage = page->cp_vmpage;
 
-	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
-	cpg->cpg_page = vmpage;
+	vpg->vpg_page = vmpage;
 	page_cache_get(vmpage);
-	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
 
 	if (page->cp_type == CPT_CACHEABLE) {
 		/* in cache, decref in vvp_page_delete */
 		atomic_inc(&page->cp_ref);
 		SetPagePrivate(vmpage);
 		vmpage->private = (unsigned long)page;
-		cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
+		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
 				&vvp_page_ops);
 	} else {
-		struct ccc_object *clobj = cl2ccc(obj);
+		struct vvp_object *clobj = cl2vvp(obj);
 
-		cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
+		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
 				&vvp_transient_page_ops);
-		atomic_inc(&clobj->cob_transient_pages);
+		atomic_inc(&clobj->vob_transient_pages);
 	}
 	return 0;
 }
-