X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_page.c;h=125d02aeba1040a3482b7a86d1b4a549a290e4ae;hb=c06fc9da14b061aa4f1f3e652683eb648b02ae8e;hp=3d9ed9cc606fcfbbf5e711eded1286c85484a9be;hpb=9ab07c2801e50169c689ed65e2caa69dcf553a09;p=fs%2Flustre-release.git

diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index 3d9ed9c..125d02a 100644
--- a/lustre/llite/vvp_page.c
+++ b/lustre/llite/vvp_page.c
@@ -78,19 +78,34 @@ static void vvp_page_fini(const struct lu_env *env,
         vvp_page_fini_common(cp);
 }
 
-static void vvp_page_own(const struct lu_env *env,
-                         const struct cl_page_slice *slice, struct cl_io *_)
+static int vvp_page_own(const struct lu_env *env,
+                        const struct cl_page_slice *slice, struct cl_io *io,
+                        int nonblock)
 {
         struct ccc_page *vpg    = cl2ccc_page(slice);
         cfs_page_t      *vmpage = vpg->cpg_page;
 
         LASSERT(vmpage != NULL);
+        if (nonblock) {
+                if (TestSetPageLocked(vmpage))
+                        return -EAGAIN;
+
+                if (unlikely(PageWriteback(vmpage))) {
+                        unlock_page(vmpage);
+                        return -EAGAIN;
+                }
+
+                return 0;
+        }
+
         lock_page(vmpage);
         wait_on_page_writeback(vmpage);
+        return 0;
 }
 
 static void vvp_page_assume(const struct lu_env *env,
-                            const struct cl_page_slice *slice, struct cl_io *_)
+                            const struct cl_page_slice *slice,
+                            struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
@@ -101,7 +116,7 @@ static void vvp_page_assume(const struct lu_env *env,
 
 static void vvp_page_unassume(const struct lu_env *env,
                               const struct cl_page_slice *slice,
-                              struct cl_io *_)
+                              struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
@@ -121,7 +136,8 @@ static void vvp_page_disown(const struct lu_env *env,
 }
 
 static void vvp_page_discard(const struct lu_env *env,
-                             const struct cl_page_slice *slice, struct cl_io *_)
+                             const struct cl_page_slice *slice,
+                             struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
         struct address_space *mapping = vmpage->mapping;
@@ -141,7 +157,8 @@
 }
 
 static int vvp_page_unmap(const struct lu_env *env,
-                          const struct cl_page_slice *slice, struct cl_io *_)
+                          const struct cl_page_slice *slice,
+                          struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
         __u64       offset = vmpage->index << CFS_PAGE_SHIFT;
@@ -176,13 +193,17 @@ static void vvp_page_delete(const struct lu_env *env,
 }
 
 static void vvp_page_export(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+                            const struct cl_page_slice *slice,
+                            int uptodate)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
         LASSERT(vmpage != NULL);
         LASSERT(PageLocked(vmpage));
-        SetPageUptodate(vmpage);
+        if (uptodate)
+                SetPageUptodate(vmpage);
+        else
+                ClearPageUptodate(vmpage);
 }
 
 static int vvp_page_is_vmlocked(const struct lu_env *env,
@@ -193,7 +214,7 @@ static int vvp_page_is_vmlocked(const struct lu_env *env,
 
 static int vvp_page_prep_read(const struct lu_env *env,
                               const struct cl_page_slice *slice,
-                              struct cl_io *_)
+                              struct cl_io *unused)
 {
         ENTRY;
         /* Skip the page already marked as PG_uptodate. */
@@ -202,13 +223,14 @@ static int vvp_page_prep_read(const struct lu_env *env,
 
 static int vvp_page_prep_write(const struct lu_env *env,
                                const struct cl_page_slice *slice,
-                               struct cl_io *_)
+                               struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
         int result;
 
         if (clear_page_dirty_for_io(vmpage)) {
                 set_page_writeback(vmpage);
+                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                 result = 0;
         } else
                 result = -EALREADY;
@@ -240,18 +262,21 @@ static void vvp_page_completion_common(const struct lu_env *env,
         struct cl_page    *clp    = cp->cpg_cl.cpl_page;
         cfs_page_t        *vmpage = cp->cpg_page;
         struct inode      *inode  = ccc_object_inode(clp->cp_obj);
-        struct cl_sync_io *anchor = cp->cpg_sync_io;
 
         LINVRNT(cl_page_is_vmlocked(env, clp));
-        KLASSERT(!PageWriteback(vmpage));
-
-        vvp_vmpage_error(inode, vmpage, ioret);
 
-        if (anchor != NULL) {
-                cp->cpg_sync_io = NULL;
-                cl_sync_io_note(anchor, ioret);
-        } else if (clp->cp_type == CPT_CACHEABLE)
+        if (!clp->cp_sync_io && clp->cp_type == CPT_CACHEABLE) {
+                /*
+                 * Only mark the page as in error when it is a cacheable
+                 * page and NOT a sync io.
+                 *
+                 * For sync IO and direct IO (CPT_TRANSIENT), the error
+                 * can be seen by the application, so we don't need to
+                 * mark the page as in error at all.
+                 */
+                vvp_vmpage_error(inode, vmpage, ioret);
                 unlock_page(vmpage);
+        }
 }
 
 static void vvp_page_completion_read(const struct lu_env *env,
@@ -271,7 +296,7 @@
         if (ioret == 0) {
                 /* XXX: do we need this for transient pages? */
                 if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page);
+                        cl_page_export(env, page, 1);
         } else
                 cp->cpg_defer_uptodate = 0;
         vvp_page_completion_common(env, cp, ioret);
@@ -285,14 +310,18 @@ static void vvp_page_completion_write_common(const struct lu_env *env,
 {
         struct ccc_page *cp = cl2ccc_page(slice);
 
-        if (ioret == 0) {
-                cp->cpg_write_queued = 0;
-                /*
-                 * Only ioret == 0, write succeed, then this page could be
-                 * deleted from the pending_writing count.
-                 */
-                vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
-        }
+        /*
+         * TODO: Actually it makes sense to add the page back into the oap
+         * pending list, so that we don't need to take the page out of the
+         * SoM write pending list if we just hit a recoverable error such
+         * as -ENOMEM.  To implement this, we only need to return a
+         * non-zero value from the ->cpo_completion method; the underlying
+         * transfer would be notified and would then re-add the page to
+         * the pending transfer queue. -jay
+         */
+        cp->cpg_write_queued = 0;
+        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+
         vvp_page_completion_common(env, cp, ioret);
 }
@@ -311,18 +340,16 @@ static void vvp_page_completion_write(const struct lu_env *env,
         CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
-        end_page_writeback(vmpage);
-        LASSERT(!PageWriteback(vmpage));
-
         vvp_page_completion_write_common(env, slice, ioret);
+        end_page_writeback(vmpage);
         EXIT;
 }
 
 /**
  * Implements cl_page_operations::cpo_make_ready() method.
  *
- * This is called to yank page from the transfer page and to send it out as a
- * part of transfer. This function try-locks the page. If try-lock failed,
+ * This is called to yank a page from the transfer cache and to send it out as
+ * a part of transfer. This function try-locks the page. If try-lock failed,
  * page is owned by some concurrent IO, and should be skipped (this is bad,
  * but hopefully rare situation, as it usually results in transfer being
  * shorter than possible).
  */
@@ -352,7 +379,8 @@
                          * tree.
                          */
                         set_page_writeback(vmpage);
-
+                        vvp_write_pending(cl2ccc(slice->cpl_obj),
+                                          cl2ccc_page(slice));
                         CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                         result = 0;
                 } else
@@ -422,37 +450,38 @@ static void vvp_transient_page_verify(const struct cl_page *page)
         /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
 }
 
-static void vvp_transient_page_own(const struct lu_env *env,
-                                   const struct cl_page_slice *slice,
-                                   struct cl_io *_)
+static int vvp_transient_page_own(const struct lu_env *env,
+                                  const struct cl_page_slice *slice,
+                                  struct cl_io *unused, int nonblock)
 {
         vvp_transient_page_verify(slice->cpl_page);
+        return 0;
 }
 
 static void vvp_transient_page_assume(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
-                                      struct cl_io *_)
+                                      struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_unassume(const struct lu_env *env,
                                         const struct cl_page_slice *slice,
-                                        struct cl_io *_)
+                                        struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_disown(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
-                                      struct cl_io *_)
+                                      struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_discard(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
-                                       struct cl_io *_)
+                                       struct cl_io *unused)
 {
         struct cl_page *page = slice->cpl_page;
 
@@ -529,7 +558,7 @@ struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 
         CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
-        OBD_SLAB_ALLOC_PTR(cpg, vvp_page_kmem);
+        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
         if (cpg != NULL) {
                 cpg->cpg_page = vmpage;
                 page_cache_get(vmpage);
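
The vvp_page_own() change above makes cpo_own failable: with nonblock set it
returns -EAGAIN rather than sleeping. TestSetPageLocked() atomically sets
PG_locked and returns its previous value, so a non-zero result means another
thread already holds the page lock; later kernels replaced it with
trylock_page(). A minimal sketch of the same pattern against that newer API
follows; the helper name try_own_vmpage() is hypothetical, not Lustre API.

#include <linux/pagemap.h>

/* Take the page lock without sleeping; also refuse pages under
 * writeback, because waiting for writeback to finish would block. */
static int try_own_vmpage(struct page *page)
{
        if (!trylock_page(page))                /* lock already held */
                return -EAGAIN;

        if (unlikely(PageWriteback(page))) {    /* would have to sleep */
                unlock_page(page);
                return -EAGAIN;
        }

        return 0;       /* owned: locked and not under writeback */
}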
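
The rewritten vvp_page_completion_common() narrows where an IO error is
recorded on the vmpage: only an asynchronous, cacheable page is marked,
since for sync IO and direct IO (CPT_TRANSIENT) the error reaches the
application through the return path anyway. A sketch of that policy,
assuming vvp_vmpage_error() boils down to setting PG_error; the helper name
is illustrative, and struct cl_page comes from the Lustre cl_object.h header.

#include <linux/page-flags.h>
#include <linux/pagemap.h>

/* Mark and release the vmpage only for async cacheable pages; sync and
 * transient pages are unlocked by their own completion paths. */
static void complete_async_cacheable(struct cl_page *clp,
                                     struct page *vmpage, int ioret)
{
        if (clp->cp_sync_io == NULL && clp->cp_type == CPT_CACHEABLE) {
                if (ioret != 0)
                        SetPageError(vmpage);   /* stand-in for vvp_vmpage_error() */
                unlock_page(vmpage);
        }
}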
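
Finally, the allocation change in vvp_page_init() swaps OBD_SLAB_ALLOC_PTR()
for OBD_SLAB_ALLOC_PTR_GFP(..., CFS_ALLOC_IO). The page slice is allocated
on the IO path, so the allocator must not recurse into filesystem reclaim,
which could deadlock against the very IO being issued; CFS_ALLOC_IO
restricts the allocation mask accordingly. A rough plain-kernel equivalent
is sketched below; the wrapper is hypothetical, and GFP_NOFS is the usual
mask for "may block, may do IO, must not re-enter the filesystem".

#include <linux/slab.h>

struct ccc_page;        /* opaque here; defined in the Lustre tree */

/* Allocate a zeroed page slice without allowing FS re-entry in reclaim. */
static struct ccc_page *alloc_page_slice(struct kmem_cache *slab)
{
        return kmem_cache_zalloc(slab, GFP_NOFS);
}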