X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_page.c;h=990d59bf2af888cf65cc854d1170c84127355777;hb=12b6c0b993bca79091d035cbe4cf1805d8adcc47;hp=f543f59cc675abc1f79b318e025dd7cc5c2b645d;hpb=c33cd87b28122c8d059b4d05d373ae447348523e;p=fs%2Flustre-release.git

diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index f543f59..990d59b 100644
--- a/lustre/llite/vvp_page.c
+++ b/lustre/llite/vvp_page.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,8 +24,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -36,6 +36,7 @@
  * Implementation of cl_page for VVP layer.
  *
  * Author: Nikita Danilov
+ * Author: Jinshan Xiong
  */
 
 #define DEBUG_SUBSYSTEM S_LLITE
@@ -78,42 +79,34 @@ static void vvp_page_fini(const struct lu_env *env,
         vvp_page_fini_common(cp);
 }
 
-static void vvp_page_own(const struct lu_env *env,
-                         const struct cl_page_slice *slice, struct cl_io *_)
+static int vvp_page_own(const struct lu_env *env,
+                        const struct cl_page_slice *slice, struct cl_io *io,
+                        int nonblock)
 {
         struct ccc_page *vpg    = cl2ccc_page(slice);
         cfs_page_t      *vmpage = vpg->cpg_page;
-        int count = 0;
 
         LASSERT(vmpage != NULL);
+        if (nonblock) {
+                if (TestSetPageLocked(vmpage))
+                        return -EAGAIN;
 
-        /* DEBUG CODE FOR #18881 */
-        while (TestSetPageLocked(vmpage)) {
-                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
-                                     cfs_time_seconds(1)/10);
-                if (++count > 600) {
-                        CL_PAGE_DEBUG(D_ERROR, env,
-                                      cl_page_top(slice->cpl_page),
-                                      "XXX page %p blocked on acquiring the"
-                                      " lock. process %s/%p, flags %lx,io %p\n",
-                                      vmpage, current->comm, current,
-                                      vmpage->flags, _);
-                        libcfs_debug_dumpstack(NULL);
-                        LCONSOLE_WARN("Reproduced bug #18881,please contact:"
-                                      "jay , thanks\n");
-
-                        lock_page(vmpage);
-                        break;
+                if (unlikely(PageWriteback(vmpage))) {
+                        unlock_page(vmpage);
+                        return -EAGAIN;
                 }
+
+                return 0;
         }
-        /* DEBUG CODE END */
 
-        /* lock_page(vmpage); */
+        lock_page(vmpage);
         wait_on_page_writeback(vmpage);
+        return 0;
 }
 
 static void vvp_page_assume(const struct lu_env *env,
-                            const struct cl_page_slice *slice, struct cl_io *_)
+                            const struct cl_page_slice *slice,
+                            struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
@@ -124,7 +117,7 @@ static void vvp_page_assume(const struct lu_env *env,
 
 static void vvp_page_unassume(const struct lu_env *env,
                               const struct cl_page_slice *slice,
-                              struct cl_io *_)
+                              struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
@@ -144,7 +137,8 @@ static void vvp_page_disown(const struct lu_env *env,
 }
 
 static void vvp_page_discard(const struct lu_env *env,
-                             const struct cl_page_slice *slice, struct cl_io *_)
+                             const struct cl_page_slice *slice,
+                             struct cl_io *unused)
 {
         cfs_page_t           *vmpage  = cl2vm_page(slice);
         struct address_space *mapping = vmpage->mapping;
@@ -164,7 +158,8 @@ static void vvp_page_discard(const struct lu_env *env,
 }
 
 static int vvp_page_unmap(const struct lu_env *env,
-                          const struct cl_page_slice *slice, struct cl_io *_)
+                          const struct cl_page_slice *slice,
+                          struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
         __u64       offset = vmpage->index << CFS_PAGE_SHIFT;
@@ -199,13 +194,17 @@ static void vvp_page_delete(const struct lu_env *env,
 }
 
 static void vvp_page_export(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+                            const struct cl_page_slice *slice,
+                            int uptodate)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
         LASSERT(vmpage != NULL);
         LASSERT(PageLocked(vmpage));
-        SetPageUptodate(vmpage);
+        if (uptodate)
+                SetPageUptodate(vmpage);
+        else
+                ClearPageUptodate(vmpage);
 }
 
 static int vvp_page_is_vmlocked(const struct lu_env *env,
@@ -216,7 +215,7 @@ static int vvp_page_is_vmlocked(const struct lu_env *env,
 
 static int vvp_page_prep_read(const struct lu_env *env,
                               const struct cl_page_slice *slice,
-                              struct cl_io *_)
+                              struct cl_io *unused)
 {
         ENTRY;
         /* Skip the page already marked as PG_uptodate. */
@@ -225,18 +224,17 @@ static int vvp_page_prep_read(const struct lu_env *env,
 
 static int vvp_page_prep_write(const struct lu_env *env,
                                const struct cl_page_slice *slice,
-                               struct cl_io *_)
+                               struct cl_io *unused)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
-        int result;
+        cfs_page_t *vmpage = cl2vm_page(slice);
 
-        if (clear_page_dirty_for_io(vmpage)) {
-                set_page_writeback(vmpage);
-                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
-                result = 0;
-        } else
-                result = -EALREADY;
-        return result;
+        LASSERT(PageLocked(vmpage));
+        LASSERT(!PageDirty(vmpage));
+
+        set_page_writeback(vmpage);
+        vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+
+        return 0;
 }
 
 /**
@@ -258,63 +256,47 @@ static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
         }
 }
 
-static void vvp_page_completion_common(const struct lu_env *env,
-                                       struct ccc_page *cp, int ioret)
-{
-        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
-        cfs_page_t        *vmpage = cp->cpg_page;
-        struct inode      *inode  = ccc_object_inode(clp->cp_obj);
-        struct cl_sync_io *anchor = cp->cpg_sync_io;
-
-        LINVRNT(cl_page_is_vmlocked(env, clp));
-
-        if (anchor != NULL) {
-                cp->cpg_sync_io = NULL;
-                cl_sync_io_note(anchor, ioret);
-        } else if (clp->cp_type == CPT_CACHEABLE) {
-                /*
-                 * Only mark the page error only when it's a cacheable page
-                 * and NOT a sync io.
-                 *
-                 * For sync IO and direct IO(CPT_TRANSIENT), the error is able
-                 * to be seen by application, so we don't need to mark a page
-                 * as error at all.
-                 */
-                vvp_vmpage_error(inode, vmpage, ioret);
-                unlock_page(vmpage);
-        }
-}
-
 static void vvp_page_completion_read(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
 {
-        struct ccc_page *cp    = cl2ccc_page(slice);
-        struct cl_page  *page  = cl_page_top(slice->cpl_page);
-        struct inode    *inode = ccc_object_inode(page->cp_obj);
+        struct ccc_page *cp     = cl2ccc_page(slice);
+        cfs_page_t      *vmpage = cp->cpg_page;
+        struct cl_page  *page   = cl_page_top(slice->cpl_page);
+        struct inode    *inode  = ccc_object_inode(page->cp_obj);
         ENTRY;
 
+        LASSERT(PageLocked(vmpage));
         CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
 
         if (cp->cpg_defer_uptodate)
                 ll_ra_count_put(ll_i2sbi(inode), 1);
 
         if (ioret == 0) {
-                /* XXX: do we need this for transient pages? */
                 if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page);
+                        cl_page_export(env, page, 1);
         } else
                 cp->cpg_defer_uptodate = 0;
-        vvp_page_completion_common(env, cp, ioret);
+
+        if (page->cp_sync_io == NULL)
+                unlock_page(vmpage);
 
         EXIT;
 }
 
-static void vvp_page_completion_write_common(const struct lu_env *env,
-                                             const struct cl_page_slice *slice,
-                                             int ioret)
+static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
 {
-        struct ccc_page *cp = cl2ccc_page(slice);
+        struct ccc_page *cp     = cl2ccc_page(slice);
+        struct cl_page  *pg     = slice->cpl_page;
+        cfs_page_t      *vmpage = cp->cpg_page;
+        ENTRY;
+
+        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
+        LASSERT(PageWriteback(vmpage));
+
+        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
         /*
          * TODO: Actually it makes sense to add the page into oap pending
@@ -325,28 +307,17 @@ static void vvp_page_completion_write_common(const struct lu_env *env,
          * ->cpo_completion method. The underlying transfer should be notified
          * and then re-add the page into pending transfer queue. -jay
          */
+
         cp->cpg_write_queued = 0;
         vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
 
-        vvp_page_completion_common(env, cp, ioret);
-}
-
-static void vvp_page_completion_write(const struct lu_env *env,
-                                      const struct cl_page_slice *slice,
-                                      int ioret)
-{
-        struct ccc_page *cp     = cl2ccc_page(slice);
-        struct cl_page  *pg     = slice->cpl_page;
-        cfs_page_t      *vmpage = cp->cpg_page;
-
-        ENTRY;
-
-        LINVRNT(cl_page_is_vmlocked(env, pg));
-        LASSERT(PageWriteback(vmpage));
-
-        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+        /*
+         * Only mark the page error only when it's an async write because
+         * applications won't wait for IO to finish.
+         */
+        if (pg->cp_sync_io == NULL)
+                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
 
-        vvp_page_completion_write_common(env, slice, ioret);
         end_page_writeback(vmpage);
         EXIT;
 }
@@ -366,36 +337,32 @@ static void vvp_page_completion_write(const struct lu_env *env,
  * truncated. Skip it.
  */
 static int vvp_page_make_ready(const struct lu_env *env,
-                               const struct cl_page_slice *slice)
+                               const struct cl_page_slice *slice)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
-        struct cl_page *pg = slice->cpl_page;
-        int result;
-
-        result = -EAGAIN;
-        /* we're trying to write, but the page is locked.. come back later */
-        if (!TestSetPageLocked(vmpage)) {
-                if (pg->cp_state == CPS_CACHED) {
-                        /*
-                         * We can cancel IO if page wasn't dirty after all.
-                         */
-                        clear_page_dirty_for_io(vmpage);
-                        /*
-                         * This actually clears the dirty bit in the radix
-                         * tree.
-                         */
-                        set_page_writeback(vmpage);
-                        vvp_write_pending(cl2ccc(slice->cpl_obj),
-                                          cl2ccc_page(slice));
-                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
-                        result = 0;
-                } else
-                        /*
-                         * Page was concurrently truncated.
-                         */
-                        LASSERT(pg->cp_state == CPS_FREEING);
-        }
-        RETURN(result);
+        cfs_page_t *vmpage = cl2vm_page(slice);
+        struct cl_page *pg = slice->cpl_page;
+        int result = 0;
+
+        lock_page(vmpage);
+        if (clear_page_dirty_for_io(vmpage)) {
+                LASSERT(pg->cp_state == CPS_CACHED);
+                /* This actually clears the dirty bit in the radix
+                 * tree. */
+                set_page_writeback(vmpage);
+                vvp_write_pending(cl2ccc(slice->cpl_obj),
+                                  cl2ccc_page(slice));
+                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
+        } else if (pg->cp_state == CPS_PAGEOUT) {
+                /* is it possible for osc_flush_async_page() to already
+                 * make it ready? */
+                result = -EALREADY;
+        } else {
+                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
+                              pg->cp_state);
+                LBUG();
+        }
+        unlock_page(vmpage);
+        RETURN(result);
 }
 
 static int vvp_page_print(const struct lu_env *env,
@@ -456,37 +423,38 @@ static void vvp_transient_page_verify(const struct cl_page *page)
         /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
 }
 
-static void vvp_transient_page_own(const struct lu_env *env,
-                                   const struct cl_page_slice *slice,
-                                   struct cl_io *_)
+static int vvp_transient_page_own(const struct lu_env *env,
+                                  const struct cl_page_slice *slice,
+                                  struct cl_io *unused, int nonblock)
 {
         vvp_transient_page_verify(slice->cpl_page);
+        return 0;
 }
 
 static void vvp_transient_page_assume(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
-                                      struct cl_io *_)
+                                      struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_unassume(const struct lu_env *env,
                                         const struct cl_page_slice *slice,
-                                        struct cl_io *_)
+                                        struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_disown(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
-                                      struct cl_io *_)
+                                      struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_discard(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
-                                       struct cl_io *_)
+                                       struct cl_io *unused)
 {
         struct cl_page *page = slice->cpl_page;
 
@@ -511,15 +479,13 @@ static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
 }
 
 static void
-vvp_transient_page_completion_write(const struct lu_env *env,
-                                    const struct cl_page_slice *slice,
-                                    int ioret)
+vvp_transient_page_completion(const struct lu_env *env,
+                              const struct cl_page_slice *slice,
+                              int ioret)
 {
         vvp_transient_page_verify(slice->cpl_page);
-        vvp_page_completion_write_common(env, slice, ioret);
 }
 
-
 static void vvp_transient_page_fini(const struct lu_env *env,
                                     struct cl_page_slice *slice)
 {
@@ -546,11 +512,11 @@ static const struct cl_page_operations vvp_transient_page_ops = {
         .io = {
                 [CRT_READ] = {
                         .cpo_prep        = ccc_transient_page_prep,
-                        .cpo_completion  = vvp_page_completion_read,
+                        .cpo_completion  = vvp_transient_page_completion,
                 },
                 [CRT_WRITE] = {
                         .cpo_prep        = ccc_transient_page_prep,
-                        .cpo_completion  = vvp_transient_page_completion_write,
+                        .cpo_completion  = vvp_transient_page_completion,
                 }
         }
 };
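
The functional core of the patch is the cl_page_operations::cpo_own() change: ownership can now fail, and a nonblock mode replaces the removed bug-18881 polling loop. A nonblocking caller trylocks the vmpage and backs off with -EAGAIN if the page is locked or still under writeback; a blocking caller simply sleeps. Below is a minimal standalone sketch of that protocol, using only stock pagecache primitives of the same kernel vintage; own_vmpage() is a hypothetical name, not a Lustre symbol, and TestSetPageLocked() is the pre-2.6.27 spelling of a failed trylock_page().

#include <linux/pagemap.h>

/* Illustrative sketch only; not part of the patch. */
static int own_vmpage(struct page *vmpage, int nonblock)
{
        if (nonblock) {
                /* Fail fast rather than sleeping on the page lock. */
                if (TestSetPageLocked(vmpage))
                        return -EAGAIN;

                /* A page still under writeback would block its owner
                 * later anyway, so treat it as busy too. */
                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }
                return 0;
        }

        /* Blocking mode: take the lock, then wait out any writeback. */
        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
        return 0;
}

Returning -EAGAIN instead of spinning lets the caller (presumably a try-own path in the generic cl_page code) skip a busy page and come back later, which is exactly the situation the removed #18881 instrumentation was trying to catch.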
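The other substantive change is in IO completion: vvp_page_completion_common() is deleted, and the unlock/error-marking decision is re-keyed from the ccc_page's cached cpg_sync_io anchor to the generic page's cp_sync_io field (the cl_sync_io_note() call the removed helper made presumably moves up into the generic cl_page completion path). Condensed into one hypothetical helper for readability; the struct fields and called functions are the patch's own, while vvp_complete() and its writing flag are not (cfs_page_t is Lustre's typedef for struct page):

/* Hypothetical condensation of the new completion policy. */
static void vvp_complete(struct cl_page *pg, cfs_page_t *vmpage,
                         struct inode *inode, int ioret, int writing)
{
        if (writing) {
                /* Async write: the application never sees ioret, so
                 * record failures on the vmpage; a sync writer gets
                 * ioret directly and needs no PageError() mark. */
                if (pg->cp_sync_io == NULL)
                        vvp_vmpage_error(inode, vmpage, ioret);
                end_page_writeback(vmpage);
        } else if (pg->cp_sync_io == NULL) {
                /* Async read: drop the page lock taken at own time;
                 * a sync reader unlocks in its own context. */
                unlock_page(vmpage);
        }
}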
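vvp_page_make_ready() also changes shape: it now blocks on lock_page() rather than bailing out on a failed trylock, and it lets clear_page_dirty_for_io() decide whether there is anything to write, mapping a clean CPS_PAGEOUT page to -EALREADY and LBUG()ing on anything else. The dirty-to-writeback handoff it relies on is the standard pagecache idiom, sketched below outside of Lustre; start_pageout() is a hypothetical name, and the CPS_* triage has no generic equivalent, so the clean case is reduced to -EALREADY here.

#include <linux/pagemap.h>

/* Minimal sketch of the dirty->writeback handoff; not Lustre code. */
static int start_pageout(struct page *vmpage)
{
        int rc = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                /* The radix-tree dirty tag is cleared here; the page
                 * stays "in flight" until end_page_writeback(). */
                set_page_writeback(vmpage);
        } else {
                /* Already clean: an earlier flush beat us to it. */
                rc = -EALREADY;
        }
        unlock_page(vmpage);
        return rc;
}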