X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_page.c;h=4b85a234a3e21b7a2d7ea67923666d5e4ec9a8e4;hp=74b81ce2c93d590deb9aa6bb352a8e055132513f;hb=ea766bd959cc2347b223d597b3f8a31e010bed72;hpb=65701b4a30efdb695776bcf690a2b3cabc928da1 diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c index 74b81ce..4b85a23 100644 --- a/lustre/llite/vvp_page.c +++ b/lustre/llite/vvp_page.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -29,7 +27,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, Whamcloud, Inc. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -38,6 +36,7 @@ * Implementation of cl_page for VVP layer. * * Author: Nikita Danilov + * Author: Jinshan Xiong */ #define DEBUG_SUBSYSTEM S_LLITE @@ -63,7 +62,6 @@ static void vvp_page_fini_common(struct ccc_page *cp) LASSERT(vmpage != NULL); page_cache_release(vmpage); - OBD_SLAB_FREE_PTR(cp, vvp_page_kmem); } static void vvp_page_fini(const struct lu_env *env, @@ -87,10 +85,10 @@ static int vvp_page_own(const struct lu_env *env, struct ccc_page *vpg = cl2ccc_page(slice); cfs_page_t *vmpage = vpg->cpg_page; - LASSERT(vmpage != NULL); - if (nonblock) { - if (TestSetPageLocked(vmpage)) - return -EAGAIN; + LASSERT(vmpage != NULL); + if (nonblock) { + if (!trylock_page(vmpage)) + return -EAGAIN; if (unlikely(PageWriteback(vmpage))) { unlock_page(vmpage); @@ -142,12 +140,14 @@ static void vvp_page_discard(const struct lu_env *env, struct cl_io *unused) { cfs_page_t *vmpage = cl2vm_page(slice); - struct address_space *mapping = vmpage->mapping; + struct address_space *mapping; struct ccc_page *cpg = cl2ccc_page(slice); LASSERT(vmpage != NULL); LASSERT(PageLocked(vmpage)); + mapping = vmpage->mapping; + if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used) ll_ra_stats_inc(mapping, RA_STAT_DISCARDED); @@ -163,10 +163,13 @@ static int vvp_page_unmap(const struct lu_env *env, struct cl_io *unused) { cfs_page_t *vmpage = cl2vm_page(slice); - __u64 offset = vmpage->index << CFS_PAGE_SHIFT; + __u64 offset; LASSERT(vmpage != NULL); LASSERT(PageLocked(vmpage)); + + offset = vmpage->index << CFS_PAGE_SHIFT; + /* * XXX is it safe to call this with the page lock held? */ @@ -227,21 +230,15 @@ static int vvp_page_prep_write(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { - struct cl_page *cp = slice->cpl_page; - cfs_page_t *vmpage = cl2vm_page(slice); - int result; + cfs_page_t *vmpage = cl2vm_page(slice); - if (clear_page_dirty_for_io(vmpage)) { - set_page_writeback(vmpage); - vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); - result = 0; + LASSERT(PageLocked(vmpage)); + LASSERT(!PageDirty(vmpage)); - /* only turn on writeback for async write. 
*/ - if (cp->cp_sync_io == NULL) - unlock_page(vmpage); - } else - result = -EALREADY; - return result; + set_page_writeback(vmpage); + vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); + + return 0; } /** @@ -252,15 +249,24 @@ static int vvp_page_prep_write(const struct lu_env *env, */ static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret) { - if (ioret == 0) - ClearPageError(vmpage); - else if (ioret != -EINTR) { - SetPageError(vmpage); - if (ioret == -ENOSPC) - set_bit(AS_ENOSPC, &inode->i_mapping->flags); - else - set_bit(AS_EIO, &inode->i_mapping->flags); - } + struct ccc_object *obj = cl_inode2ccc(inode); + + if (ioret == 0) { + ClearPageError(vmpage); + obj->cob_discard_page_warned = 0; + } else { + SetPageError(vmpage); + if (ioret == -ENOSPC) + set_bit(AS_ENOSPC, &inode->i_mapping->flags); + else + set_bit(AS_EIO, &inode->i_mapping->flags); + + if ((ioret == -ESHUTDOWN || ioret == -EINTR) && + obj->cob_discard_page_warned == 0) { + obj->cob_discard_page_warned = 1; + ll_dirty_page_discard_warn(vmpage, ioret); + } + } } static void vvp_page_completion_read(const struct lu_env *env, @@ -344,37 +350,32 @@ static void vvp_page_completion_write(const struct lu_env *env, * truncated. Skip it. */ static int vvp_page_make_ready(const struct lu_env *env, - const struct cl_page_slice *slice) + const struct cl_page_slice *slice) { - cfs_page_t *vmpage = cl2vm_page(slice); - struct cl_page *pg = slice->cpl_page; - int result; - - result = -EAGAIN; - /* we're trying to write, but the page is locked.. come back later */ - if (!TestSetPageLocked(vmpage)) { - if (pg->cp_state == CPS_CACHED) { - /* - * We can cancel IO if page wasn't dirty after all. - */ - clear_page_dirty_for_io(vmpage); - /* - * This actually clears the dirty bit in the radix - * tree. - */ - set_page_writeback(vmpage); - vvp_write_pending(cl2ccc(slice->cpl_obj), - cl2ccc_page(slice)); - CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); - result = 0; - } else - /* - * Page was concurrently truncated. - */ - LASSERT(pg->cp_state == CPS_FREEING); - unlock_page(vmpage); - } - RETURN(result); + cfs_page_t *vmpage = cl2vm_page(slice); + struct cl_page *pg = slice->cpl_page; + int result = 0; + + lock_page(vmpage); + if (clear_page_dirty_for_io(vmpage)) { + LASSERT(pg->cp_state == CPS_CACHED); + /* This actually clears the dirty bit in the radix + * tree. */ + set_page_writeback(vmpage); + vvp_write_pending(cl2ccc(slice->cpl_obj), + cl2ccc_page(slice)); + CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); + } else if (pg->cp_state == CPS_PAGEOUT) { + /* is it possible for osc_flush_async_page() to already + * make it ready? 
*/ + result = -EALREADY; + } else { + CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n", + pg->cp_state); + LBUG(); + } + unlock_page(vmpage); + RETURN(result); } static int vvp_page_print(const struct lu_env *env, @@ -429,10 +430,9 @@ static const struct cl_page_operations vvp_page_ops = { static void vvp_transient_page_verify(const struct cl_page *page) { - struct inode *inode = ccc_object_inode(page->cp_obj); + struct inode *inode = ccc_object_inode(page->cp_obj); - LASSERT(!TRYLOCK_INODE_MUTEX(inode)); - /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */ + LASSERT(!mutex_trylock(&inode->i_mutex)); } static int vvp_transient_page_own(const struct lu_env *env, @@ -479,15 +479,15 @@ static void vvp_transient_page_discard(const struct lu_env *env, } static int vvp_transient_page_is_vmlocked(const struct lu_env *env, - const struct cl_page_slice *slice) + const struct cl_page_slice *slice) { - struct inode *inode = ccc_object_inode(slice->cpl_obj); - int locked; + struct inode *inode = ccc_object_inode(slice->cpl_obj); + int locked; - locked = !TRYLOCK_INODE_MUTEX(inode); - if (!locked) - UNLOCK_INODE_MUTEX(inode); - return locked ? -EBUSY : -ENODATA; + locked = !mutex_trylock(&inode->i_mutex); + if (!locked) + mutex_unlock(&inode->i_mutex); + return locked ? -EBUSY : -ENODATA; } static void @@ -499,15 +499,15 @@ vvp_transient_page_completion(const struct lu_env *env, } static void vvp_transient_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) + struct cl_page_slice *slice) { - struct ccc_page *cp = cl2ccc_page(slice); - struct cl_page *clp = slice->cpl_page; - struct ccc_object *clobj = cl2ccc(clp->cp_obj); + struct ccc_page *cp = cl2ccc_page(slice); + struct cl_page *clp = slice->cpl_page; + struct ccc_object *clobj = cl2ccc(clp->cp_obj); - vvp_page_fini_common(cp); - LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode)); - clobj->cob_transient_pages--; + vvp_page_fini_common(cp); + LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex)); + clobj->cob_transient_pages--; } static const struct cl_page_operations vvp_transient_page_ops = { @@ -533,36 +533,30 @@ static const struct cl_page_operations vvp_transient_page_ops = { } }; -struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) +int vvp_page_init(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, cfs_page_t *vmpage) { - struct ccc_page *cpg; - int result; - - CLOBINVRNT(env, obj, ccc_object_invariant(obj)); - - OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO); - if (cpg != NULL) { - cpg->cpg_page = vmpage; - page_cache_get(vmpage); - - CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage); - if (page->cp_type == CPT_CACHEABLE) { - SetPagePrivate(vmpage); - vmpage->private = (unsigned long)page; - cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_page_ops); - } else { - struct ccc_object *clobj = cl2ccc(obj); - - LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode)); - cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_transient_page_ops); - clobj->cob_transient_pages++; - } - result = 0; - } else - result = -ENOMEM; - return ERR_PTR(result); + struct ccc_page *cpg = cl_object_page_slice(obj, page); + + CLOBINVRNT(env, obj, ccc_object_invariant(obj)); + + cpg->cpg_page = vmpage; + page_cache_get(vmpage); + + CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage); + if (page->cp_type == CPT_CACHEABLE) { + SetPagePrivate(vmpage); + vmpage->private = (unsigned long)page; + cl_page_slice_add(page, &cpg->cpg_cl, obj, + &vvp_page_ops); + } else { + 
struct ccc_object *clobj = cl2ccc(obj); + + LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex)); + cl_page_slice_add(page, &cpg->cpg_cl, obj, + &vvp_transient_page_ops); + clobj->cob_transient_pages++; + } + return 0; }
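
A note on the vvp_page_own() hunk above: the old TestSetPageLocked() macro was removed from mainline kernels around 2.6.27, so the non-blocking path now uses trylock_page(). The two have opposite polarity, which is why the test gains a '!': TestSetPageLocked() returned nonzero when the page was already locked, while trylock_page() returns nonzero when the lock was successfully taken. A userspace analogue of the non-blocking ownership path is sketched below; toy_vmpage, page_own() and the busy flag (standing in for PageWriteback()) are invented names for illustration, not the real API.

/*
 * Non-blocking "own" sketch: try to take the lock, and even when that
 * succeeds, back off with -EAGAIN if the page is still under
 * "writeback" (the busy flag).  Build with -lpthread.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct toy_vmpage {
	pthread_mutex_t	lock;	/* plays the role of PG_locked */
	int		busy;	/* plays the role of PageWriteback() */
};

static int page_own(struct toy_vmpage *p, int nonblock)
{
	if (nonblock) {
		if (pthread_mutex_trylock(&p->lock) != 0)
			return -EAGAIN;		/* already locked */
		if (p->busy) {			/* locked, but mid-writeback */
			pthread_mutex_unlock(&p->lock);
			return -EAGAIN;
		}
		return 0;
	}
	pthread_mutex_lock(&p->lock);		/* blocking path */
	return 0;
}

int main(void)
{
	struct toy_vmpage p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("first own:  %d\n", page_own(&p, 1));	/* 0: lock taken */
	printf("second own: %d\n", page_own(&p, 1));	/* -EAGAIN */
	return 0;
}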
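
The rewritten vvp_vmpage_error() keeps the existing latching of write errors into the mapping flags (AS_ENOSPC for -ENOSPC, AS_EIO otherwise) but changes the treatment of -EINTR: previously -EINTR skipped SetPageError() entirely, whereas now it marks the page and, together with -ESHUTDOWN, triggers ll_dirty_page_discard_warn() at most once per object via the new cob_discard_page_warned flag, which a later successful I/O re-arms. A standalone sketch of that warn-once latch follows; toy_object and dirty_page_warn() are hypothetical stand-ins for ccc_object and ll_dirty_page_discard_warn().

/*
 * Warn-once latch: the first interesting failure warns and sets the
 * flag; repeats stay quiet; a success clears the flag so the next
 * burst of failures warns again.
 */
#include <errno.h>
#include <stdio.h>

struct toy_object {
	int discard_warned;	/* ~ cob_discard_page_warned */
};

static void dirty_page_warn(int ioret)
{
	fprintf(stderr, "dirty page discarded: rc = %d\n", ioret);
}

static void page_completion(struct toy_object *obj, int ioret)
{
	if (ioret == 0) {
		obj->discard_warned = 0;	/* success re-arms the warning */
	} else if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		   obj->discard_warned == 0) {
		obj->discard_warned = 1;	/* warn only once per burst */
		dirty_page_warn(ioret);
	}
}

int main(void)
{
	struct toy_object obj = { 0 };

	page_completion(&obj, -EINTR);		/* warns */
	page_completion(&obj, -EINTR);		/* silent */
	page_completion(&obj, 0);		/* re-arms */
	page_completion(&obj, -ESHUTDOWN);	/* warns again */
	return 0;
}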
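
Finally, the vvp_page_init() hunk is the heart of the patch: the per-page OBD_SLAB_ALLOC_PTR_GFP()/OBD_SLAB_FREE_PTR() pair on vvp_page_kmem disappears, and cl_object_page_slice() instead hands back space already reserved inside the cl_page allocation, so slice setup can no longer fail with -ENOMEM and the function's signature shrinks from returning a cl_page pointer to returning an int (always 0 here). Below is a minimal userspace sketch of the layout idea only, assuming invented names (toy_page ~ cl_page, toy_slice ~ ccc_page, page_slice() ~ cl_object_page_slice()); the real code computes per-layer offsets when the object stack is assembled rather than hard-coding sizeof().

#include <stdio.h>
#include <stdlib.h>

struct toy_slice {		/* ~ struct ccc_page */
	unsigned long vmpage;	/* ~ cpg_page */
};

struct toy_page {		/* ~ struct cl_page */
	long state;		/* 'long' keeps the trailing slice aligned */
};

static struct toy_slice *page_slice(struct toy_page *pg)
{
	/* the slice lives in the trailing space of the same allocation */
	return (struct toy_slice *)((char *)pg + sizeof(*pg));
}

int main(void)
{
	/* one malloc replaces the old page + per-slice slab pair */
	struct toy_page *pg = malloc(sizeof(*pg) + sizeof(struct toy_slice));

	if (pg == NULL)
		return 1;
	page_slice(pg)->vmpage = 0;	/* slice "init" cannot fail anymore */
	printf("page %p, embedded slice %p\n",
	       (void *)pg, (void *)page_slice(pg));
	free(pg);			/* one free releases both */
	return 0;
}

The payoff is one fewer slab allocation, and one fewer failure path, on a per-page hot path.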