X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_page.c;h=6d1c7703cc455bebd7ec2e5e52bda3be462c58b6;hb=3d255acab28a2e36a90460bee4fbf7a88fad815c;hp=d8ebd149c3599da922d414ca8b998207f80c99c4;hpb=be4372fddbada6d026f4188a7e88c6a11d0a83d4;p=fs%2Flustre-release.git

diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index d8ebd14..6d1c770 100644
--- a/lustre/llite/vvp_page.c
+++ b/lustre/llite/vvp_page.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -63,7 +59,7 @@ static void vvp_page_fini_common(struct vvp_page *vpg)
 	struct page *vmpage = vpg->vpg_page;
 
 	LASSERT(vmpage != NULL);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static void vvp_page_fini(const struct lu_env *env,
@@ -167,13 +163,10 @@ static void vvp_page_delete(const struct lu_env *env,
 	LASSERT((struct cl_page *)vmpage->private == page);
 	LASSERT(inode == vvp_object_inode(obj));
 
-	vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
-
 	/* Drop the reference count held in vvp_page_init */
 	refc = atomic_dec_return(&page->cp_ref);
 	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
 
-	ClearPageUptodate(vmpage);
 	ClearPagePrivate(vmpage);
 	vmpage->private = 0;
 	/*
@@ -221,7 +214,6 @@ static int vvp_page_prep_write(const struct lu_env *env,
 	LASSERT(!PageDirty(vmpage));
 
 	set_page_writeback(vmpage);
-	vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
 
 	return 0;
 }
@@ -298,19 +290,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
 	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
 	/*
-	 * TODO: Actually it makes sense to add the page into oap pending
-	 * list again and so that we don't need to take the page out from
-	 * SoM write pending list, if we just meet a recoverable error,
-	 * -ENOMEM, etc.
-	 * To implement this, we just need to return a non zero value in
-	 * ->cpo_completion method. The underlying transfer should be notified
-	 * and then re-add the page into pending transfer queue. -jay
-	 */
-
-	vpg->vpg_write_queued = 0;
-	vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
-
-	/*
 	 * Only mark the page error only when it's an async write because
 	 * applications won't wait for IO to finish.
 	 */
@@ -348,8 +327,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
 		/* This actually clears the dirty bit in the radix
 		 * tree. */
 		set_page_writeback(vmpage);
-		vvp_write_pending(cl2vvp(slice->cpl_obj),
-				  cl2vvp_page(slice));
 		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
 	} else if (pg->cp_state == CPS_PAGEOUT) {
 		/* is it possible for osc_flush_async_page() to already
@@ -364,23 +341,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
 	RETURN(result);
 }
 
-static int vvp_page_is_under_lock(const struct lu_env *env,
-				  const struct cl_page_slice *slice,
-				  struct cl_io *io, pgoff_t *max_index)
-{
-	ENTRY;
-
-	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
-	    io->ci_type == CIT_FAULT) {
-		struct ccc_io *cio = ccc_env_io(env);
-
-		if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
-			*max_index = CL_PAGE_EOF;
-	}
-	RETURN(0);
-}
-
-
 static int vvp_page_print(const struct lu_env *env,
 			  const struct cl_page_slice *slice,
 			  void *cookie, lu_printer_t printer)
@@ -388,10 +348,9 @@ static int vvp_page_print(const struct lu_env *env,
 	struct vvp_page *vpg = cl2vvp_page(slice);
 	struct page *vmpage = vpg->vpg_page;
 
-	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
+	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d) "
 		   "vm@%p ",
-		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
-		   vpg->vpg_write_queued, vmpage);
+		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
 
 	if (vmpage != NULL) {
 		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
@@ -428,7 +387,6 @@ static const struct cl_page_operations vvp_page_ops = {
 	.cpo_is_vmlocked = vvp_page_is_vmlocked,
 	.cpo_fini = vvp_page_fini,
 	.cpo_print = vvp_page_print,
-	.cpo_is_under_lock = vvp_page_is_under_lock,
 	.io = {
 		[CRT_READ] = {
 			.cpo_prep = vvp_page_prep_read,
@@ -505,9 +463,9 @@ static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
 	struct inode *inode = vvp_object_inode(slice->cpl_obj);
 	int locked;
 
-	locked = !mutex_trylock(&inode->i_mutex);
+	locked = !inode_trylock(inode);
 	if (!locked)
-		mutex_unlock(&inode->i_mutex);
+		inode_unlock(inode);
 	return locked ? -EBUSY : -ENODATA;
 }
 
@@ -539,7 +497,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 	.cpo_fini = vvp_transient_page_fini,
 	.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
 	.cpo_print = vvp_page_print,
-	.cpo_is_under_lock = vvp_page_is_under_lock,
 	.io = {
 		[CRT_READ] = {
 			.cpo_prep = vvp_transient_page_prep,
@@ -561,9 +518,8 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
 	vpg->vpg_page = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 
-	INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
 	if (page->cp_type == CPT_CACHEABLE) {
 		/* in cache, decref in vvp_page_delete */
 		atomic_inc(&page->cp_ref);
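
The hunks above adapt vvp_page.c to two upstream kernel API transitions: page_cache_get()/page_cache_release() became get_page()/put_page(), and direct use of inode->i_mutex was replaced by the inode_lock()/inode_trylock()/inode_unlock() helpers. A minimal sketch of how the newer calls pair up, with illustrative helper names that are not part of this patch:

/* Illustrative only: mirrors the API substitutions made in the hunks above. */
#include <linux/mm.h>	/* get_page(), put_page() */
#include <linux/fs.h>	/* inode_trylock(), inode_unlock() */

/* Pin a VM page for as long as a wrapper object references it. */
static void example_pin_vmpage(struct page *vmpage)
{
	get_page(vmpage);		/* was: page_cache_get(vmpage) */
}

/* Drop the reference taken in example_pin_vmpage(). */
static void example_unpin_vmpage(struct page *vmpage)
{
	put_page(vmpage);		/* was: page_cache_release(vmpage) */
}

/* Non-blocking check of whether another task holds the inode lock,
 * as vvp_transient_page_is_vmlocked() does above. */
static int example_inode_is_locked(struct inode *inode)
{
	if (!inode_trylock(inode))	/* was: !mutex_trylock(&inode->i_mutex) */
		return 1;		/* lock held elsewhere */
	inode_unlock(inode);		/* was: mutex_unlock(&inode->i_mutex) */
	return 0;
}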