* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
vvp_page_fini_common(cp);
}
-static void vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
+static int vvp_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io,
+ int nonblock)
{
struct ccc_page *vpg = cl2ccc_page(slice);
cfs_page_t *vmpage = vpg->cpg_page;
- int count = 0;
LASSERT(vmpage != NULL);
+ if (nonblock) {
+ if (TestSetPageLocked(vmpage))
+ return -EAGAIN;
- /* DEBUG CODE FOR #18881 */
- while (TestSetPageLocked(vmpage)) {
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1)/10);
- if (++count > 600) {
- CL_PAGE_DEBUG(D_ERROR, env,
- cl_page_top(slice->cpl_page),
- "XXX page %p blocked on acquiring the"
- " lock. process %s/%p, flags %lx,io %p\n",
- vmpage, current->comm, current,
- vmpage->flags, io);
- libcfs_debug_dumpstack(NULL);
- if (slice->cpl_page->cp_task) {
- cfs_task_t *tsk = slice->cpl_page->cp_task;
- LCONSOLE_WARN("The page was owned by %s\n",
- tsk->comm);
- libcfs_debug_dumpstack(tsk);
- }
- LCONSOLE_WARN("Reproduced bug #18881,please contact:"
- "jay <jinshan.xiong@sun.com>, thanks\n");
-
- lock_page(vmpage);
- break;
+ if (unlikely(PageWriteback(vmpage))) {
+ unlock_page(vmpage);
+ return -EAGAIN;
}
+
+ return 0;
}
- /* DEBUG CODE END */
- /* lock_page(vmpage); */
+ lock_page(vmpage);
wait_on_page_writeback(vmpage);
+ return 0;
}
static void vvp_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct cl_page *cp = slice->cpl_page;
+ cfs_page_t *vmpage = cl2vm_page(slice);
int result;
if (clear_page_dirty_for_io(vmpage)) {
set_page_writeback(vmpage);
vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
result = 0;
+
+ /* only turn on writeback for async write. */
+ if (cp->cp_sync_io == NULL)
+ unlock_page(vmpage);
} else
result = -EALREADY;
return result;
}
}
-static void vvp_page_completion_common(const struct lu_env *env,
- struct ccc_page *cp, int ioret)
-{
- struct cl_page *clp = cp->cpg_cl.cpl_page;
- cfs_page_t *vmpage = cp->cpg_page;
- struct inode *inode = ccc_object_inode(clp->cp_obj);
-
- LINVRNT(cl_page_is_vmlocked(env, clp));
-
- if (!clp->cp_sync_io && clp->cp_type == CPT_CACHEABLE) {
- /*
- * Only mark the page error only when it's a cacheable page
- * and NOT a sync io.
- *
- * For sync IO and direct IO(CPT_TRANSIENT), the error is able
- * to be seen by application, so we don't need to mark a page
- * as error at all.
- */
- vvp_vmpage_error(inode, vmpage, ioret);
- unlock_page(vmpage);
- }
-}
-
static void vvp_page_completion_read(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *page = cl_page_top(slice->cpl_page);
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct ccc_page *cp = cl2ccc_page(slice);
+ cfs_page_t *vmpage = cp->cpg_page;
+ struct cl_page *page = cl_page_top(slice->cpl_page);
+ struct inode *inode = ccc_object_inode(page->cp_obj);
ENTRY;
+ LASSERT(PageLocked(vmpage));
CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
if (cp->cpg_defer_uptodate)
ll_ra_count_put(ll_i2sbi(inode), 1);
if (ioret == 0) {
- /* XXX: do we need this for transient pages? */
if (!cp->cpg_defer_uptodate)
cl_page_export(env, page, 1);
} else
cp->cpg_defer_uptodate = 0;
- vvp_page_completion_common(env, cp, ioret);
+
+ if (page->cp_sync_io == NULL)
+ unlock_page(vmpage);
EXIT;
}
-static void vvp_page_completion_write_common(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
+static void vvp_page_completion_write(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *pg = slice->cpl_page;
+ cfs_page_t *vmpage = cp->cpg_page;
+ ENTRY;
+
+ LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
+ LASSERT(PageWriteback(vmpage));
+
+ CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
/*
* TODO: Actually it makes sense to add the page into oap pending
* ->cpo_completion method. The underlying transfer should be notified
* and then re-add the page into pending transfer queue. -jay
*/
+
cp->cpg_write_queued = 0;
vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
- vvp_page_completion_common(env, cp, ioret);
-}
-
-static void vvp_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- cfs_page_t *vmpage = cp->cpg_page;
-
- ENTRY;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(PageWriteback(vmpage));
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+ /*
+ * Only mark the page as being in error when it's an async write, because
+ * applications won't wait for IO to finish.
+ */
+ if (pg->cp_sync_io == NULL)
+ vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
- vvp_page_completion_write_common(env, slice, ioret);
end_page_writeback(vmpage);
EXIT;
}
* Page was concurrently truncated.
*/
LASSERT(pg->cp_state == CPS_FREEING);
+ unlock_page(vmpage);
}
RETURN(result);
}
/* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}
-static void vvp_transient_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
+static int vvp_transient_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused, int nonblock)
{
vvp_transient_page_verify(slice->cpl_page);
+ return 0;
}
static void vvp_transient_page_assume(const struct lu_env *env,
}
static void
-vvp_transient_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
+vvp_transient_page_completion(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
{
vvp_transient_page_verify(slice->cpl_page);
- vvp_page_completion_write_common(env, slice, ioret);
}
-
static void vvp_transient_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
.io = {
[CRT_READ] = {
.cpo_prep = ccc_transient_page_prep,
- .cpo_completion = vvp_page_completion_read,
+ .cpo_completion = vvp_transient_page_completion,
},
[CRT_WRITE] = {
.cpo_prep = ccc_transient_page_prep,
- .cpo_completion = vvp_transient_page_completion_write,
+ .cpo_completion = vvp_transient_page_completion,
}
}
};