* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
vvp_page_fini_common(cp);
}
-static void vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+static int vvp_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io,
+ int nonblock)
{
struct ccc_page *vpg = cl2ccc_page(slice);
cfs_page_t *vmpage = vpg->cpg_page;
LASSERT(vmpage != NULL);
+ if (nonblock) {
+ if (TestSetPageLocked(vmpage))
+ return -EAGAIN;
+
+ if (unlikely(PageWriteback(vmpage))) {
+ unlock_page(vmpage);
+ return -EAGAIN;
+ }
+
+ return 0;
+ }
+
lock_page(vmpage);
wait_on_page_writeback(vmpage);
+ return 0;
}
static void vvp_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
static void vvp_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
}
static void vvp_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
struct address_space *mapping = vmpage->mapping;
}
static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
__u64 offset = vmpage->index << CFS_PAGE_SHIFT;
}
static void vvp_page_export(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice,
+ int uptodate)
{
cfs_page_t *vmpage = cl2vm_page(slice);
LASSERT(vmpage != NULL);
LASSERT(PageLocked(vmpage));
- SetPageUptodate(vmpage);
+ if (uptodate)
+ SetPageUptodate(vmpage);
+ else
+ ClearPageUptodate(vmpage);
}
static int vvp_page_is_vmlocked(const struct lu_env *env,
static int vvp_page_prep_read(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ENTRY;
/* Skip the page already marked as PG_uptodate. */
static int vvp_page_prep_write(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
int result;
if (clear_page_dirty_for_io(vmpage)) {
set_page_writeback(vmpage);
+ vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
result = 0;
} else
result = -EALREADY;
struct cl_page *clp = cp->cpg_cl.cpl_page;
cfs_page_t *vmpage = cp->cpg_page;
struct inode *inode = ccc_object_inode(clp->cp_obj);
- struct cl_sync_io *anchor = cp->cpg_sync_io;
LINVRNT(cl_page_is_vmlocked(env, clp));
- /* Don't assert the page writeback bit here because the lustre file
- * may be as a backend of swap space. in this case, the page writeback
- * is set by VM, and obvious we shouldn't clear it at all. Fortunately
- * this type of pages are all TRANSIENT pages. */
- KLASSERT(ergo(clp->cp_type == CPT_CACHEABLE, !PageWriteback(vmpage)));
-
- vvp_vmpage_error(inode, vmpage, ioret);
-
- if (anchor != NULL) {
- cp->cpg_sync_io = NULL;
- cl_sync_io_note(anchor, ioret);
- } else if (clp->cp_type == CPT_CACHEABLE)
+ if (!clp->cp_sync_io && clp->cp_type == CPT_CACHEABLE) {
+ /*
+ * Only mark the page error when it's a cacheable page
+ * and NOT a sync io.
+ *
+ * For sync IO and direct IO (CPT_TRANSIENT), the error can be
+ * seen by the application, so we don't need to mark the page
+ * as in error at all.
+ */
+ vvp_vmpage_error(inode, vmpage, ioret);
unlock_page(vmpage);
+ }
}
static void vvp_page_completion_read(const struct lu_env *env,
if (ioret == 0) {
/* XXX: do we need this for transient pages? */
if (!cp->cpg_defer_uptodate)
- cl_page_export(env, page);
+ cl_page_export(env, page, 1);
} else
cp->cpg_defer_uptodate = 0;
vvp_page_completion_common(env, cp, ioret);
{
struct ccc_page *cp = cl2ccc_page(slice);
- if (ioret == 0) {
- cp->cpg_write_queued = 0;
- /*
- * Only ioret == 0, write succeed, then this page could be
- * deleted from the pending_writing count.
- */
- vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
- }
+ /*
+ * TODO: Actually it makes sense to add the page into oap pending
+ * list again so that we don't need to take the page out from
+ * SoM write pending list, if we just meet a recoverable error,
+ * -ENOMEM, etc.
+ * To implement this, we just need to return a non zero value in
+ * ->cpo_completion method. The underlying transfer should be notified
+ * and then re-add the page into pending transfer queue. -jay
+ */
+ cp->cpg_write_queued = 0;
+ vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+
vvp_page_completion_common(env, cp, ioret);
}
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
- end_page_writeback(vmpage);
- LASSERT(!PageWriteback(vmpage));
-
vvp_page_completion_write_common(env, slice, ioret);
+ end_page_writeback(vmpage);
EXIT;
}
* tree.
*/
set_page_writeback(vmpage);
-
+ vvp_write_pending(cl2ccc(slice->cpl_obj),
+ cl2ccc_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
result = 0;
} else
/* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}
-static void vvp_transient_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *_)
+static int vvp_transient_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused, int nonblock)
{
vvp_transient_page_verify(slice->cpl_page);
+ return 0;
}
static void vvp_transient_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_disown(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
struct cl_page *page = slice->cpl_page;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- OBD_SLAB_ALLOC_PTR(cpg, vvp_page_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
if (cpg != NULL) {
cpg->cpg_page = vmpage;
page_cache_get(vmpage);