}
static void vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice, struct cl_io *io)
{
struct ccc_page *vpg = cl2ccc_page(slice);
cfs_page_t *vmpage = vpg->cpg_page;
"XXX page %p blocked on acquiring the"
" lock. process %s/%p, flags %lx,io %p\n",
vmpage, current->comm, current,
- vmpage->flags, _);
+ vmpage->flags, io);
libcfs_debug_dumpstack(NULL);
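+ /* If the cl_page records an owning task, report it and dump that task's stack as well. */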
+ if (slice->cpl_page->cp_task) {
+ cfs_task_t *tsk = slice->cpl_page->cp_task;
+ LCONSOLE_WARN("The page was owned by %s\n",
+ tsk->comm);
+ libcfs_debug_dumpstack(tsk);
+ }
LCONSOLE_WARN("Reproduced bug #18881,please contact:"
"jay <jinshan.xiong@sun.com>, thanks\n");
}
static void vvp_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
static void vvp_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
}
static void vvp_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
struct address_space *mapping = vmpage->mapping;
}
static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *_)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
__u64 offset = vmpage->index << CFS_PAGE_SHIFT;
}
static void vvp_page_export(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice,
+ int uptodate)
{
cfs_page_t *vmpage = cl2vm_page(slice);
LASSERT(vmpage != NULL);
LASSERT(PageLocked(vmpage));
- SetPageUptodate(vmpage);
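+ /* Honour the caller's verdict: mark the VM page up to date only when requested, otherwise clear the flag. */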
+ if (uptodate)
+ SetPageUptodate(vmpage);
+ else
+ ClearPageUptodate(vmpage);
}
static int vvp_page_is_vmlocked(const struct lu_env *env,
static int vvp_page_prep_read(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ENTRY;
/* Skip the page already marked as PG_uptodate. */
static int vvp_page_prep_write(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
cfs_page_t *vmpage = cl2vm_page(slice);
int result;
struct cl_page *clp = cp->cpg_cl.cpl_page;
cfs_page_t *vmpage = cp->cpg_page;
struct inode *inode = ccc_object_inode(clp->cp_obj);
- struct cl_sync_io *anchor = cp->cpg_sync_io;
LINVRNT(cl_page_is_vmlocked(env, clp));
- if (anchor != NULL) {
- cp->cpg_sync_io = NULL;
- cl_sync_io_note(anchor, ioret);
- } else if (clp->cp_type == CPT_CACHEABLE) {
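+ /* The sync-io anchor is now read from the cl_page itself (cp_sync_io); only cacheable, non-sync pages get the error handling below. */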
+ if (!clp->cp_sync_io && clp->cp_type == CPT_CACHEABLE) {
/*
* Only mark the page error only when it's a cacheable page
* and NOT a sync io.
if (ioret == 0) {
/* XXX: do we need this for transient pages? */
if (!cp->cpg_defer_uptodate)
- cl_page_export(env, page);
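+ /* Pass uptodate = 1: the I/O succeeded, so the page contents are valid. */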
+ cl_page_export(env, page, 1);
} else
cp->cpg_defer_uptodate = 0;
vvp_page_completion_common(env, cp, ioret);
static void vvp_transient_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_disown(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
struct cl_page *page = slice->cpl_page;