*/
struct {
/**
- * Called when a page is submitted for a transfer as a part of
- * cl_page_list.
- *
- * \return 0 : page is eligible for submission;
- * \return -EALREADY : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_prep()
- */
- int (*cpo_prep)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
* Completion handler. This is guaranteed to be eventually
- * fired after cl_page_operations::cpo_prep() or
- * cl_page_operations::cpo_make_ready() call.
+ * fired after cl_page_prep() or cl_page_make_ready() call.
*
* This method can be called in a non-blocking context. It is
* guaranteed however, that the page involved and its object
void (*cpo_completion)(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret);
- /**
- * Called when cached page is about to be added to the
- * ptlrpc request as a part of req formation.
- *
- * \return 0 : proceed with this page;
- * \return -EAGAIN : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_make_ready()
- */
- int (*cpo_make_ready)(const struct lu_env *env,
- const struct cl_page_slice *slice);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
}
-static int vvp_page_prep_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- ENTRY;
- /* Skip the page already marked as PG_uptodate. */
- RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
-}
-
-static int vvp_page_prep_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageDirty(vmpage));
-
- /* ll_writepage path is not a sync write, so need to set page writeback
- * flag
- */
- if (pg->cp_sync_io == NULL)
- set_page_writeback(vmpage);
-
- return 0;
-}
-
/**
* Handles page transfer errors at VM level.
*
EXIT;
}
-/**
- * Implements cl_page_operations::cpo_make_ready() method.
- *
- * This is called to yank a page from the transfer cache and to send it out as
- * a part of transfer. This function try-locks the page. If try-lock failed,
- * page is owned by some concurrent IO, and should be skipped (this is bad,
- * but hopefully rare situation, as it usually results in transfer being
- * shorter than possible).
- *
- * \retval 0 success, page can be placed into transfer
- *
- * \retval -EAGAIN page is either used by concurrent IO has been
- * truncated. Skip it.
- */
-static int vvp_page_make_ready(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
- int result = 0;
-
- lock_page(vmpage);
- if (clear_page_dirty_for_io(vmpage)) {
- LASSERT(pg->cp_state == CPS_CACHED);
- /* This actually clears the dirty bit in the radix
- * tree.
- */
- set_page_writeback(vmpage);
- CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
- } else if (pg->cp_state == CPS_PAGEOUT) {
- /* is it possible for osc_flush_async_page() to already
- * make it ready?
- */
- result = -EALREADY;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
- pg->cp_state);
- LBUG();
- }
- unlock_page(vmpage);
- RETURN(result);
-}
-
-static int vvp_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
-
- return 0;
-}
-
static const struct cl_page_operations vvp_page_ops = {
.cpo_discard = vvp_page_discard,
.io = {
[CRT_READ] = {
- .cpo_prep = vvp_page_prep_read,
.cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = vvp_page_fail,
},
[CRT_WRITE] = {
- .cpo_prep = vvp_page_prep_write,
.cpo_completion = vvp_page_completion_write,
- .cpo_make_ready = vvp_page_make_ready,
},
},
};
}
/**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
+ * Prepares page for immediate transfer. Returns -EALREADY if this page
+ * should be omitted from the transfer.
*/
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
- struct cl_page *cl_page, enum cl_req_type crt)
+ struct cl_page *cp, enum cl_req_type crt)
{
- const struct cl_page_slice *slice;
- int result = 0;
- int i;
+ struct page *vmpage = cp->cp_vmpage;
+ int rc;
- PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
- PINVRNT(env, cl_page, cl_page_invariant(cl_page));
- PINVRNT(env, cl_page, crt < CRT_NR);
+ PASSERT(env, cp, crt < CRT_NR);
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
- /*
- * this has to be called bottom-to-top, so that llite can set up
- * PG_writeback without risking other layers deciding to skip this
- * page.
- */
- if (crt >= CRT_NR)
- return -EINVAL;
-
- if (cl_page->cp_type != CPT_TRANSIENT) {
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->io[crt].cpo_prep)
- result =
- (*slice->cpl_ops->io[crt].cpo_prep)(env,
- slice,
- io);
- if (result != 0)
- break;
- }
- }
+ if (cp->cp_type == CPT_TRANSIENT) {
+ /* Nothing to do. */
+ } else if (crt == CRT_READ) {
+ if (PageUptodate(vmpage))
+ GOTO(out, rc = -EALREADY);
+ } else {
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageDirty(vmpage));
- if (result >= 0) {
- result = 0;
- cl_page_io_start(env, cl_page, crt);
+ /* The ll_writepage path is not a synchronous write, so we
+ * need to set the page writeback flag here.
+ */
+ if (cp->cp_sync_io == NULL)
+ set_page_writeback(vmpage);
}
- CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
- return result;
+ cl_page_io_start(env, cp, crt);
+ rc = 0;
+out:
+ CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+ return rc;
}
EXPORT_SYMBOL(cl_page_prep);
*
* \pre cl_page->cp_state == CPS_CACHED
* \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
*/
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
- enum cl_req_type crt)
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *cp,
+ enum cl_req_type crt)
{
- const struct cl_page_slice *slice;
- int result = 0;
- int i;
+ struct page *vmpage = cp->cp_vmpage;
+ int rc;
- ENTRY;
- PINVRNT(env, cl_page, crt < CRT_NR);
- if (crt >= CRT_NR)
- RETURN(-EINVAL);
+ ENTRY;
+ PASSERT(env, cp, crt == CRT_WRITE);
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
- result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env, slice);
- if (result != 0)
- break;
+ if (cp->cp_type == CPT_TRANSIENT)
+ GOTO(out, rc = 0);
+
+ lock_page(vmpage);
+
+ if (clear_page_dirty_for_io(vmpage)) {
+ LASSERT(cp->cp_state == CPS_CACHED);
+ /* This actually clears the dirty bit in the
+ * radix tree.
+ */
+ set_page_writeback(vmpage);
+ CL_PAGE_HEADER(D_PAGE, env, cp, "readied\n");
+ rc = 0;
+ } else if (cp->cp_state == CPS_PAGEOUT) {
+ /* is it possible for osc_flush_async_page()
+ * to already make it ready?
+ */
+ rc = -EALREADY;
+ } else {
+ CL_PAGE_DEBUG(D_ERROR, env, cp,
+ "unexpecting page state %d\n",
+ cp->cp_state);
+ LBUG();
}
- if (result >= 0) {
- result = 0;
- PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
- cl_page_io_start(env, cl_page, crt);
- }
- CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
+ unlock_page(vmpage);
+out:
+ if (rc == 0) {
+ PASSERT(env, cp, cp->cp_state == CPS_CACHED);
+ cl_page_io_start(env, cp, crt);
+ }
- RETURN(result);
+ CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+ return rc;
}
EXPORT_SYMBOL(cl_page_make_ready);