* are pinned in memory (and, hence, calling cl_page_put() is
* safe).
*
- * \see cl_page_completion()
+ * \see cl_page_complete()
*/
- void (*cpo_completion)(const struct lu_env *env,
- const struct cl_page_slice *slice,
+ void (*cpo_complete)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
int ioret);
} io[CRT_NR];
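For orientation, a minimal sketch of how a layer wires this slot, using the same designated-initializer pattern as the vvp table later in this patch; the demo_* names are hypothetical:

static void demo_page_complete(const struct lu_env *env,
			       const struct cl_page_slice *slice, int ioret)
{
	/* the transferred page is still pinned when this runs, so a
	 * layer may safely drop its own reference with cl_page_put()
	 */
}

static const struct cl_page_operations demo_page_ops = {
	.io = {
		[CRT_READ]  = { .cpo_complete = demo_page_complete },
		[CRT_WRITE] = { .cpo_complete = demo_page_complete },
	},
};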
/**
*/
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg, enum cl_req_type crt);
-void cl_dio_pages_completion(const struct lu_env *env, struct cl_dio_pages *pg,
- int count, int ioret);
-void cl_page_completion(const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret);
+void cl_dio_pages_complete(const struct lu_env *env, struct cl_dio_pages *pg,
+ int count, int ioret);
+void cl_page_complete(const struct lu_env *env, struct cl_page *pg,
+ enum cl_req_type crt, int ioret);
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
enum cl_req_type crt);
void cl_page_clip(const struct lu_env *env, struct cl_page *pg, int from,
}
}
-static void vvp_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
+static void vvp_page_complete_read(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
{
struct cl_page *cp = slice->cpl_page;
struct page *vmpage = cp->cp_vmpage;
EXIT;
}
-static void vvp_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
+static void vvp_page_complete_write(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
{
struct cl_page *cp = slice->cpl_page;
struct page *vmpage = cp->cp_vmpage;
.cpo_discard = vvp_page_discard,
.io = {
[CRT_READ] = {
- .cpo_completion = vvp_page_completion_read,
+ .cpo_complete = vvp_page_complete_read,
},
[CRT_WRITE] = {
- .cpo_completion = vvp_page_completion_write,
+ .cpo_complete = vvp_page_complete_write,
},
},
};
* the return code of cl_page_prep() at all.
*/
LASSERT(page->cp_type == CPT_TRANSIENT);
- cl_page_completion(env, page, crt, 0);
+ cl_page_complete(env, page, crt, 0);
continue;
}
* the return code of cl_page_prep() at all.
*/
LASSERT(page->cp_type == CPT_TRANSIENT);
- cl_page_completion(env, page, crt, 0);
+ cl_page_complete(env, page, crt, 0);
continue;
}
}
EXPORT_SYMBOL(cl_page_prep);
-/* this is the equivalent of cl_page_completion for a dio pages struct, but is
+/* this is the equivalent of cl_page_complete for a dio pages struct, but is
* much simpler - in fact, it only needs to note the completion in the sync io anchor
*/
-void cl_dio_pages_completion(const struct lu_env *env,
- struct cl_dio_pages *cdp, int count, int ioret)
+void cl_dio_pages_complete(const struct lu_env *env, struct cl_dio_pages *cdp,
+ int count, int ioret)
{
struct cl_sub_dio *sdio = container_of(cdp, struct cl_sub_dio,
csd_dio_pages);
EXIT;
}
-EXPORT_SYMBOL(cl_dio_pages_completion);
+EXPORT_SYMBOL(cl_dio_pages_complete);
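A plausible shape of the body elided above, assuming csd_sync is the embedded cl_sync_io anchor (field name inferred, not confirmed by this hunk) and that the anchor was seeded with one count per page by cl_sync_io_init():

void cl_dio_pages_complete(const struct lu_env *env, struct cl_dio_pages *cdp,
			   int count, int ioret)
{
	struct cl_sub_dio *sdio = container_of(cdp, struct cl_sub_dio,
					       csd_dio_pages);
	int i;

	ENTRY;
	/* credit the anchor once per completed page so it drains and
	 * any waiter in cl_sync_io_wait() is eventually woken
	 */
	for (i = 0; i < count; i++)
		cl_sync_io_note(env, &sdio->csd_sync, ioret);
	EXIT;
}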
/**
* Notify layers about transfer completion.
*
* Invoked by transfer sub-system (which is a part of osc) to notify layers
* that a transfer, of which this page is a part, has completed.
* \pre cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
* \post cl_page->cl_page_state == CPS_CACHED
*
- * \see cl_page_operations::cpo_completion()
+ * \see cl_page_operations::cpo_complete()
*/
-void cl_page_completion(const struct lu_env *env,
- struct cl_page *cl_page, enum cl_req_type crt,
- int ioret)
+void cl_page_complete(const struct lu_env *env, struct cl_page *cl_page,
+ enum cl_req_type crt, int ioret)
{
const struct cl_page_slice *slice;
struct cl_sync_io *anchor = cl_page->cp_sync_io;
cl_page_state_set(env, cl_page, CPS_CACHED);
cl_page_slice_for_each_reverse(cl_page, slice, i) {
- if (slice->cpl_ops->io[crt].cpo_completion != NULL)
- (*slice->cpl_ops->io[crt].cpo_completion)(env,
- slice,
- ioret);
+ if (slice->cpl_ops->io[crt].cpo_complete != NULL)
+ (*slice->cpl_ops->io[crt].cpo_complete)(env,
+ slice,
+ ioret);
}
}
}
EXIT;
}
-EXPORT_SYMBOL(cl_page_completion);
+EXPORT_SYMBOL(cl_page_complete);
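A hedged caller-side sketch of the contract above: the transfer engine reports its verdict, the page moves from CPS_PAGEIN back to CPS_CACHED, and each layer's cpo_complete runs in reverse slice order. demo_finish_read() and rc are illustrative names only:

static void demo_finish_read(const struct lu_env *env, struct cl_page *page,
			     int rc)
{
	/* \pre from the comment above: the page is mid-transfer */
	LASSERT(page->cp_state == CPS_PAGEIN);

	cl_page_complete(env, page, CRT_READ, rc);

	/* \post: the page is CPS_CACHED again, and any cl_sync_io
	 * anchor attached via cp_sync_io has been notified
	 */
}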
/**
* Notify layers that transfer formation engine decided to yank this page from
* reference counter protects page from concurrent reclaim.
*/
- /* for transient pages, the last reference is destroyed by the
- * cl_page_completion process, so do not referencce the page after this
+ /* for transient pages, the last reference can be destroyed by
+ * cl_page_complete, so do not reference the page after this call
*/
- cl_page_completion(env, page, crt, rc);
+ cl_page_complete(env, page, crt, rc);
if (cptype != CPT_TRANSIENT)
cl_page_put(env, page);
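Putting the reference rules together, an illustrative end-to-end caller under the same assumptions (demo_transfer_one() is hypothetical; the actual transfer is elided):

static void demo_transfer_one(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page, enum cl_req_type crt)
{
	enum cl_page_type cptype = page->cp_type;
	int rc;

	/* for CPT_TRANSIENT pages the hunks above skip this check */
	if (cl_page_prep(env, io, page, crt) != 0)
		return;

	/* ... submit and wait for the transfer, yielding rc ... */
	rc = 0;

	cl_page_complete(env, page, crt, rc);

	/* a transient page may already be gone here, so only drop our
	 * reference for cacheable pages
	 */
	if (cptype != CPT_TRANSIENT)
		cl_page_put(env, page);
}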