In cl_page_free(), the assertion on the removed cp_req field is dropped:

PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, page->cp_req == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);

In cl_page_alloc(), the cp_flight list head (the page's linkage into a cl_req) no longer needs initialization:

ENTRY;
page->cp_type = type;
INIT_LIST_HEAD(&page->cp_layers);
INIT_LIST_HEAD(&page->cp_batch);
- INIT_LIST_HEAD(&page->cp_flight);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {

In cl_page_own0(), the matching assertion goes away:

io, nonblock);
if (result == 0) {
PASSERT(env, pg, pg->cp_owner == NULL);
- PASSERT(env, pg, pg->cp_req == NULL);
pg->cp_owner = cl_io_top(io);
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {

In cl_page_completion(), the stale cp_req comment and assertion are dropped, the unconditional cl_page_put() goes away along with its cp_req-based justification, and the anchor notification is consolidated into a single block:

struct cl_sync_io *anchor = pg->cp_sync_io;
PASSERT(env, pg, crt < CRT_NR);
- /* cl_page::cp_req already cleared by the caller (osc_completion()) */
- PASSERT(env, pg, pg->cp_req == NULL);
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
ENTRY;
CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
- if (anchor) {
- LASSERT(pg->cp_sync_io == anchor);
- pg->cp_sync_io = NULL;
- }
- /*
- * As page->cp_obj is pinned by a reference from page->cp_req, it is
- * safe to call cl_page_put() without risking object destruction in a
- * non-blocking context.
- */
- cl_page_put(env, pg);
-
- if (anchor != NULL)
+ if (anchor != NULL) {
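+ /* clear cp_sync_io before notifying: the cl_sync_io_note() that
+ * brings the pending count to zero wakes the waiter, after which the
+ * anchor (commonly on the waiter's stack) may already be gone */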
+ LASSERT(pg->cp_sync_io == anchor);
+ pg->cp_sync_io = NULL;
cl_sync_io_note(env, anchor, ioret);
-
+ }
EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
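
For context, cl_page_completion() is the notifier half of the cl_sync_io protocol. A minimal sketch of the submitter's side follows; submit_and_wait() is a hypothetical helper, and the exact cl_sync_io_init()/cl_sync_io_wait() signatures have varied across Lustre versions, so read this as an illustration rather than the in-tree API:

/* Illustrative sketch, not in-tree code: one anchor covers a whole
 * batch; each page's cp_sync_io points at it, and the waiter sleeps
 * until cl_page_completion() above has called cl_sync_io_note() once
 * per page. */
static int submit_and_wait(const struct lu_env *env,
			   struct cl_page **pages, int nr_pages,
			   long timeout)
{
	struct cl_sync_io anchor;
	int i;

	cl_sync_io_init(&anchor, nr_pages);      /* expect nr_pages notes */
	for (i = 0; i < nr_pages; i++)
		pages[i]->cp_sync_io = &anchor;  /* cleared at completion */

	/* ... hand the batch to the transfer engine here ... */

	return cl_sync_io_wait(env, &anchor, timeout); /* last note wakes us */
}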

In cl_page_header_print(), the format string loses the %p that printed cp_req:

void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p %d %d %d %p %p]\n",
+ "page@%p[%d %p %d %d %d %p]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
pg->cp_state, pg->cp_error, pg->cp_type,
- pg->cp_owner, pg->cp_req);
+ pg->cp_owner);
}
EXPORT_SYMBOL(cl_page_header_print);
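
The six bracketed fields are, in order: reference count, cl_object pointer, state, error, type, and owner. A hypothetical line (addresses invented for illustration) would now read:

page@ffff8800a1b2c300[3 ffff8800c4d5e680 1 0 0 (null)]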