        OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
                      CFS_ALLOC_IO);
        if (page != NULL) {
-               int result;
+               int result = 0;
                cfs_atomic_set(&page->cp_ref, 1);
                if (type == CPT_CACHEABLE) /* for radix tree */
                        cfs_atomic_inc(&page->cp_ref);
         * PG_writeback without risking other layers deciding to skip this
         * page.
         */
+       if (crt >= CRT_NR)
+               return -EINVAL;
        result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
        if (result == 0)
                cl_page_io_start(env, pg, crt);
        }
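
The guard added above protects the io[crt] lookup: crt picks the per-request-type slot (cpo_prep here) out of each layer's cl_page operations vector, so an out-of-range value is rejected with -EINVAL before the table is indexed. A minimal standalone sketch of the same guard pattern, using made-up names (req_type, page_ops, prep_page are illustrative, none of them come from cl_page.c):

#include <errno.h>
#include <stddef.h>

/* Illustrative only: bounds-check a request type before indexing a
 * per-type operations table, mirroring the crt >= CRT_NR guards in
 * this patch.  These names do not exist in cl_page.c. */
enum req_type { RT_READ, RT_WRITE, RT_NR };

struct page_ops {
        int (*prep)(void *page);
};

struct page_ops req_ops[RT_NR];

int prep_page(void *page, enum req_type crt)
{
        /* Reject out-of-range request types before touching req_ops[crt]. */
        if (crt >= RT_NR)
                return -EINVAL;
        if (req_ops[crt].prep == NULL)
                return 0;
        return req_ops[crt].prep(page);
}
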
        cl_page_state_set(env, pg, CPS_CACHED);
+       if (crt >= CRT_NR)
+               return;
        CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
                               (const struct lu_env *,
                                const struct cl_page_slice *, int), ioret);
        if (anchor) {
                LASSERT(cl_page_is_vmlocked(env, pg));
                LASSERT(pg->cp_sync_io == anchor);
                pg->cp_sync_io = NULL;
+       }
+       /*
+        * As page->cp_obj is pinned by a reference from page->cp_req, it is
+        * safe to call cl_page_put() without risking object destruction in a
+        * non-blocking context.
+        */
+       cl_page_put(env, pg);
+
+       if (anchor)
                cl_sync_io_note(anchor, ioret);
-       }
+
        EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
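
In cl_page_completion() the waiter notification is also reordered: the if (anchor) block now only clears cp_sync_io, the page reference is dropped with cl_page_put() (safe, per the patch's comment, because page->cp_obj stays pinned through page->cp_req), and only then is cl_sync_io_note() called on the locally saved anchor. A rough standalone sketch of that "finish with the page, then signal the waiter" ordering, using made-up types (sketch_page, sketch_anchor and friends are not the Lustre API):

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative only: drop our own page reference before signalling the
 * waiter; once the waiter is notified it may proceed and tear down
 * whatever it was waiting on.  All names are invented for this sketch. */
struct sketch_anchor {
        atomic_int nr_pending;          /* pages still in flight */
        atomic_int error;               /* first non-zero completion status */
};

struct sketch_page {
        atomic_int            ref;
        struct sketch_anchor *sync;     /* non-NULL while a sync IO is in flight */
};

void sketch_page_put(struct sketch_page *pg)
{
        if (atomic_fetch_sub(&pg->ref, 1) == 1)
                free(pg);
}

void sketch_completion(struct sketch_page *pg, int ioret)
{
        struct sketch_anchor *anchor = pg->sync;
        int zero = 0;

        pg->sync = NULL;
        sketch_page_put(pg);            /* done touching pg before notifying */
        if (anchor != NULL) {
                if (ioret != 0)
                        atomic_compare_exchange_strong(&anchor->error,
                                                       &zero, ioret);
                atomic_fetch_sub(&anchor->nr_pending, 1);  /* waiter watches this */
        }
}
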
        PINVRNT(env, pg, crt < CRT_NR);
        ENTRY;
+       if (crt >= CRT_NR)
+               RETURN(-EINVAL);
        result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
                                (const struct lu_env *,
                                 const struct cl_page_slice *));
        ENTRY;
+       if (crt >= CRT_NR)
+               RETURN(-EINVAL);
+
        cfs_list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
                if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
                        continue;