CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, ioret);
PASSERT(env, cl_page, crt < CRT_NR);
- if (cl_page->cp_type != CPT_TRANSIENT) {
- PASSERT(env, cl_page,
- cl_page->cp_state == cl_req_type_state(crt));
- cl_page_state_set(env, cl_page, CPS_CACHED);
-
- cl_page_slice_for_each_reverse(cl_page, slice, i) {
- if (slice->cpl_ops->io[crt].cpo_complete != NULL)
- (*slice->cpl_ops->io[crt].cpo_complete)(env,
- slice,
- ioret);
- }
+ LASSERT(cl_page->cp_type != CPT_TRANSIENT);
+
+ PASSERT(env, cl_page,
+ cl_page->cp_state == cl_req_type_state(crt));
+ cl_page_state_set(env, cl_page, CPS_CACHED);
+
+ cl_page_slice_for_each_reverse(cl_page, slice, i) {
+ if (slice->cpl_ops->io[crt].cpo_complete != NULL)
+ (*slice->cpl_ops->io[crt].cpo_complete)(env, slice,
+ ioret);
}
if (anchor != NULL) {
cl_page->cp_sync_io = NULL;
cl_sync_io_note(env, anchor, ioret);
}
+
EXIT;
}
EXPORT_SYMBOL(cl_page_complete);
struct osc_async_page *oap, enum cl_req_type crt,
int rc);
static void osc_dio_completion(const struct lu_env *env, struct osc_object *osc,
- struct osc_async_page *oap, enum cl_req_type crt,
- int brw_flags, int rc);
+ struct cl_dio_pages *cdp, enum cl_req_type crt,
+ int page_count, int brw_flags, int rc);
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
int cmd);
static int osc_refresh_count(const struct lu_env *env, struct osc_object *osc,
osc_lru_add_batch(cli, &ext->oe_pages);
list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
+ oap_pending_item) {
list_del_init(&oap->oap_rpc_item);
list_del_init(&oap->oap_pending_item);
if (last_off <= oap->oap_obj_off) {
--ext->oe_nr_pages;
if (ext->oe_odp.odp_cdp)
- osc_dio_completion(env, osc, oap, crt,
- ext->oe_odp.odp_brw_flags, rc);
+ continue;
else
osc_completion(env, osc, oap, crt, rc);
}
+
+	/* after this call the cl_dio_pages may be freed, so it must not be
+	 * accessed again
+	 */
+ if (ext->oe_odp.odp_cdp)
+ osc_dio_completion(env, osc, ext->oe_odp.odp_cdp, crt,
+ nr_pages, ext->oe_odp.odp_brw_flags, rc);
EASSERT(ext->oe_nr_pages == 0, ext);
if (!sent) {
}
/*
 * Completion handler for a direct-I/O request.
 *
 * NOTE(review): this is a patch fragment — context lines appear elided
 * (e.g. "srvlock" is used below but its initialization is not visible in
 * this view; presumably it is read from the request/oap flags — confirm
 * against the full file).
 *
 * Post-patch, this completes an entire cl_dio_pages batch ("cdp",
 * "page_count" pages) in one call instead of one osc_async_page at a time:
 * it bumps the lockless-I/O statistics when the request ran lockless
 * ("srvlock") and then hands the whole batch to cl_dio_pages_complete().
 */
static void osc_dio_completion(const struct lu_env *env, struct osc_object *osc,
-			       struct osc_async_page *oap, enum cl_req_type crt,
-			       int brw_flags, int rc)
+			       struct cl_dio_pages *cdp, enum cl_req_type crt,
+			       int page_count, int brw_flags, int rc)
{
-	struct cl_page *page = oap2cl_page(oap);
	int srvlock;
	ENTRY;
	if (rc == 0 && srvlock) {
		struct lu_device *ld = osc->oo_cl.co_lu.lo_dev;
		struct osc_stats *stats = &lu2osc_dev(ld)->osc_stats;
-		size_t bytes = oap->oap_count;
+		/* this is slightly sloppy: it assumes the DIO is page-size
+		 * aligned and that every page completed in full, so the
+		 * stats can be slightly inaccurate, but it keeps the code
+		 * simple
+		 */
		if (crt == CRT_READ)
-			stats->os_lockless_reads += bytes;
+			stats->os_lockless_reads += page_count * PAGE_SIZE;
		else
-			stats->os_lockless_writes += bytes;
+			stats->os_lockless_writes += page_count * PAGE_SIZE;
	}
-	/* for transient pages, the last reference can be destroyed by
-	 * cl_page_complete, so do not reference the page after this
-	 */
-	cl_page_complete(env, page, crt, rc);
+	/* cl_dio_pages_complete() may drop the final reference and free
+	 * cdp — do not touch cdp after this call (TODO confirm ownership
+	 * transfer against cl_dio_pages_complete's contract)
+	 */
+	cl_dio_pages_complete(env, cdp, page_count, rc);
	EXIT;
}
oap = &opg->ops_oap;
list_del_init(&oap->oap_pending_item);
- osc_dio_completion(env, obj, oap, crt, brw_flags,
- -ENOMEM);
}
+ osc_dio_completion(env, obj, cdp, crt,
+ to_page - from_page + 1, brw_flags,
+ -ENOMEM);
RETURN(-ENOMEM);
}