COIO_SIZE_UNLOCK,
};
+struct cl_dio_pages;
+
/**
* Operations implemented for each cl object layer.
*
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index);
-
+ /**
+ * Initialize the dio pages structure with information from this layer.
+ *
+ * Called top-to-bottom through every object layer to gather the
+ * per-layer information required for the dio. It does the same job as
+ * coo_page_init, but is called only once for each dio page array
+ * rather than once per page.
+ */
+ int (*coo_dio_pages_init)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_dio_pages *cdp,
+ pgoff_t index);
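+ /*
+ * Illustrative sketch only, not part of this change: a layer with no
+ * per-dio state of its own could provide a trivial callback matching
+ * the signature above (the name example_dio_pages_init is
+ * hypothetical):
+ *
+ *	static int example_dio_pages_init(const struct lu_env *env,
+ *					  struct cl_object *obj,
+ *					  struct cl_dio_pages *cdp,
+ *					  pgoff_t index)
+ *	{
+ *		return 0;
+ *	}
+ *
+ * and hook it up in its cl_object_operations as
+ * .coo_dio_pages_init = example_dio_pages_init.
+ */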
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
ra->cra_release(env, ra);
}
-struct cl_dio_pages;
-
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
struct cl_dio_pages *cdp, struct iov_iter *iter,
int rw, size_t bytes, loff_t offset, bool unaligned)
{
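+ /* 'head' keeps the top object so 'obj' can be reused as the per-layer
+ * cursor below; 'index' is the file page index at which this dio starts
+ */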
+ struct cl_object *head = obj;
+ pgoff_t index = offset >> PAGE_SHIFT;
ssize_t result = 0;
ENTRY;
if (!cdp->cdp_cl_pages)
GOTO(out, result = -ENOMEM);
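+ /* walk the object layers top-to-bottom so each one can record its
+ * per-layer information for this dio in the cl_dio_pages structure
+ */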
+ cl_object_for_each(obj, head) {
+ if (obj->co_ops->coo_dio_pages_init != NULL) {
+ result = obj->co_ops->coo_dio_pages_init(env, obj,
+ cdp, index);
+ if (result != 0) {
+ LASSERT(result < 0);
+ GOTO(out, result);
+ }
+ }
+ }
+
out:
if (result >= 0)
result = bytes;