COIO_LAYOUT_UNLOCK,
};
+struct cl_dio_pages;
+
/**
* Operations implemented for each cl object layer.
*
int (*coo_page_init)(const struct lu_env *env, struct cl_io *io,
struct cl_object *obj, struct cl_page *page,
pgoff_t index);
+ /**
+ * Initialize the dio pages structure with information from this layer
+ *
+ * Called top-to-bottom through every object layer to gather the
+ * per-layer information required for the dio; it does the same job as
+ * coo_page_init, but just once for each dio page array.
+ */
+ int (*coo_dio_pages_init)(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_dio_pages *cdp,
+ pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
}
-struct cl_dio_pages;
-
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
/** @} cl_object */
+ssize_t cl_dio_pages_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_dio_pages *cdp, struct iov_iter *iter,
+ int rw, size_t maxsize, loff_t offset,
+ bool unaligned);
+
/** \defgroup cl_page cl_page
* @{ */
struct cl_page *cl_page_find (const struct lu_env *env,
size_t cdp_count;
/* the file offset of the first page. */
loff_t cdp_file_offset;
+ unsigned int cdp_lov_index;
/* the first and last page can be incomplete, this records the
* offsets
*/
}
#endif /* HAVE_AOPS_RELEASE_FOLIO */
-static ssize_t ll_get_user_pages(int rw, struct iov_iter *iter,
- struct cl_dio_pages *cdp,
- size_t maxsize)
-{
-#if defined(HAVE_DIO_ITER)
- size_t start;
- size_t result;
-
- result = iov_iter_get_pages_alloc2(iter, &cdp->cdp_pages, maxsize,
- &start);
- if (result > 0)
- cdp->cdp_count = DIV_ROUND_UP(result + start, PAGE_SIZE);
-
- return result;
-#else
- unsigned long addr;
- size_t page_count;
- size_t size;
- long result;
-
- if (!maxsize)
- return 0;
-
- if (!iter->nr_segs)
- return 0;
-
- addr = (unsigned long)iter->iov->iov_base + iter->iov_offset;
- if (addr & ~PAGE_MASK)
- return -EINVAL;
-
- size = min_t(size_t, maxsize, iter->iov->iov_len);
- page_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_pages, page_count);
- if (cdp->cdp_pages == NULL)
- return -ENOMEM;
-
- mmap_read_lock(current->mm);
- result = get_user_pages(current, current->mm, addr, page_count,
- rw == READ, 0, cdp->cdp_pages, NULL);
- mmap_read_unlock(current->mm);
-
- if (unlikely(result != page_count)) {
- ll_release_user_pages(cdp->cdp_pages, page_count);
- cdp->cdp_pages = NULL;
-
- if (result >= 0)
- return -EFAULT;
-
- return result;
- }
- cdp->cdp_count = page_count;
-
- return size;
-#endif
-}
-
/* iov_iter_alignment() is introduced in 3.16 similar to HAVE_DIO_ITER */
#if defined(HAVE_DIO_ITER)
static unsigned long iov_iter_alignment_vfs(const struct iov_iter *i)
cdp->cdp_from = offset & ~PAGE_MASK;
cdp->cdp_to = (offset + size) & ~PAGE_MASK;
- /* this is a special temporary allocation which lets us track the
- * cl_pages and convert them to a list
- *
- * this is used in 'pushing down' the conversion to a page queue
- */
- OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_cl_pages, cdp->cdp_count);
- if (!cdp->cdp_cl_pages)
- GOTO(out, rc = -ENOMEM);
-
while (size > 0) {
size_t from = offset & ~PAGE_MASK;
size_t to = min(from + size, PAGE_SIZE);
GOTO(out, result = -ENOMEM);
cdp = &sdio->csd_dio_pages;
- cdp->cdp_file_offset = file_offset;
-
- if (!unaligned) {
- result = ll_get_user_pages(rw, iter, cdp, count);
- /* ll_get_user_pages returns bytes in the IO or error*/
- count = result;
- } else {
- /* same calculation used in ll_get_user_pages */
- count = min_t(size_t, count, iter->iov->iov_len);
- result = ll_allocate_dio_buffer(cdp, count);
- /* allocate_dio_buffer returns number of pages or
- * error, so do not set count = result
- */
- }
-
- /* now we have the actual count, so store it in the sdio */
- sdio->csd_bytes = count;
-
+ result = cl_dio_pages_init(env, ll_dio_aio->cda_obj, cdp,
+ iter, rw, count, file_offset,
+ unaligned);
if (unlikely(result <= 0)) {
cl_sync_io_note(env, &sdio->csd_sync, result);
if (sync_submit) {
}
GOTO(out, result);
}
+ count = result;
+ /* now we have the actual count, so store it in the sdio */
+ sdio->csd_bytes = count;
if (unaligned && rw == WRITE) {
result = ll_dio_user_copy(sdio, iter);
slice = cl_page_slice_get(cl_page, i); i >= 0; \
slice = cl_page_slice_get(cl_page, --i))
+/* Pin the user pages backing @iter for a direct IO of at most @maxsize bytes.
+ *
+ * On success the pinned page array and page count are stored in
+ * cdp->cdp_pages / cdp->cdp_count and the number of bytes covered by those
+ * pages is returned; on failure a negative errno is returned and no pages
+ * are left pinned.
+ */
+static ssize_t ll_get_user_pages(int rw, struct iov_iter *iter,
+				 struct cl_dio_pages *cdp,
+				 size_t maxsize)
+{
+#if defined(HAVE_DIO_ITER)
+	size_t start;
+	ssize_t result;
+
+	result = iov_iter_get_pages_alloc2(iter, &cdp->cdp_pages, maxsize,
+					   &start);
+	/* result must be signed: iov_iter_get_pages_alloc2() returns a
+	 * negative errno on failure, and storing that in an unsigned type
+	 * would make the check below succeed and set cdp_count from a
+	 * garbage value
+	 */
+	if (result > 0)
+		cdp->cdp_count = DIV_ROUND_UP(result + start, PAGE_SIZE);
+
+	return result;
+#else
+	unsigned long addr;
+	size_t page_count;
+	size_t size;
+	long result;
+
+	if (!maxsize)
+		return 0;
+
+	if (!iter->nr_segs)
+		return 0;
+
+	/* legacy path handles only a single, page-aligned segment */
+	addr = (unsigned long)iter->iov->iov_base + iter->iov_offset;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+
+	size = min_t(size_t, maxsize, iter->iov->iov_len);
+	page_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_pages, page_count);
+	if (cdp->cdp_pages == NULL)
+		return -ENOMEM;
+
+	mmap_read_lock(current->mm);
+	result = get_user_pages(current, current->mm, addr, page_count,
+				rw == READ, 0, cdp->cdp_pages, NULL);
+	mmap_read_unlock(current->mm);
+
+	/* partial pin is treated as failure: release whatever was pinned */
+	if (unlikely(result != page_count)) {
+		ll_release_user_pages(cdp->cdp_pages, page_count);
+		cdp->cdp_pages = NULL;
+
+		if (result >= 0)
+			return -EFAULT;
+
+		return result;
+	}
+	cdp->cdp_count = page_count;
+
+	return size;
+#endif
+}
+
+/**
+ * Set up a cl_dio_pages structure for a direct IO.
+ *
+ * Pins the user pages backing @iter (aligned IO) or allocates an internal
+ * buffer (unaligned IO), records the starting file offset, then calls each
+ * layer's coo_dio_pages_init() method top-to-bottom and allocates the
+ * temporary cl_page tracking array sized by cdp->cdp_count.
+ *
+ * Returns the byte count of the IO on success, negative errno on error.
+ */
+ssize_t cl_dio_pages_init(const struct lu_env *env, struct cl_object *obj,
+			 struct cl_dio_pages *cdp, struct iov_iter *iter,
+			 int rw, size_t maxsize, loff_t offset, bool unaligned)
+{
+	/* index of the first page in the set */
+	pgoff_t index = offset >> PAGE_SHIFT;
+	struct cl_object *head = obj;
+	ssize_t result = 0;
+	ssize_t count = 0;
+
+	ENTRY;
+
+	cdp->cdp_file_offset = offset;
+	/* NOTE(review): cdp_to looks like an inclusive last-byte offset here
+	 * (PAGE_SIZE - 1), unlike the exclusive end computed by the old
+	 * caller; confirm all consumers expect this convention
+	 */
+	cdp->cdp_from = 0;
+	cdp->cdp_to = PAGE_SIZE - 1;
+
+	/* these set cdp->cdp_count, which is used in coo_dio_pages_init */
+	if (!unaligned) {
+		result = ll_get_user_pages(rw, iter, cdp, maxsize);
+		/* ll_get_user_pages returns bytes in the IO or error */
+		count = result;
+	} else {
+		/* same calculation used in ll_get_user_pages */
+		count = min_t(size_t, maxsize, iter->iov->iov_len);
+		result = ll_allocate_dio_buffer(cdp, count);
+		/* allocate_dio_buffer returns number of pages or
+		 * error, not count, so clear result on success
+		 */
+		if (result > 0)
+			result = 0;
+	}
+	if (result < 0)
+		GOTO(out, result);
+
+	/* gather per-layer information, top-to-bottom, once for the whole
+	 * page array (the same job coo_page_init does per page)
+	 */
+	cl_object_for_each(obj, head) {
+		if (obj->co_ops->coo_dio_pages_init != NULL) {
+			result = obj->co_ops->coo_dio_pages_init(env, obj,
+								 cdp, index);
+			if (result != 0) {
+				LASSERT(result < 0);
+				GOTO(out, result);
+			}
+		}
+	}
+
+	/* this is a special temporary allocation which lets us track the
+	 * cl_pages and convert them to a list
+	 *
+	 * this is used in 'pushing down' the conversion to a page queue
+	 */
+	OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_cl_pages, cdp->cdp_count);
+	if (!cdp->cdp_cl_pages)
+		GOTO(out, result = -ENOMEM);
+
+out:
+	/* NOTE(review): on a failure after ll_get_user_pages() succeeded,
+	 * the pinned pages in cdp->cdp_pages are not released here —
+	 * presumably the caller's error path does that; verify
+	 */
+	/* success */
+	if (result == 0)
+		result = count;
+	RETURN(result);
+}
+EXPORT_SYMBOL(cl_dio_pages_init);
+
static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
{
int index = cl_page->cp_kmem_index;