Whamcloud - gitweb
LU-13814 clio: add cl_dio_pages_init 09/52109/20
Author: Patrick Farrell <pfarrell@whamcloud.com>
Sun, 24 Sep 2023 20:53:37 +0000 (16:53 -0400)
Committer: Patrick Farrell <pfarrell@whamcloud.com>
Tue, 26 Sep 2023 18:58:27 +0000 (14:58 -0400)
Just like the cl_page it's replacing, the cl_dio_pages
struct needs various pieces of information from the
different layers of the cl_object in order to do the IO.

This means we need a cl_dio_pages init, analogous to
cl_page_alloc and coo_page_init.

Note this does not implement coo_dio_pages_init for any layers;
it just moves parts of the existing init code and lays out
the function structure.

Test-Parameters: fortestonly
Test-Parameters: forjanitoronly
Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I1fcf407b16d4077d94c7ba5afbc63bdd3fb3dfb4

lustre/include/cl_object.h
lustre/llite/rw26.c
lustre/obdclass/cl_page.c

index ba42527..0dc942c 100644 (file)
@@ -310,6 +310,8 @@ enum coo_inode_opc {
        COIO_LAYOUT_UNLOCK,
 };
 
+struct cl_dio_pages;
+
 /**
  * Operations implemented for each cl object layer.
  *
@@ -334,6 +336,17 @@ struct cl_object_operations {
        int  (*coo_page_init)(const struct lu_env *env, struct cl_io *io,
                              struct cl_object *obj, struct cl_page *page,
                              pgoff_t index);
+       /**
+        * Initialize the dio pages structure with information from this layer
+        *
+        * Called top-to-bottom through every object layer to gather the
+        * per-layer information required for the dio, does the same job as
+        * coo_page_init but just once for each dio page array
+        */
+       int  (*coo_dio_pages_init)(const struct lu_env *env,
+                                  struct cl_object *obj,
+                                  struct cl_dio_pages *cdp,
+                                  pgoff_t index);
         /**
          * Initialize lock slice for this layer. Called top-to-bottom through
          * every object layer when a new cl_lock is instantiated. Layer
@@ -1436,8 +1449,6 @@ static inline void cl_read_ahead_release(const struct lu_env *env,
 }
 
 
-struct cl_dio_pages;
-
 /**
  * Per-layer io operations.
  * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -2214,6 +2225,11 @@ static inline int cl_object_refc(struct cl_object *clob)
 
 /** @} cl_object */
 
+ssize_t cl_dio_pages_init(const struct lu_env *env, struct cl_object *obj,
+                         struct cl_dio_pages *cdp, struct iov_iter *iter,
+                         int rw, size_t maxsize, loff_t offset,
+                         bool unaligned);
+
 /** \defgroup cl_page cl_page
  * @{ */
 struct cl_page *cl_page_find        (const struct lu_env *env,
@@ -2586,6 +2602,7 @@ struct cl_dio_pages {
        size_t                  cdp_count;
        /* the file offset of the first page. */
        loff_t                  cdp_file_offset;
+       unsigned int            cdp_lov_index;
        /* the first and last page can be incomplete, this records the
         * offsets
         */
index 761c84f..c1a538c 100644 (file)
@@ -242,62 +242,6 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
 }
 #endif /* HAVE_AOPS_RELEASE_FOLIO */
 
-static ssize_t ll_get_user_pages(int rw, struct iov_iter *iter,
-                                struct cl_dio_pages *cdp,
-                                size_t maxsize)
-{
-#if defined(HAVE_DIO_ITER)
-       size_t start;
-       size_t result;
-
-       result = iov_iter_get_pages_alloc2(iter, &cdp->cdp_pages, maxsize,
-                                         &start);
-       if (result > 0)
-               cdp->cdp_count = DIV_ROUND_UP(result + start, PAGE_SIZE);
-
-       return result;
-#else
-       unsigned long addr;
-       size_t page_count;
-       size_t size;
-       long result;
-
-       if (!maxsize)
-               return 0;
-
-       if (!iter->nr_segs)
-               return 0;
-
-       addr = (unsigned long)iter->iov->iov_base + iter->iov_offset;
-       if (addr & ~PAGE_MASK)
-               return -EINVAL;
-
-       size = min_t(size_t, maxsize, iter->iov->iov_len);
-       page_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_pages, page_count);
-       if (cdp->cdp_pages == NULL)
-               return -ENOMEM;
-
-       mmap_read_lock(current->mm);
-       result = get_user_pages(current, current->mm, addr, page_count,
-                               rw == READ, 0, cdp->cdp_pages, NULL);
-       mmap_read_unlock(current->mm);
-
-       if (unlikely(result != page_count)) {
-               ll_release_user_pages(cdp->cdp_pages, page_count);
-               cdp->cdp_pages = NULL;
-
-               if (result >= 0)
-                       return -EFAULT;
-
-               return result;
-       }
-       cdp->cdp_count = page_count;
-
-       return size;
-#endif
-}
-
 /* iov_iter_alignment() is introduced in 3.16 similar to HAVE_DIO_ITER */
 #if defined(HAVE_DIO_ITER)
 static unsigned long iov_iter_alignment_vfs(const struct iov_iter *i)
@@ -379,15 +323,6 @@ ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, size_t size,
        cdp->cdp_from = offset & ~PAGE_MASK;
        cdp->cdp_to = (offset + size) & ~PAGE_MASK;
 
-       /* this is a special temporary allocation which lets us track the
-        * cl_pages and convert them to a list
-        *
-        * this is used in 'pushing down' the conversion to a page queue
-        */
-       OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_cl_pages, cdp->cdp_count);
-       if (!cdp->cdp_cl_pages)
-               GOTO(out, rc = -ENOMEM);
-
        while (size > 0) {
                size_t from = offset & ~PAGE_MASK;
                size_t to = min(from + size, PAGE_SIZE);
@@ -586,24 +521,9 @@ ll_direct_IO_impl(struct kiocb *iocb, struct iov_iter *iter, int rw)
                        GOTO(out, result = -ENOMEM);
 
                cdp = &sdio->csd_dio_pages;
-               cdp->cdp_file_offset = file_offset;
-
-               if (!unaligned) {
-                       result = ll_get_user_pages(rw, iter, cdp, count);
-                       /* ll_get_user_pages returns bytes in the IO or error*/
-                       count = result;
-               } else {
-                       /* same calculation used in ll_get_user_pages */
-                       count = min_t(size_t, count, iter->iov->iov_len);
-                       result = ll_allocate_dio_buffer(cdp, count);
-                       /* allocate_dio_buffer returns number of pages or
-                        * error, so do not set count = result
-                        */
-               }
-
-               /* now we have the actual count, so store it in the sdio */
-               sdio->csd_bytes = count;
-
+               result = cl_dio_pages_init(env, ll_dio_aio->cda_obj, cdp,
+                                          iter, rw, count, file_offset,
+                                          unaligned);
                if (unlikely(result <= 0)) {
                        cl_sync_io_note(env, &sdio->csd_sync, result);
                        if (sync_submit) {
@@ -612,6 +532,9 @@ ll_direct_IO_impl(struct kiocb *iocb, struct iov_iter *iter, int rw)
                        }
                        GOTO(out, result);
                }
+               count = result;
+               /* now we have the actual count, so store it in the sdio */
+               sdio->csd_bytes = count;
 
                if (unaligned && rw == WRITE) {
                        result = ll_dio_user_copy(sdio, iter);
index 8b5ff00..b78a920 100644 (file)
@@ -145,6 +145,124 @@ cl_page_slice_get(const struct cl_page *cl_page, int index)
             slice = cl_page_slice_get(cl_page, i); i >= 0;     \
             slice = cl_page_slice_get(cl_page, --i))
 
+static ssize_t ll_get_user_pages(int rw, struct iov_iter *iter,
+                               struct cl_dio_pages *cdp,
+                               size_t maxsize)
+{
+#if defined(HAVE_DIO_ITER)
+       size_t start;
+       size_t result;
+
+       result = iov_iter_get_pages_alloc2(iter, &cdp->cdp_pages, maxsize,
+                                         &start);
+       if (result > 0)
+               cdp->cdp_count = DIV_ROUND_UP(result + start, PAGE_SIZE);
+
+       return result;
+#else
+       unsigned long addr;
+       size_t page_count;
+       size_t size;
+       long result;
+
+       if (!maxsize)
+               return 0;
+
+       if (!iter->nr_segs)
+               return 0;
+
+       addr = (unsigned long)iter->iov->iov_base + iter->iov_offset;
+       if (addr & ~PAGE_MASK)
+               return -EINVAL;
+
+       size = min_t(size_t, maxsize, iter->iov->iov_len);
+       page_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_pages, page_count);
+       if (cdp->cdp_pages == NULL)
+               return -ENOMEM;
+
+       mmap_read_lock(current->mm);
+       result = get_user_pages(current, current->mm, addr, page_count,
+                               rw == READ, 0, cdp->cdp_pages, NULL);
+       mmap_read_unlock(current->mm);
+
+       if (unlikely(result != page_count)) {
+               ll_release_user_pages(cdp->cdp_pages, page_count);
+               cdp->cdp_pages = NULL;
+
+               if (result >= 0)
+                       return -EFAULT;
+
+               return result;
+       }
+       cdp->cdp_count = page_count;
+
+       return size;
+#endif
+}
+
+ssize_t cl_dio_pages_init(const struct lu_env *env, struct cl_object *obj,
+                         struct cl_dio_pages *cdp, struct iov_iter *iter,
+                         int rw, size_t maxsize, loff_t offset, bool unaligned)
+{
+       /* index of the first page in the set */
+       pgoff_t index = offset >> PAGE_SHIFT;
+       struct cl_object *head = obj;
+       ssize_t result = 0;
+       ssize_t count = 0;
+
+       ENTRY;
+
+       cdp->cdp_file_offset = offset;
+       cdp->cdp_from = 0;
+       cdp->cdp_to = PAGE_SIZE - 1;
+
+       /* these set cdp->count, which is used in coo_dio_pages_init */
+       if (!unaligned) {
+               result = ll_get_user_pages(rw, iter, cdp, maxsize);
+               /* ll_get_user_pages returns bytes in the IO or error*/
+               count = result;
+       } else {
+               /* same calculation used in ll_get_user_pages */
+               count = min_t(size_t, maxsize, iter->iov->iov_len);
+               result = ll_allocate_dio_buffer(cdp, count);
+               /* allocate_dio_buffer returns number of pages or
+                * error, not count, so clear result on success
+                */
+               if (result > 0)
+                       result = 0;
+       }
+       if (result < 0)
+               GOTO(out, result);
+
+       cl_object_for_each(obj, head) {
+               if (obj->co_ops->coo_dio_pages_init != NULL) {
+                       result = obj->co_ops->coo_dio_pages_init(env, obj,
+                                                                cdp, index);
+                       if (result != 0) {
+                               LASSERT(result < 0);
+                               GOTO(out, result);
+                       }
+               }
+       }
+
+       /* this is special temporary allocation which lets us track the
+        * cl_pages and convert them to a list
+        *
+        * this is used in 'pushing down' the conversion to a page queue
+        */
+       OBD_ALLOC_PTR_ARRAY_LARGE(cdp->cdp_cl_pages, cdp->cdp_count);
+       if (!cdp->cdp_cl_pages)
+               GOTO(out, result = -ENOMEM);
+
+out:
+       /* success */
+       if (result == 0)
+               result = count;
+       RETURN(result);
+}
+EXPORT_SYMBOL(cl_dio_pages_init);
+
 static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
 {
        int index = cl_page->cp_kmem_index;