LU-17081 build: Prefer folio_batch to pagevec
author    Shaun Tancheff <shaun.tancheff@hpe.com>
          Mon, 5 May 2025 18:22:00 +0000 (11:22 -0700)
committer Oleg Drokin <green@whamcloud.com>
          Thu, 15 May 2025 07:31:19 +0000 (07:31 +0000)
Linux commit v5.16-rc4-36-g10331795fb79
  pagevec: Add folio_batch

Linux commit v6.2-rc4-254-g811561288397
  mm: pagevec: add folio_batch_reinit()

Linux commit v6.4-rc4-438-g1e0877d58b1e
  mm: remove struct pagevec

Use folio_batch, and provide wrappers so that older kernels fall back
to pagevec handling; conditionally provide folio_batch_reinit() for
kernels that have folio_batch but lack it.
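For kernels that have folio_batch but predate folio_batch_reinit(), the
conditional fallback reduces to resetting the batch count (a minimal
sketch, mirroring the lustre_compat.h shim below; it forgets entries
without putting page references):

  #ifndef HAVE_FOLIO_BATCH_REINIT
  static inline void folio_batch_reinit(struct folio_batch *fbatch)
  {
          /* reset the count only; callers release pages themselves */
          fbatch->nr = 0;
  }
  #endif /* HAVE_FOLIO_BATCH_REINIT */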

Add macros to ease adding pages to folio_batch(es), as well as
unwinding batches of struct folio where struct page is needed.
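As a usage sketch (assuming the fbatch_at_* helpers added to
lustre_compat.h below), walking every struct page in a batch looks
roughly like:

  struct page *page;
  int i, pg, npgs;

  for (i = 0; i < folio_batch_count(fbatch); i++) {
          /* a folio may span several pages on folio-aware kernels;
           * on pagevec kernels fbatch_at_npgs() is always 1
           */
          npgs = fbatch_at_npgs(fbatch, i);
          for (pg = 0; pg < npgs; pg++) {
                  page = fbatch_at_pg(fbatch, i, pg);
                  /* ... operate on struct page ... */
          }
  }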

Lustre-change: https://review.whamcloud.com/52259
Lustre-commit: b82eab822c078b584fadefd419bfa74df0edebcb

Was-Change-Id: Ie70e4851df00a73f194aaa6631678b54b5d128a1

LU-17904 build: fix typo in vvp_set_batch_dirty

Fix a typo in vvp_set_batch_dirty() that is hit when
kallsyms_lookup_name() is exported and account_page_dirtied is not.

HPE-bug-id: LUS-12374
Fixes: b82eab822c0 ("LU-17081 build: Prefer folio_batch to pagevec")

Lustre-change: https://review.whamcloud.com/55301
Lustre-commit: a89458b3b2a08f78c4795816ca34716b110b8aac

Was-Change-Id: I8b2e6884e74e384aba6e563bef30072175cc0efc

LU-17903 build: enable fast path of vvp_set_batch_dirty

The SUSE 15 SP6 6.4 kernel retains kallsyms_lookup_name, so the fast
path of vvp_set_batch_dirty() can be enabled.
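Concretely, the page-level memcg lock helper behind this fast path
resolves per kernel roughly as follows (a condensed sketch of the
vvp_internal.h change below):

  #ifdef HAVE_FOLIO_MEMCG_LOCK
  # ifdef FOLIO_MEMCG_LOCK_EXPORTED
  #  define folio_memcg_lock_page(pg)   folio_memcg_lock(page_folio(pg))
  # elif defined(HAVE_KALLSYMS_LOOKUP_NAME)
     /* call through a pointer resolved at module init */
  #  define folio_memcg_lock_page(pg)   vvp_folio_memcg_lock(page_folio(pg))
  # endif
  #elif defined(HAVE_LOCK_PAGE_MEMCG)
  # define folio_memcg_lock_page(pg)    lock_page_memcg(pg)
  #else
  # define folio_memcg_lock_page(pg)    /* no-op */
  #endif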

However, the combination of kallsyms_lookup_name being available while
lock_page_memcg is not breaks some old assumptions.

Prefer folio_memcg_lock() over lock_page_memcg(). However, per

Linux commit v5.15-12272-g913ffbdd9985
  mm: unexport folio_memcg_{,un}lock

folio_memcg_lock is not exported either, so use kallsyms_lookup_name
to acquire the symbol.
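The symbols are resolved once at module init time, following the
existing vvp_account_page_dirtied pattern (sketch; the guard matches
the vvp_dev.c hunk below):

  #if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
       defined(HAVE_KALLSYMS_LOOKUP_NAME)
          vvp_folio_memcg_lock = (void *)
                  cfs_kallsyms_lookup_name("folio_memcg_lock");
          LASSERT(vvp_folio_memcg_lock);

          vvp_folio_memcg_unlock = (void *)
                  cfs_kallsyms_lookup_name("folio_memcg_unlock");
          LASSERT(vvp_folio_memcg_unlock);
  #endif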

HPE-bug-id: LUS-12371
Fixes: 61e83a6f130 ("LU-16113 build: Fix configure tests for lock_page_memcg")

Lustre-change: https://review.whamcloud.com/55300
Lustre-commit: ac6dba062928c3eba5f2ddd372a6225436b4e96a

Was-Change-Id: I8ac6b7bde8ee8964db5a801c2f3c4dfb2ef459f9

HPE-bug-id: LUS-11811
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Change-Id: Ie70e4851df00a73f194aaa6631678b54b5d128a1
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/59099
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Yang Sheng <ys@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
12 files changed:
lustre/autoconf/lustre-core.m4
lustre/include/cl_object.h
lustre/include/lustre_compat.h
lustre/include/lustre_osc.h
lustre/llite/vvp_dev.c
lustre/llite/vvp_internal.h
lustre/llite/vvp_io.c
lustre/obdclass/cl_page.c
lustre/osc/osc_cache.c
lustre/osc/osc_io.c
lustre/osc/osc_page.c
lustre/osd-ldiskfs/osd_io.c

diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4
index 3f21854..6e9d041 100644
@@ -1848,6 +1848,8 @@ AC_DEFUN([LC_D_IN_LOOKUP], [
 # Kernel version 4.6 adds lock_page_memcg(page)
 # Linux commit v5.15-12273-gab2f9d2d3626
 #   mm: unexport {,un}lock_page_memcg
+# and removed in v6.4-rc4-327-g6c77b607ee26
+#   mm: kill lock|unlock_page_memcg()
 #
 AC_DEFUN([LC_SRC_LOCK_PAGE_MEMCG], [
        LB2_LINUX_TEST_SRC([lock_page_memcg], [
@@ -3025,6 +3027,27 @@ AC_DEFUN([LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG], [
 ]) # LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
 
 #
+# LC_FOLIO_MEMCG_LOCK
+#
+# kernel v5.15-rc3-45-gf70ad4487415
+#    mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock()
+# Use folio_memcg_[un]lock when [un]lock_page_memcg is removed.
+#
+AC_DEFUN([LC_SRC_FOLIO_MEMCG_LOCK], [
+       LB2_LINUX_TEST_SRC([folio_memcg_lock], [
+               #include <linux/memcontrol.h>
+       ],[
+               folio_memcg_lock(NULL);
+       ],[-Werror])
+])
+AC_DEFUN([LC_FOLIO_MEMCG_LOCK], [
+       AC_MSG_CHECKING([if 'folio_memcg_lock' is defined])
+       LB2_LINUX_TEST_RESULT([folio_memcg_lock], [
+               AC_DEFINE(HAVE_FOLIO_MEMCG_LOCK, 1, [folio_memcg_lock is defined])
+       ])
+]) # LC_FOLIO_MEMCG_LOCK
+
+#
 # LC_HAVE_KIOCB_COMPLETE_2ARGS
 #
 # kernel v5.15-rc6-145-g6b19b766e8f0
@@ -3054,6 +3077,18 @@ AC_DEFUN([LC_HAVE_KIOCB_COMPLETE_2ARGS], [
 ]) # LC_HAVE_KIOCB_COMPLETE_2ARGS
 
 #
+# LC_FOLIO_MEMCG_LOCK_EXPORTED
+#
+# Linux commit v5.15-12272-g913ffbdd9985
+#   mm: unexport folio_memcg_{,un}lock
+#
+AC_DEFUN([LC_FOLIO_MEMCG_LOCK_EXPORTED], [
+LB_CHECK_EXPORT([folio_memcg_lock], [mm/memcontrol.c],
+       [AC_DEFINE(FOLIO_MEMCG_LOCK_EXPORTED, 1,
+                       [folio_memcg_{,un}lock are exported])])
+]) # LC_FOLIO_MEMCG_LOCK_EXPORTED
+
+#
 # LC_EXPORTS_DELETE_FROM_PAGE_CACHE
 #
 # Linux commit v5.16-rc4-44-g452e9e6992fe
@@ -3788,6 +3823,74 @@ AC_DEFUN([LC_HAVE_GET_USER_PAGES_WITHOUT_VMA], [
 ]) # LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
 
 #
+# LC_HAVE_FOLIO_BATCH_REINIT
+#
+# linux kernel v6.2-rc4-254-g811561288397
+#   mm: pagevec: add folio_batch_reinit()
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH_REINIT], [
+       LB2_LINUX_TEST_SRC([folio_batch_reinit_exists], [
+               #include <linux/pagevec.h>
+       ],[
+               struct folio_batch fbatch __attribute__ ((unused));
+
+               folio_batch_reinit(&fbatch);
+       ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH_REINIT], [
+       AC_MSG_CHECKING([if 'folio_batch_reinit' is available])
+       LB2_LINUX_TEST_RESULT([folio_batch_reinit_exists], [
+               AC_DEFINE(HAVE_FOLIO_BATCH_REINIT, 1,
+                       ['folio_batch_reinit' is available])
+       ])
+]) # LC_HAVE_FOLIO_BATCH_REINIT
+
+#
+# LC_HAVE_FOLIO_BATCH
+#
+# linux kernel v5.16-rc4-36-g10331795fb79
+#   pagevec: Add folio_batch
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH], [
+       LB2_LINUX_TEST_SRC([struct_folio_batch_exists], [
+               #include <linux/pagevec.h>
+       ],[
+               struct folio_batch fbatch __attribute__ ((unused));
+
+               folio_batch_init(&fbatch);
+       ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH], [
+       AC_MSG_CHECKING([if 'struct folio_batch' is available])
+       LB2_LINUX_TEST_RESULT([struct_folio_batch_exists], [
+               AC_DEFINE(HAVE_FOLIO_BATCH, 1,
+                       ['struct folio_batch' is available])
+       ])
+]) # LC_HAVE_FOLIO_BATCH
+
+#
+# LC_HAVE_STRUCT_PAGEVEC
+#
+# linux kernel v6.4-rc4-438-g1e0877d58b1e
+#   mm: remove struct pagevec
+#
+AC_DEFUN([LC_SRC_HAVE_STRUCT_PAGEVEC], [
+       LB2_LINUX_TEST_SRC([struct_pagevec_exists], [
+               #include <linux/pagevec.h>
+       ],[
+               struct pagevec *pvec = NULL;
+               (void)pvec;
+       ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_STRUCT_PAGEVEC], [
+       AC_MSG_CHECKING([if 'struct pagevec' is available])
+       LB2_LINUX_TEST_RESULT([struct_pagevec_exists], [
+               AC_DEFINE(HAVE_PAGEVEC, 1,
+                       ['struct pagevec' is available])
+       ])
+]) # LC_HAVE_STRUCT_PAGEVEC
+
+#
 # LC_PROG_LINUX
 #
 # Lustre linux kernel checks
@@ -3987,6 +4090,7 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
 
        # 5.16
        LC_SRC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
+       LC_SRC_FOLIO_MEMCG_LOCK
        LC_SRC_HAVE_KIOCB_COMPLETE_2ARGS
 
        # 5.17
@@ -4031,11 +4135,14 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
        LC_SRC_HAVE_MNT_IDMAP_ARG
        LC_SRC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
        LC_SRC_HAVE_U64_CAPABILITY
+       LC_SRC_HAVE_FOLIO_BATCH_REINIT
 
        # 6.5
        LC_SRC_HAVE_FILEMAP_SPLICE_READ
        LC_SRC_HAVE_ENUM_ITER_PIPE
        LC_SRC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+       LC_SRC_HAVE_FOLIO_BATCH
+       LC_SRC_HAVE_STRUCT_PAGEVEC
 
        # kernel patch to extend integrity interface
        LC_SRC_BIO_INTEGRITY_PREP_FN
@@ -4248,7 +4355,9 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
 
        # 5.16
        LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
+       LC_FOLIO_MEMCG_LOCK
        LC_HAVE_KIOCB_COMPLETE_2ARGS
+       LC_FOLIO_MEMCG_LOCK_EXPORTED
        LC_EXPORTS_DELETE_FROM_PAGE_CACHE
 
        # 5.17
@@ -4293,11 +4402,14 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
        LC_HAVE_MNT_IDMAP_ARG
        LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
        LC_HAVE_U64_CAPABILITY
+       LC_HAVE_FOLIO_BATCH_REINIT
 
        # 6.5
        LC_HAVE_FILEMAP_SPLICE_READ
        LC_HAVE_ENUM_ITER_PIPE
        LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+       LC_HAVE_FOLIO_BATCH
+       LC_HAVE_STRUCT_PAGEVEC
 
        # kernel patch to extend integrity interface
        LC_BIO_INTEGRITY_PREP_FN
diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index c2c8864..1a9afc6 100644
 #include <linux/pagevec.h>
 #include <libcfs/linux/linux-misc.h>
 #include <lustre_dlm.h>
+#include <lustre_compat.h>
 
 struct obd_info;
 struct inode;
@@ -1388,7 +1389,7 @@ struct cl_io_slice {
 };
 
 typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
-                             struct pagevec *);
+                             struct folio_batch *);
 
 struct cl_read_ahead {
        /* Maximum page index the readahead window will end.
@@ -2169,9 +2170,8 @@ struct cl_page *cl_page_alloc       (const struct lu_env *env,
 void            cl_page_get         (struct cl_page *page);
 void            cl_page_put         (const struct lu_env *env,
                                      struct cl_page *page);
-void           cl_pagevec_put      (const struct lu_env *env,
-                                    struct cl_page *page,
-                                    struct pagevec *pvec);
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+                 struct folio_batch *fbatch);
 void            cl_page_print       (const struct lu_env *env, void *cookie,
                                      lu_printer_t printer,
                                      const struct cl_page *pg);
diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index 891db55..d531acd 100644
@@ -43,6 +43,7 @@
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/security.h>
+#include <linux/pagevec.h>
 #include <linux/workqueue.h>
 #include <libcfs/linux/linux-fs.h>
 #include <obd_support.h>
@@ -436,12 +437,6 @@ static inline struct timespec current_time(struct inode *inode)
 #define smp_store_mb(var, value)       set_mb(var, value)
 #endif
 
-#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec)
-#else
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
-#endif
-
 #ifdef HAVE_D_COUNT
 #  define ll_d_count(d)                d_count(d)
 #else
@@ -462,20 +457,6 @@ static inline struct timespec current_time(struct inode *inode)
 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
 #endif
 
-/* Linux commit v5.15-12273-gab2f9d2d3626
- *   mm: unexport {,un}lock_page_memcg
- *
- * Note that the functions are still defined or declared breaking
- * the simple approach of just defining the missing functions here
- */
-#ifdef HAVE_LOCK_PAGE_MEMCG
-#define vvp_lock_page_memcg(page)      lock_page_memcg((page))
-#define vvp_unlock_page_memcg(page)    unlock_page_memcg((page))
-#else
-#define vvp_lock_page_memcg(page)
-#define vvp_unlock_page_memcg(page)
-#endif
-
 #ifndef KMEM_CACHE_USERCOPY
 #define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
                                   usersize, ctor)                       \
@@ -652,6 +633,43 @@ static inline struct page *ll_read_cache_page(struct address_space *mapping,
 #endif /* HAVE_READ_CACHE_PAGE_WANTS_FILE */
 }
 
+#ifdef HAVE_FOLIO_BATCH
+# define ll_folio_batch_init(batch, n) folio_batch_init(batch)
+# define fbatch_at(fbatch, f)          ((fbatch)->folios[(f)])
+# define fbatch_at_npgs(fbatch, f)     folio_nr_pages((fbatch)->folios[(f)])
+# define fbatch_at_pg(fbatch, f, pg)   folio_page((fbatch)->folios[(f)], (pg))
+# define folio_batch_add_page(fbatch, page) \
+        folio_batch_add(fbatch, page_folio(page))
+# ifndef HAVE_FOLIO_BATCH_REINIT
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+       fbatch->nr = 0;
+}
+# endif /* HAVE_FOLIO_BATCH_REINIT */
+
+#else /* !HAVE_FOLIO_BATCH */
+
+# ifdef HAVE_PAGEVEC
+#  define folio_batch                  pagevec
+# endif
+# define folio_batch_init(pvec)                pagevec_init(pvec)
+# define folio_batch_reinit(pvec)      pagevec_reinit(pvec)
+# define folio_batch_count(pvec)       pagevec_count(pvec)
+# define folio_batch_space(pvec)       pagevec_space(pvec)
+# define folio_batch_add_page(pvec, page) \
+        pagevec_add(pvec, page)
+# define folio_batch_release(pvec) \
+        pagevec_release(((struct pagevec *)pvec))
+# ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
+#  define ll_folio_batch_init(pvec, n) pagevec_init(pvec)
+# else
+#  define ll_folio_batch_init(pvec, n) pagevec_init(pvec, n)
+# endif
+# define fbatch_at(pvec, n)            ((pvec)->pages[(n)])
+# define fbatch_at_npgs(pvec, n)       1
+# define fbatch_at_pg(pvec, n, pg)     ((pvec)->pages[(n)])
+#endif /* HAVE_FOLIO_BATCH */
+
 #ifndef HAVE_FLUSH___WORKQUEUE
 #define __flush_workqueue(wq)  flush_scheduled_work()
 #endif
diff --git a/lustre/include/lustre_osc.h b/lustre/include/lustre_osc.h
index a15a6b6..a6a6ab1 100644
@@ -161,7 +161,7 @@ struct osc_thread_info {
        union ldlm_policy_data  oti_policy;
        struct cl_attr          oti_attr;
        struct cl_io            oti_io;
-       struct pagevec          oti_pagevec;
+       struct folio_batch      oti_fbatch;
        void                    *oti_pvec[OTI_PVEC_SIZE];
        /**
         * Fields used by cl_lock_discard_pages().
diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c
index 4036afc..cd537e9 100644
@@ -267,6 +267,11 @@ struct lu_device_type vvp_device_type = {
 
 unsigned int (*vvp_account_page_dirtied)(struct page *page,
                                         struct address_space *mapping);
+#if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
+     defined(HAVE_KALLSYMS_LOOKUP_NAME)
+void (*vvp_folio_memcg_lock)(struct folio *folio);
+void (*vvp_folio_memcg_unlock)(struct folio *folio);
+#endif
 
 /**
  * A mutex serializing calls to vvp_inode_fini() under extreme memory
@@ -294,6 +299,17 @@ int vvp_global_init(void)
 #endif
 #endif
 
+#if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
+     defined(HAVE_KALLSYMS_LOOKUP_NAME)
+       vvp_folio_memcg_lock = (void *)
+               cfs_kallsyms_lookup_name("folio_memcg_lock");
+       LASSERT(vvp_folio_memcg_lock);
+
+       vvp_folio_memcg_unlock = (void *)
+               cfs_kallsyms_lookup_name("folio_memcg_unlock");
+       LASSERT(vvp_folio_memcg_unlock);
+#endif
+
        return 0;
 
 out_kmem:
diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h
index 40648c4..0769a5b 100644
@@ -283,6 +283,27 @@ extern unsigned int (*vvp_account_page_dirtied)(struct page *page,
                                                struct address_space *mapping);
 #endif
 
+#ifdef HAVE_FOLIO_MEMCG_LOCK
+#ifdef FOLIO_MEMCG_LOCK_EXPORTED
+#define folio_memcg_lock_page(page)    folio_memcg_lock(page_folio((page)))
+#define folio_memcg_unlock_page(page)  folio_memcg_unlock(page_folio((page)))
+#elif defined(HAVE_KALLSYMS_LOOKUP_NAME)
+/* Use kallsyms_lookup_name to acquire folio_memcg_[un]lock */
+extern void (*vvp_folio_memcg_lock)(struct folio *folio);
+extern void (*vvp_folio_memcg_unlock)(struct folio *folio);
+#define folio_memcg_lock_page(page) \
+       vvp_folio_memcg_lock(page_folio((page)))
+#define folio_memcg_unlock_page(page) \
+       vvp_folio_memcg_unlock(page_folio((page)))
+#endif
+#elif defined HAVE_LOCK_PAGE_MEMCG
+#define folio_memcg_lock_page(page)    lock_page_memcg((page))
+#define folio_memcg_unlock_page(page)  unlock_page_memcg((page))
+#else
+#define folio_memcg_lock_page(page)
+#define folio_memcg_unlock_page(page)
+#endif
+
 extern const struct file_operations vvp_dump_pgcache_file_ops;
 
 #endif /* VVP_INTERNAL_H */
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 73cc2a3..2f3e745 100644
@@ -43,6 +43,7 @@
 
 #include "llite_internal.h"
 #include "vvp_internal.h"
+#include <lustre_compat.h>
 #include <libcfs/linux/linux-misc.h>
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
@@ -1018,15 +1019,19 @@ static inline void ll_account_page_dirtied(struct page *page,
  * Backwards compat for 3.x, 5.x kernels relating to memcg handling
  * & rename of radix tree to xarray.
  */
-void vvp_set_pagevec_dirty(struct pagevec *pvec)
+void vvp_set_batch_dirty(struct folio_batch *fbatch)
 {
-       struct page *page = pvec->pages[0];
-       int count = pagevec_count(pvec);
+       struct page *page = fbatch_at_pg(fbatch, 0, 0);
+       int count = folio_batch_count(fbatch);
        int i;
+#if !defined(HAVE_FOLIO_BATCH) || defined(HAVE_KALLSYMS_LOOKUP_NAME)
+       int pg, npgs;
+#endif
 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
        struct address_space *mapping = page->mapping;
        unsigned long flags;
        unsigned long skip_pages = 0;
+       int pgno;
        int dirtied = 0;
 #endif
 
@@ -1045,25 +1050,41 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
         */
 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
        if (!vvp_account_page_dirtied) {
-               for (i = 0; i < count; i++)
-                       __set_page_dirty_nobuffers(pvec->pages[i]);
+               for (i = 0; i < count; i++) {
+#ifdef HAVE_FOLIO_BATCH
+                       filemap_dirty_folio(page->mapping, fbatch->folios[i]);
+#else
+                       npgs = fbatch_at_npgs(fbatch, i);
+                       for (pg = 0; pg < npgs; pg++) {
+                               page = fbatch_at_pg(fbatch, i, pg);
+                               __set_page_dirty_nobuffers(page);
+                       }
+#endif
+               }
                EXIT;
        }
 #endif
 
+       /* account_page_dirtied is available directly or via kallsyms */
 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
-       for (i = 0; i < count; i++) {
-               page = pvec->pages[i];
+       for (pgno = i = 0; i < count; i++) {
+               npgs = fbatch_at_npgs(fbatch, i);
+               for (pg = 0; pg < npgs; pg++) {
+                       page = fbatch_at_pg(fbatch, i, pg);
 
-               ClearPageReclaim(page);
+                       ClearPageReclaim(page);
 
-               vvp_lock_page_memcg(page);
-               if (TestSetPageDirty(page)) {
-                       /* page is already dirty .. no extra work needed
-                        * set a flag for the i'th page to be skipped
-                        */
-                       vvp_unlock_page_memcg(page);
-                       skip_pages |= (1 << i);
+                       folio_memcg_lock_page(page);
+                       if (TestSetPageDirty(page)) {
+                               /* page is already dirty .. no extra work needed
+                                * set a flag for the i'th page to be skipped
+                                */
+                               folio_memcg_unlock_page(page);
+                               skip_pages |= (1ul << pgno++);
+                               LASSERTF(pgno <= BITS_PER_LONG,
+                                        "Limit exceeded pgno: %d/%d\n", pgno,
+                                        BITS_PER_LONG);
+                       }
                }
        }
 
@@ -1078,19 +1099,22 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
         * dirty_nobuffers should be impossible because we hold the page lock.)
         * 4. All mappings are the same because i/o is only to one file.
         */
-       for (i = 0; i < count; i++) {
-               page = pvec->pages[i];
-               /* if the i'th page was unlocked above, skip it here */
-               if ((skip_pages >> i) & 1)
-                       continue;
-
-               LASSERTF(page->mapping == mapping,
-                        "all pages must have the same mapping.  page %p, mapping %p, first mapping %p\n",
-                        page, page->mapping, mapping);
-               WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-               ll_account_page_dirtied(page, mapping);
-               dirtied++;
-               vvp_unlock_page_memcg(page);
+       for (pgno = i = 0; i < count; i++) {
+               npgs = fbatch_at_npgs(fbatch, i);
+               for (pg = 0; pg < npgs; pg++) {
+                       page = fbatch_at_pg(fbatch, i, pg);
+                       /* if the i'th page was unlocked above, skip it here */
+                       if ((skip_pages >> pgno++) & 1)
+                               continue;
+
+                       LASSERTF(page->mapping == mapping,
+                                "all pages must have the same mapping.  page %px, mapping %px, first mapping %px\n",
+                                page, page->mapping, mapping);
+                       WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+                       ll_account_page_dirtied(page, mapping);
+                       dirtied++;
+                       folio_memcg_unlock_page(page);
+               }
        }
        ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
 
@@ -1106,29 +1130,36 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
 }
 
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
-                                 struct pagevec *pvec)
+                                 struct folio_batch *fbatch)
 {
+       struct page *vmpage;
+       struct cl_page *page;
+       int pg, npgs;
        int count = 0;
        int i = 0;
 
        ENTRY;
 
-       count = pagevec_count(pvec);
+       count = folio_batch_count(fbatch);
        LASSERT(count > 0);
 
        for (i = 0; i < count; i++) {
-               struct page *vmpage = pvec->pages[i];
-               SetPageUptodate(vmpage);
+               npgs = fbatch_at_npgs(fbatch, i);
+               for (pg = 0; pg < npgs; pg++)
+                       SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
        }
 
-       vvp_set_pagevec_dirty(pvec);
+       vvp_set_batch_dirty(fbatch);
 
        for (i = 0; i < count; i++) {
-               struct page *vmpage = pvec->pages[i];
-               struct cl_page *page = (struct cl_page *) vmpage->private;
-               cl_page_disown(env, io, page);
-               lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
-               cl_page_put(env, page);
+               npgs = fbatch_at_npgs(fbatch, i);
+               for (pg = 0; pg < npgs; pg++) {
+                       vmpage = fbatch_at_pg(fbatch, i, pg);
+                       page = (struct cl_page *) vmpage->private;
+                       cl_page_disown(env, io, page);
+                       lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+                       cl_page_put(env, page);
+               }
        }
 
        EXIT;
@@ -1445,9 +1476,9 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 }
 
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
-                                   struct pagevec *pvec)
+                                   struct folio_batch *fbatch)
 {
-       vvp_set_pagevec_dirty(pvec);
+       vvp_set_batch_dirty(fbatch);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 45b2cfe..aa1f2fe 100644
@@ -159,7 +159,7 @@ static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
 }
 
 static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
-                        struct pagevec *pvec)
+                        struct folio_batch *fbatch)
 {
        struct cl_object *obj  = cp->cp_obj;
        unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
@@ -177,9 +177,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
                LASSERT(vmpage != NULL);
                LASSERT((struct cl_page *)vmpage->private != cp);
 
-               if (pvec != NULL) {
-                       if (!pagevec_add(pvec, vmpage))
-                               pagevec_release(pvec);
+               if (fbatch != NULL) {
+                       if (!folio_batch_add_page(fbatch, vmpage))
+                               folio_batch_release(fbatch);
                } else {
                        put_page(vmpage);
                }
@@ -460,13 +460,13 @@ void cl_page_get(struct cl_page *page)
 EXPORT_SYMBOL(cl_page_get);
 
 /**
- * Releases a reference to a page, use the pagevec to release the pages
+ * Releases a reference to a page, use the folio_batch to release the pages
  * in batch if provided.
  *
- * Users need to do a final pagevec_release() to release any trailing pages.
+ * Users need to do a final folio_batch_release() to release any trailing pages.
  */
-void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
-                 struct pagevec *pvec)
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+                 struct folio_batch *fbatch)
 {
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
@@ -482,15 +482,15 @@ void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
                 * Page is no longer reachable by other threads. Tear
                 * it down.
                 */
-               cl_page_free(env, page, pvec);
+               cl_page_free(env, page, fbatch);
        }
 
        EXIT;
 }
-EXPORT_SYMBOL(cl_pagevec_put);
+EXPORT_SYMBOL(cl_batch_put);
 
 /**
- * Releases a reference to a page, wrapper to cl_pagevec_put
+ * Releases a reference to a page, wrapper to cl_batch_put
  *
  * When last reference is released, page is returned to the cache, unless it
  * is in cl_page_state::CPS_FREEING state, in which case it is immediately
@@ -500,7 +500,7 @@ EXPORT_SYMBOL(cl_pagevec_put);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-       cl_pagevec_put(env, page, NULL);
+       cl_batch_put(env, page, NULL);
 }
 EXPORT_SYMBOL(cl_page_put);
 
diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c
index 030edfe..f923a5c 100644
@@ -946,7 +946,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
        struct client_obd     *cli = osc_cli(obj);
        struct osc_async_page *oap;
        struct osc_async_page *tmp;
-       struct pagevec        *pvec;
+       struct folio_batch    *fbatch;
        int                    pages_in_chunk = 0;
        int                    ppc_bits    = cli->cl_chunkbits -
                                             PAGE_SHIFT;
@@ -971,8 +971,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
        io  = osc_env_thread_io(env);
        io->ci_obj = cl_object_top(osc2cl(obj));
        io->ci_ignore_layout = 1;
-       pvec = &osc_env_info(env)->oti_pagevec;
-       ll_pagevec_init(pvec, 0);
+       fbatch = &osc_env_info(env)->oti_fbatch;
+       ll_folio_batch_init(fbatch, 0);
        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc < 0)
                GOTO(out, rc);
@@ -1010,12 +1010,12 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
                }
 
                lu_ref_del(&page->cp_reference, "truncate", current);
-               cl_pagevec_put(env, page, pvec);
+               cl_batch_put(env, page, fbatch);
 
                --ext->oe_nr_pages;
                ++nr_pages;
        }
-       pagevec_release(pvec);
+       folio_batch_release(fbatch);
 
        EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
                      ext->oe_nr_pages == 0),
@@ -2282,7 +2282,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
        struct osc_async_page *oap = &ops->ops_oap;
        struct osc_object     *osc = oap->oap_obj;
        struct client_obd     *cli = osc_cli(osc);
-       struct pagevec        *pvec = &osc_env_info(env)->oti_pagevec;
+       struct folio_batch    *fbatch = &osc_env_info(env)->oti_fbatch;
        pgoff_t index;
        unsigned int tmp;
        unsigned int grants = 0;
@@ -2413,10 +2413,11 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 
                /* We must not hold a page lock while we do osc_enter_cache()
                 * or osc_extent_find(), so we must mark dirty & unlock
-                * any pages in the write commit pagevec. */
-               if (pagevec_count(pvec)) {
-                       cb(env, io, pvec);
-                       pagevec_reinit(pvec);
+                * any pages in the write commit folio_batch.
+                */
+               if (folio_batch_count(fbatch)) {
+                       cb(env, io, fbatch);
+                       folio_batch_reinit(fbatch);
                }
 
                if (grants == 0) {
@@ -3061,7 +3062,7 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
                          osc_page_gang_cbt cb, void *cbdata)
 {
        struct osc_page *ops;
-       struct pagevec  *pagevec;
+       struct folio_batch *fbatch;
        void            **pvec;
        pgoff_t         idx;
        unsigned int    nr;
@@ -3073,8 +3074,8 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 
        idx = start;
        pvec = osc_env_info(env)->oti_pvec;
-       pagevec = &osc_env_info(env)->oti_pagevec;
-       ll_pagevec_init(pagevec, 0);
+       fbatch = &osc_env_info(env)->oti_fbatch;
+       ll_folio_batch_init(fbatch, 0);
        spin_lock(&osc->oo_tree_lock);
        while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
                                            idx, OTI_PVEC_SIZE)) > 0) {
@@ -3120,9 +3121,9 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
                        ops = pvec[i];
                        page = ops->ops_cl.cpl_page;
                        lu_ref_del(&page->cp_reference, "gang_lookup", current);
-                       cl_pagevec_put(env, page, pagevec);
+                       cl_batch_put(env, page, fbatch);
                }
-               pagevec_release(pagevec);
+               folio_batch_release(fbatch);
 
                if (nr < OTI_PVEC_SIZE || end_of_region)
                        break;
diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c
index cd2a97f..0d20fd0 100644
@@ -300,13 +300,13 @@ int osc_io_commit_async(const struct lu_env *env,
                        struct cl_page_list *qin, int from, int to,
                        cl_commit_cbt cb)
 {
-       struct cl_io    *io = ios->cis_io;
-       struct osc_io   *oio = cl2osc_io(env, ios);
+       struct cl_io *io = ios->cis_io;
+       struct osc_io *oio = cl2osc_io(env, ios);
        struct osc_object *osc = cl2osc(ios->cis_obj);
-       struct cl_page  *page;
-       struct cl_page  *last_page;
+       struct cl_page *page;
+       struct cl_page *last_page;
        struct osc_page *opg;
-       struct pagevec  *pvec = &osc_env_info(env)->oti_pagevec;
+       struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
        int result = 0;
        ENTRY;
 
@@ -326,7 +326,7 @@ int osc_io_commit_async(const struct lu_env *env,
                }
        }
 
-       ll_pagevec_init(pvec, 0);
+       ll_folio_batch_init(fbatch, 0);
 
        while (qin->pl_nr > 0) {
                struct osc_async_page *oap;
@@ -358,9 +358,9 @@ int osc_io_commit_async(const struct lu_env *env,
                cl_page_list_del(env, qin, page);
 
                /* if there are no more slots, do the callback & reinit */
-               if (pagevec_add(pvec, page->cp_vmpage) == 0) {
-                       (*cb)(env, io, pvec);
-                       pagevec_reinit(pvec);
+               if (!folio_batch_add_page(fbatch, page->cp_vmpage)) {
+                       (*cb)(env, io, fbatch);
+                       folio_batch_reinit(fbatch);
                }
        }
        /* The shrink interval is in seconds, so we can update it once per
@@ -369,9 +369,9 @@ int osc_io_commit_async(const struct lu_env *env,
        osc_update_next_shrink(osc_cli(osc));
 
 
-       /* Clean up any partially full pagevecs */
-       if (pagevec_count(pvec) != 0)
-               (*cb)(env, io, pvec);
+       /* Clean up any partially full folio_batches */
+       if (folio_batch_count(fbatch) != 0)
+               (*cb)(env, io, fbatch);
 
        /* Can't access these pages any more. Page can be in transfer and
         * complete at any time. */
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 79c92ba..0bdf844 100644
@@ -503,13 +503,13 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
        }
 }
 
-static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
-                               struct cl_page **pvec, int max_index)
+static void discard_cl_pages(const struct lu_env *env, struct cl_io *io,
+                            struct cl_page **pvec, int max_index)
 {
-       struct pagevec *pagevec = &osc_env_info(env)->oti_pagevec;
+       struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
        int i;
 
-       ll_pagevec_init(pagevec, 0);
+       ll_folio_batch_init(fbatch, 0);
        for (i = 0; i < max_index; i++) {
                struct cl_page *page = pvec[i];
 
@@ -517,11 +517,11 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
                cl_page_delete(env, page);
                cl_page_discard(env, io, page);
                cl_page_disown(env, io, page);
-               cl_pagevec_put(env, page, pagevec);
+               cl_batch_put(env, page, fbatch);
 
                pvec[i] = NULL;
        }
-       pagevec_release(pagevec);
+       folio_batch_release(fbatch);
 }
 
 /**
@@ -613,7 +613,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                        spin_unlock(&cli->cl_lru_list_lock);
 
                        if (clobj != NULL) {
-                               discard_pagevec(env, io, pvec, index);
+                               discard_cl_pages(env, io, pvec, index);
                                index = 0;
 
                                cl_io_fini(env, io);
@@ -658,7 +658,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                pvec[index++] = page;
                if (unlikely(index == OTI_PVEC_SIZE)) {
                        spin_unlock(&cli->cl_lru_list_lock);
-                       discard_pagevec(env, io, pvec, index);
+                       discard_cl_pages(env, io, pvec, index);
                        index = 0;
 
                        spin_lock(&cli->cl_lru_list_lock);
@@ -670,7 +670,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
        spin_unlock(&cli->cl_lru_list_lock);
 
        if (clobj != NULL) {
-               discard_pagevec(env, io, pvec, index);
+               discard_cl_pages(env, io, pvec, index);
 
                cl_io_fini(env, io);
                cl_object_put(env, clobj);
diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c
index f74862c..9c33141 100644
@@ -807,10 +807,10 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
 {
        struct osd_thread_info *oti = osd_oti_get(env);
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        int i;
 
-       ll_pagevec_init(&pvec, 0);
+       ll_folio_batch_init(&fbatch, 0);
 
        for (i = 0; i < npages; i++) {
                struct page *page = lnb[i].lnb_page;
@@ -826,8 +826,8 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                } else {
                        if (lnb[i].lnb_locked)
                                unlock_page(page);
-                       if (pagevec_add(&pvec, page) == 0)
-                               pagevec_release(&pvec);
+                       if (folio_batch_add_page(&fbatch, page) == 0)
+                               folio_batch_release(&fbatch);
                }
 
                lnb[i].lnb_page = NULL;
@@ -835,8 +835,8 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
 
        LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
 
-       /* Release any partial pagevec */
-       pagevec_release(&pvec);
+       /* Release any partial folio_batch */
+       folio_batch_release(&fbatch);
 
        RETURN(0);
 }