Whamcloud - gitweb
LU-15220 llite: Compat for set_pagevec_dirty 27/45927/6
author: Patrick Farrell <pfarrell@whamcloud.com>
Thu, 6 Jan 2022 18:20:59 +0000 (10:20 -0800)
committer: Oleg Drokin <green@whamcloud.com>
Thu, 20 Jan 2022 06:39:55 +0000 (06:39 +0000)
If we can't access account_page_dirtied either via export
or via kallsyms lookup, the benefit of vvp_set_pagevec_dirty
is mostly lost, since we have to take the xarray lock
repeatedly to handle accounting dirty pages.

Replace the more complicated compat code by just falling
back to __set_page_dirty_nobuffers in a loop, since this
has the same effect and is much simpler.

This also resolves 5.14 compatibility, as __set_page_dirty
is no longer exported there.

Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I3feb526b8eaaec3811c689a895875b409204a159
Reviewed-on: https://review.whamcloud.com/45927
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Yingjin Qian <qian@ddn.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/lustre_compat.h
lustre/llite/vvp_dev.c
lustre/llite/vvp_io.c

index ac80bde..9af8d4b 100644 (file)
@@ -502,14 +502,10 @@ static inline bool bdev_integrity_enabled(struct block_device *bdev, int rw)
 #define page_tree i_pages
 #define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags)
 #define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags)
-#define ll_xa_lock(lockp) xa_lock(lockp)
-#define ll_xa_unlock(lockp) xa_unlock(lockp)
 #else
 #define i_pages tree_lock
 #define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
-#define ll_xa_lock(lockp) spin_lock(lockp)
-#define ll_xa_unlock(lockp) spin_unlock(lockp)
 #endif
 
 #ifndef HAVE_LOCK_PAGE_MEMCG
index 274c667..4d7995f 100644 (file)
@@ -265,23 +265,9 @@ struct lu_device_type vvp_device_type = {
         .ldt_ctx_tags = LCT_CL_THREAD
 };
 
-#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
 unsigned int (*vvp_account_page_dirtied)(struct page *page,
                                         struct address_space *mapping);
 
-unsigned int ll_account_page_dirtied(struct page *page,
-                                    struct address_space *mapping)
-{
-       /* must use __set_page_dirty, which means unlocking and
-        * relocking, which hurts performance.
-        */
-       ll_xa_unlock(&mapping->i_pages);
-       __set_page_dirty(page, mapping, 0);
-       ll_xa_lock(&mapping->i_pages);
-       return 0;
-}
-#endif
-
 /**
  * A mutex serializing calls to vvp_inode_fini() under extreme memory
  * pressure, when environments cannot be allocated.
@@ -299,13 +285,13 @@ int vvp_global_init(void)
                goto out_kmem;
 
 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
+#ifdef HAVE_KALLSYMS_LOOKUP_NAME
        /*
         * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
         */
        vvp_account_page_dirtied = (void *)
                cfs_kallsyms_lookup_name("account_page_dirtied");
-       if (!vvp_account_page_dirtied)
-               vvp_account_page_dirtied = ll_account_page_dirtied;
+#endif
 #endif
 
        return 0;
index cff5cba..87ad197 100644 (file)
@@ -996,12 +996,14 @@ static inline void ll_account_page_dirtied(struct page *page,
 void vvp_set_pagevec_dirty(struct pagevec *pvec)
 {
        struct page *page = pvec->pages[0];
+       int count = pagevec_count(pvec);
+       int i;
+#ifdef HAVE_KALLSYMS_LOOKUP_NAME
        struct address_space *mapping = page->mapping;
        unsigned long flags;
        unsigned long skip_pages = 0;
-       int count = pagevec_count(pvec);
        int dirtied = 0;
-       int i;
+#endif
 
        ENTRY;
 
@@ -1010,6 +1012,15 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                 "mapping must be set. page %p, page->private (cl_page) %p\n",
                 page, (void *) page->private);
 
+/* kernels without HAVE_KALLSYMS_LOOKUP_NAME also don't have account_page_dirtied
+ * exported, and if we can't access that symbol, we can't do page dirtying in
+ * batch (taking the xarray lock only once) so we just fall back to a looped
+ * call to __set_page_dirty_nobuffers
+ */
+#ifndef HAVE_KALLSYMS_LOOKUP_NAME
+       for (i = 0; i < count; i++)
+               __set_page_dirty_nobuffers(pvec->pages[i]);
+#else
        for (i = 0; i < count; i++) {
                page = pvec->pages[i];
 
@@ -1059,7 +1070,7 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                /* !PageAnon && !swapper_space */
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
-
+#endif
        EXIT;
 }