LU-15220 llite: Compat for set_pagevec_dirty
author		Patrick Farrell <pfarrell@whamcloud.com>
		Fri, 21 Jan 2022 02:20:29 +0000 (18:20 -0800)
committer	Andreas Dilger <adilger@whamcloud.com>
		Mon, 24 Jan 2022 05:39:49 +0000 (05:39 +0000)
If we can't access account_page_dirtied either via export
or via kallsyms lookup, the benefit of vvp_set_pagevec_dirty
is mostly lost, since we have to take the xarray lock
repeatedly to handle dirty page accounting.
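
For context, the point of vvp_set_pagevec_dirty is to pay for the
mapping's xarray lock once per pagevec rather than once per page.
Roughly (a simplified sketch of the batched path; the real function
also skips already-dirty pages and handles memcg locking):

    /* simplified sketch: batched dirtying takes the xarray lock once */
    ll_xa_lock_irqsave(&mapping->i_pages, flags);
    for (i = 0; i < count; i++) {
            struct page *pg = pvec->pages[i];

            if (pg->mapping)        /* not truncated under us */
                    dirtied += vvp_account_page_dirtied(pg, mapping);
    }
    ll_xa_unlock_irqrestore(&mapping->i_pages, flags);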

Replace the more complicated compat code with a fallback to
__set_page_dirty_nobuffers in a loop, since this has the same
effect and is much simpler.
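
Concretely, the fallback added by this patch (see the vvp_io.c hunk
below) is just:

    /* no batching possible: dirty each page individually; the kernel's
     * __set_page_dirty_nobuffers() takes the mapping lock itself
     */
    for (i = 0; i < count; i++)
            __set_page_dirty_nobuffers(pvec->pages[i]);

Correctness is unchanged; only the single-lock batching is lost.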

This also resolves a 5.14 compatibility issue, as
__set_page_dirty is no longer exported there.

Lustre-change: https://review.whamcloud.com/45927
Lustre-commit: 8fdef1381ea07657d6689818b2897d69cb7e9c83

Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I3feb526b8eaaec3811c689a895875b409204a159
Reviewed-by: Yingjin Qian <qian@ddn.com>
Reviewed-on: https://review.whamcloud.com/46249
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
lustre/include/lustre_compat.h
lustre/llite/vvp_dev.c
lustre/llite/vvp_io.c

diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index f25af9f..0127f92 100644
--- a/lustre/include/lustre_compat.h
+++ b/lustre/include/lustre_compat.h
@@ -503,14 +503,10 @@ static inline bool bdev_integrity_enabled(struct block_device *bdev, int rw)
 #define page_tree i_pages
 #define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags)
 #define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags)
-#define ll_xa_lock(lockp) xa_lock(lockp)
-#define ll_xa_unlock(lockp) xa_unlock(lockp)
 #else
 #define i_pages tree_lock
 #define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
-#define ll_xa_lock(lockp) spin_lock(lockp)
-#define ll_xa_unlock(lockp) spin_unlock(lockp)
 #endif
 
 #ifndef HAVE_LOCK_PAGE_MEMCG
diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c
index 31e8bcc..228dfe4 100644
--- a/lustre/llite/vvp_dev.c
+++ b/lustre/llite/vvp_dev.c
@@ -266,23 +266,9 @@ struct lu_device_type vvp_device_type = {
         .ldt_ctx_tags = LCT_CL_THREAD
 };
 
-#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
 unsigned int (*vvp_account_page_dirtied)(struct page *page,
                                         struct address_space *mapping);
 
-unsigned int ll_account_page_dirtied(struct page *page,
-                                    struct address_space *mapping)
-{
-       /* must use __set_page_dirty, which means unlocking and
-        * relocking, which hurts performance.
-        */
-       ll_xa_unlock(&mapping->i_pages);
-       __set_page_dirty(page, mapping, 0);
-       ll_xa_lock(&mapping->i_pages);
-       return 0;
-}
-#endif
-
 /**
  * A mutex serializing calls to vvp_inode_fini() under extreme memory
  * pressure, when environments cannot be allocated.
@@ -300,13 +286,13 @@ int vvp_global_init(void)
                goto out_kmem;
 
 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
+#ifdef HAVE_KALLSYMS_LOOKUP_NAME
        /*
         * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
         */
        vvp_account_page_dirtied = (void *)
                cfs_kallsyms_lookup_name("account_page_dirtied");
-       if (!vvp_account_page_dirtied)
-               vvp_account_page_dirtied = ll_account_page_dirtied;
+#endif
 #endif
 
        return 0;
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 78533cd..323b376 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -1002,12 +1002,14 @@ static inline void ll_account_page_dirtied(struct page *page,
 void vvp_set_pagevec_dirty(struct pagevec *pvec)
 {
        struct page *page = pvec->pages[0];
+       int count = pagevec_count(pvec);
+       int i;
+#ifdef HAVE_KALLSYMS_LOOKUP_NAME
        struct address_space *mapping = page->mapping;
        unsigned long flags;
        unsigned long skip_pages = 0;
-       int count = pagevec_count(pvec);
        int dirtied = 0;
-       int i;
+#endif
 
        ENTRY;
 
@@ -1016,6 +1018,15 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                 "mapping must be set. page %p, page->private (cl_page) %p\n",
                 page, (void *) page->private);
 
+/* kernels without HAVE_KALLSYMS_LOOKUP_NAME also don't have account_page_dirtied
+ * exported, and if we can't access that symbol, we can't do page dirtying in
+ * batch (taking the xarray lock only once) so we just fall back to a looped
+ * call to __set_page_dirty_nobuffers
+ */
+#ifndef HAVE_KALLSYMS_LOOKUP_NAME
+       for (i = 0; i < count; i++)
+               __set_page_dirty_nobuffers(pvec->pages[i]);
+#else
        for (i = 0; i < count; i++) {
                page = pvec->pages[i];
 
@@ -1065,7 +1076,7 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                /* !PageAnon && !swapper_space */
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
-
+#endif
        EXIT;
 }
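
For reference, the surviving compat path follows the usual pattern of
resolving an unexported symbol once at init time and tolerating a
failed lookup at the call site. A minimal sketch under the patch's
assumptions (the NULL check shown at the call site is illustrative,
not a quote of the code):

    /* vvp_global_init(): resolve the unexported symbol; may yield NULL */
    #ifdef HAVE_KALLSYMS_LOOKUP_NAME
            vvp_account_page_dirtied = (void *)
                    cfs_kallsyms_lookup_name("account_page_dirtied");
    #endif

    /* call site (illustrative): account only if the lookup succeeded */
    if (vvp_account_page_dirtied)
            vvp_account_page_dirtied(page, mapping);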