LU-13783 libcfs: support absence of account_page_dirtied 27/40827/7
author Mr NeilBrown <neilb@suse.de>
Thu, 29 Apr 2021 13:04:04 +0000 (09:04 -0400)
committer Oleg Drokin <green@whamcloud.com>
Tue, 11 May 2021 22:54:13 +0000 (22:54 +0000)
Some kernels export neither account_page_dirtied nor
kallsyms_lookup_name.
For these kernels we need to fall back to __set_page_dirty() and
suffer the cost of dropping and reclaiming the page-tree lock.
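
In outline, the static inline ll_account_page_dirtied() in vvp_io.c
now resolves through three tiers. A condensed sketch follows; the
kernels-4.2-4.5 memcg branch is elided from the hunks below, so the
HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS macro name and the memcg calls shown
here are assumptions rather than quotations from the patch:

    static inline void ll_account_page_dirtied(struct page *page,
                                               struct address_space *mapping)
    {
    #ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS          /* assumed macro name */
            struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);

            account_page_dirtied(page, mapping, memcg);  /* kernels 4.2 - 4.5 */
            mem_cgroup_end_page_stat(memcg);
    #elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
            account_page_dirtied(page, mapping);         /* symbol is exported */
    #else
            /* kallsyms_lookup_name() result, or the __set_page_dirty()
             * wrapper from vvp_dev.c when neither symbol is available
             */
            vvp_account_page_dirtied(page, mapping);
    #endif
            ll_page_tag_dirty(page, mapping);
    }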

Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I69d934480832f3909d3ec103f11e1d62489d70d7
Reviewed-on: https://review.whamcloud.com/40827
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Jian Yu <yujian@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/lustre_compat.h
lustre/llite/vvp_dev.c
lustre/llite/vvp_io.c

diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index cceefee..b5a704b 100644
@@ -501,10 +501,14 @@ static inline bool bdev_integrity_enabled(struct block_device *bdev, int rw)
 #define page_tree i_pages
 #define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags)
 #define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags)
+#define ll_xa_lock(lockp) xa_lock(lockp)
+#define ll_xa_unlock(lockp) xa_unlock(lockp)
 #else
 #define i_pages tree_lock
 #define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
+#define ll_xa_lock(lockp) spin_lock(lockp)
+#define ll_xa_unlock(lockp) spin_unlock(lockp)
 #endif
 
 #ifndef HAVE_LOCK_PAGE_MEMCG
diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c
index 4ded8b4..7db2a47 100644
@@ -268,6 +268,18 @@ struct lu_device_type vvp_device_type = {
 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
 unsigned int (*vvp_account_page_dirtied)(struct page *page,
                                         struct address_space *mapping);
+
+unsigned int ll_account_page_dirtied(struct page *page,
+                                    struct address_space *mapping)
+{
+       /* We must use __set_page_dirty(), which means dropping and
+        * re-taking the i_pages lock, and that hurts performance.
+        */
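+       /* warn=0: the caller, vvp_set_pagevec_dirty(), already does
+        * its own WARN_ON_ONCE() uptodate sanity check */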
+       ll_xa_unlock(&mapping->i_pages);
+       __set_page_dirty(page, mapping, 0);
+       ll_xa_lock(&mapping->i_pages);
+       return 0;
+}
 #endif
 
 /**
@@ -292,6 +304,8 @@ int vvp_global_init(void)
         */
        vvp_account_page_dirtied = (void *)
                cfs_kallsyms_lookup_name("account_page_dirtied");
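+       /* no kallsyms (or symbol not found): fall back to the slower
+        * __set_page_dirty() wrapper defined above */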
+       if (!vvp_account_page_dirtied)
+               vvp_account_page_dirtied = ll_account_page_dirtied;
 #endif
 
        return 0;
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index b014b4f..6d752b9 100644
@@ -952,6 +952,21 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 }
 
 /*
+ * Kernels from v4.19-rc5-248-g9b89a0355144 use the XArray for tags;
+ * prior kernels use a radix_tree.
+ */
+static inline void ll_page_tag_dirty(struct page *page,
+                                    struct address_space *mapping)
+{
+#ifndef HAVE_RADIX_TREE_TAG_SET
+       __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
+#else
+       radix_tree_tag_set(&mapping->page_tree, page_index(page),
+                          PAGECACHE_TAG_DIRTY);
+#endif
+}
+
+/*
  * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
  * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
  */
@@ -966,24 +981,9 @@ static inline void ll_account_page_dirtied(struct page *page,
 #elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
        account_page_dirtied(page, mapping);
 #else
-       if (vvp_account_page_dirtied)
-               vvp_account_page_dirtied(page, mapping);
-#endif
-}
-
-/*
- * From kernel v4.19-rc5-248-g9b89a0355144 use XArrary
- * Prior kernels use radix_tree for tags
- */
-static inline void ll_page_tag_dirty(struct page *page,
-                                    struct address_space *mapping)
-{
-#ifndef HAVE_RADIX_TREE_TAG_SET
-       __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
-#else
-       radix_tree_tag_set(&mapping->page_tree, page_index(page),
-                          PAGECACHE_TAG_DIRTY);
+       vvp_account_page_dirtied(page, mapping);
 #endif
+       ll_page_tag_dirty(page, mapping);
 }
 
 /* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
@@ -1049,7 +1049,6 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                         page, page->mapping, mapping);
                WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                ll_account_page_dirtied(page, mapping);
-               ll_page_tag_dirty(page, mapping);
                dirtied++;
                unlock_page_memcg(page);
        }
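
One subtlety in the fallback path: ll_account_page_dirtied() in
vvp_dev.c drops the i_pages lock with the plain ll_xa_unlock() rather
than the irqrestore variant. Assuming vvp_set_pagevec_dirty() still
takes the lock with ll_xa_lock_irqsave() (that call sits outside the
hunks above), interrupts stay disabled across __set_page_dirty(),
whose internal xa_lock_irqsave()/xa_unlock_irqrestore() pair then
nests safely. A sketch of the resulting lock sequence:

    ll_xa_lock_irqsave(&mapping->i_pages, flags);   /* caller: irqs off */
            ...
            ll_xa_unlock(&mapping->i_pages);        /* drop lock, irqs stay off */
            __set_page_dirty(page, mapping, 0);     /* locks/unlocks internally */
            ll_xa_lock(&mapping->i_pages);          /* retake before returning */
            ...
    ll_xa_unlock_irqrestore(&mapping->i_pages, flags);  /* caller: irqs back on */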