From 65e391e95b6d53b36840c95c2499f17fd84a1d36 Mon Sep 17 00:00:00 2001 From: Mr NeilBrown Date: Thu, 29 Apr 2021 09:04:04 -0400 Subject: [PATCH] LU-13783 libcfs: support absence of account_page_dirtied Some kernels export neither account_page_dirtied nor kallsyms_lookup_name. For these kernels we need to use __set_page_dirty() and suffer the cost of dropping and reclaiming the page-tree lock. Signed-off-by: Mr NeilBrown Change-Id: I69d934480832f3909d3ec103f11e1d62489d70d7 Reviewed-on: https://review.whamcloud.com/40827 Tested-by: jenkins Tested-by: Maloo Reviewed-by: James Simmons Reviewed-by: Jian Yu Reviewed-by: Oleg Drokin --- lustre/include/lustre_compat.h | 4 ++++ lustre/llite/vvp_dev.c | 14 ++++++++++++++ lustre/llite/vvp_io.c | 35 +++++++++++++++++------------------ 3 files changed, 35 insertions(+), 18 deletions(-) diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h index cceefee..b5a704b 100644 --- a/lustre/include/lustre_compat.h +++ b/lustre/include/lustre_compat.h @@ -501,10 +501,14 @@ static inline bool bdev_integrity_enabled(struct block_device *bdev, int rw) #define page_tree i_pages #define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags) #define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags) +#define ll_xa_lock(lockp) xa_lock(lockp) +#define ll_xa_unlock(lockp) xa_unlock(lockp) #else #define i_pages tree_lock #define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags) #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags) +#define ll_xa_lock(lockp) spin_lock(lockp) +#define ll_xa_unlock(lockp) spin_unlock(lockp) #endif #ifndef HAVE_LOCK_PAGE_MEMCG diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c index 4ded8b4..7db2a47 100644 --- a/lustre/llite/vvp_dev.c +++ b/lustre/llite/vvp_dev.c @@ -268,6 +268,18 @@ struct lu_device_type vvp_device_type = { #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT unsigned int (*vvp_account_page_dirtied)(struct 
page *page, struct address_space *mapping); + +unsigned int ll_account_page_dirtied(struct page *page, + struct address_space *mapping) +{ + /* must use __set_page_dirty, which means unlocking and + * relocking, which hurts performance. + */ + ll_xa_unlock(&mapping->i_pages); + __set_page_dirty(page, mapping, 0); + ll_xa_lock(&mapping->i_pages); + return 0; +} #endif /** @@ -292,6 +304,8 @@ int vvp_global_init(void) */ vvp_account_page_dirtied = (void *) cfs_kallsyms_lookup_name("account_page_dirtied"); + if (!vvp_account_page_dirtied) + vvp_account_page_dirtied = ll_account_page_dirtied; #endif return 0; diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index b014b4f..6d752b9 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -952,6 +952,21 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io, } /* + * From kernel v4.19-rc5-248-g9b89a0355144 use XArray + * Prior kernels use radix_tree for tags + */ +static inline void ll_page_tag_dirty(struct page *page, + struct address_space *mapping) +{ +#ifndef HAVE_RADIX_TREE_TAG_SET + __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY); +#else + radix_tree_tag_set(&mapping->page_tree, page_index(page), + PAGECACHE_TAG_DIRTY); +#endif +} + +/* * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied() * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied */ @@ -966,24 +981,9 @@ static inline void ll_account_page_dirtied(struct page *page, #elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT) account_page_dirtied(page, mapping); #else - if (vvp_account_page_dirtied) - vvp_account_page_dirtied(page, mapping); -#endif -} - -/* - * From kernel v4.19-rc5-248-g9b89a0355144 use XArrary - * Prior kernels use radix_tree for tags - */ -static inline void ll_page_tag_dirty(struct page *page, - struct address_space *mapping) -{ -#ifndef HAVE_RADIX_TREE_TAG_SET - __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY); -#else - 
radix_tree_tag_set(&mapping->page_tree, page_index(page), - PAGECACHE_TAG_DIRTY); + vvp_account_page_dirtied(page, mapping); #endif + ll_page_tag_dirty(page, mapping); } /* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers @@ -1049,7 +1049,6 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec) page, page->mapping, mapping); WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); ll_account_page_dirtied(page, mapping); - ll_page_tag_dirty(page, mapping); dirtied++; unlock_page_memcg(page); } -- 1.8.3.1