If we can't access account_page_dirtied either via its export
or via kallsyms lookup, the benefit of vvp_set_pagevec_dirty
is mostly lost, since we have to take the xarray lock
repeatedly to handle dirty page accounting.

Replace the more complicated compat code with a fallback that
calls __set_page_dirty_nobuffers() in a loop, which has the
same effect and is much simpler.

This also resolves 5.14 compatibility, as __set_page_dirty
is no longer exported there.
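
A minimal sketch of the fallback path described above (assuming
only the stock pagevec helpers and __set_page_dirty_nobuffers();
the function name here is just for illustration, not the actual
patch hunk):

	#include <linux/mm.h>		/* __set_page_dirty_nobuffers() */
	#include <linux/pagevec.h>	/* struct pagevec, pagevec_count() */

	static void ll_dirty_pagevec_fallback(struct pagevec *pvec)
	{
		int i;

		/* Each call takes the page-cache lock itself, so no
		 * batched dirty accounting is attempted.
		 */
		for (i = 0; i < pagevec_count(pvec); i++)
			__set_page_dirty_nobuffers(pvec->pages[i]);
	}
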
Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I3feb526b8eaaec3811c689a895875b409204a159
Reviewed-on: https://review.whamcloud.com/45927
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Yingjin Qian <qian@ddn.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
#define page_tree i_pages
#define ll_xa_lock_irqsave(lockp, flags) xa_lock_irqsave(lockp, flags)
#define ll_xa_unlock_irqrestore(lockp, flags) xa_unlock_irqrestore(lockp, flags)
-#define ll_xa_lock(lockp) xa_lock(lockp)
-#define ll_xa_unlock(lockp) xa_unlock(lockp)
#else
#define i_pages tree_lock
#define ll_xa_lock_irqsave(lockp, flags) spin_lock_irqsave(lockp, flags)
#define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
-#define ll_xa_lock(lockp) spin_lock(lockp)
-#define ll_xa_unlock(lockp) spin_unlock(lockp)
#endif
#ifndef HAVE_LOCK_PAGE_MEMCG
.ldt_ctx_tags = LCT_CL_THREAD
};
-#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
unsigned int (*vvp_account_page_dirtied)(struct page *page,
struct address_space *mapping);
-unsigned int ll_account_page_dirtied(struct page *page,
- struct address_space *mapping)
-{
- /* must use __set_page_dirty, which means unlocking and
- * relocking, which hurts performance.
- */
- ll_xa_unlock(&mapping->i_pages);
- __set_page_dirty(page, mapping, 0);
- ll_xa_lock(&mapping->i_pages);
- return 0;
-}
-#endif
-
/**
* A mutex serializing calls to vvp_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
goto out_kmem;
#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
+#ifdef HAVE_KALLSYMS_LOOKUP_NAME
/*
* Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
*/
vvp_account_page_dirtied = (void *)
cfs_kallsyms_lookup_name("account_page_dirtied");
- if (!vvp_account_page_dirtied)
- vvp_account_page_dirtied = ll_account_page_dirtied;
void vvp_set_pagevec_dirty(struct pagevec *pvec)
{
struct page *page = pvec->pages[0];
+ int count = pagevec_count(pvec);
+ int i;
+#ifdef HAVE_KALLSYMS_LOOKUP_NAME
struct address_space *mapping = page->mapping;
unsigned long flags;
unsigned long skip_pages = 0;
- int count = pagevec_count(pvec);
"mapping must be set. page %p, page->private (cl_page) %p\n",
page, (void *) page->private);
"mapping must be set. page %p, page->private (cl_page) %p\n",
page, (void *) page->private);
+/* kernels without HAVE_KALLSYMS_LOOKUP_NAME also don't have
+ * account_page_dirtied exported, and if we can't access that symbol, we
+ * can't do page dirtying in batch (taking the xarray lock only once), so
+ * we just fall back to a looped call to __set_page_dirty_nobuffers
+ */
+#ifndef HAVE_KALLSYMS_LOOKUP_NAME
+ for (i = 0; i < count; i++)
+ __set_page_dirty_nobuffers(pvec->pages[i]);
+#else
for (i = 0; i < count; i++) {
page = pvec->pages[i];
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}