The SUSE 15 SP6 6.4 kernel retains kallsyms_lookup_name, so
the fast path of vvp_set_batch_dirty() can be enabled.
However, having kallsyms_lookup_name available without
lock_page_memcg breaks some old assumptions.

Prefer folio_memcg_lock over lock_page_memcg. However, since
Linux commit v5.15-12272-g913ffbdd9985
  mm: unexport folio_memcg_{,un}lock
folio_memcg_lock is not exported either, so use
kallsyms_lookup_name to acquire the symbol at runtime.
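
The runtime resolution follows the usual kallsyms pattern; a
minimal sketch of what this patch wires up:

    #include <linux/memcontrol.h>  /* struct folio, folio_memcg_lock() */

    void (*vvp_folio_memcg_lock)(struct folio *folio);

    /* at module setup time */
    vvp_folio_memcg_lock = (void *)
            cfs_kallsyms_lookup_name("folio_memcg_lock");
    LASSERT(vvp_folio_memcg_lock);
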
HPE-bug-id: LUS-12371
Test-Parameters: trivial
Fixes: 61e83a6f130 ("LU-16113 build: Fix configure tests for lock_page_memcg")
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Change-Id: I8ac6b7bde8ee8964db5a801c2f3c4dfb2ef459f9
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/55300
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Petros Koutoupis <petros.koutoupis@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
# Kernel version 4.6 adds lock_page_memcg(page)
# Linux commit v5.15-12273-gab2f9d2d3626
# mm: unexport {,un}lock_page_memcg
+# and it was removed in v6.4-rc4-327-g6c77b607ee26
+# mm: kill lock|unlock_page_memcg()
#
AC_DEFUN([LC_SRC_LOCK_PAGE_MEMCG], [
LB2_LINUX_TEST_SRC([lock_page_memcg], [
]) # LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
#
+# LC_FOLIO_MEMCG_LOCK
+#
+# kernel v5.15-rc3-45-gf70ad4487415
+# mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock()
+# Use folio_memcg_[un]lock when [un]lock_page_memcg is removed.
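+# The probe compiles with -Werror so a missing declaration fails the test.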
+#
+AC_DEFUN([LC_SRC_FOLIO_MEMCG_LOCK], [
+ LB2_LINUX_TEST_SRC([folio_memcg_lock], [
+ #include <linux/memcontrol.h>
+ ],[
+ folio_memcg_lock(NULL);
+ ],[-Werror])
+])
+AC_DEFUN([LC_FOLIO_MEMCG_LOCK], [
+ LB2_MSG_LINUX_TEST_RESULT([if 'folio_memcg_lock' is defined],
+ [folio_memcg_lock], [
+ AC_DEFINE(HAVE_FOLIO_MEMCG_LOCK, 1, [folio_memcg_lock is defined])
+ ])
+]) # LC_FOLIO_MEMCG_LOCK
+
+#
# LC_HAVE_KIOCB_COMPLETE_2ARGS
#
# kernel v5.15-rc6-145-g6b19b766e8f0
]) # LC_HAVE_KIOCB_COMPLETE_2ARGS
#
+# LC_FOLIO_MEMCG_LOCK_EXPORTED
+#
+# Linux commit v5.15-12272-g913ffbdd9985
+# mm: unexport folio_memcg_{,un}lock
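+# LB_CHECK_EXPORT tests whether the kernel exports the symbol
+# from mm/memcontrol.c.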
+#
+AC_DEFUN([LC_FOLIO_MEMCG_LOCK_EXPORTED], [
+LB_CHECK_EXPORT([folio_memcg_lock], [mm/memcontrol.c],
+ [AC_DEFINE(FOLIO_MEMCG_LOCK_EXPORTED, 1,
+ [folio_memcg_{,un}lock are exported])])
+]) # LC_FOLIO_MEMCG_LOCK_EXPORTED
+
+#
# LC_EXPORTS_DELETE_FROM_PAGE_CACHE
#
# Linux commit v5.16-rc4-44-g452e9e6992fe
[delete_from_page_cache is exported])])
]) # LC_EXPORTS_DELETE_FROM_PAGE_CACHE
-
#
# LC_HAVE_WB_STAT_MOD
#
# 5.16
LC_SRC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
+ LC_SRC_FOLIO_MEMCG_LOCK
LC_SRC_HAVE_KIOCB_COMPLETE_2ARGS
# 5.17
LC_HAVE_FILEATTR_GET
LC_HAVE_COPY_PAGE_FROM_ITER_ATOMIC
- # 5.15
- LC_HAVE_GET_ACL_RCU_ARG
-
# 5.15
LC_HAVE_GET_ACL_RCU_ARG
LC_HAVE_INVALIDATE_LOCK
# 5.16
LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
+ LC_FOLIO_MEMCG_LOCK
LC_HAVE_KIOCB_COMPLETE_2ARGS
+ LC_FOLIO_MEMCG_LOCK_EXPORTED
LC_EXPORTS_DELETE_FROM_PAGE_CACHE
LC_HAVE_WB_STAT_MOD
flags)
#endif
-/* Linux commit v5.15-12273-gab2f9d2d3626
- * mm: unexport {,un}lock_page_memcg
- *
- * Note that the functions are still defined or declared breaking
- * the simple approach of just defining the missing functions here
- */
-#ifdef HAVE_LOCK_PAGE_MEMCG
-#define vvp_lock_page_memcg(page) lock_page_memcg((page))
-#define vvp_unlock_page_memcg(page) unlock_page_memcg((page))
-#else
-#define vvp_lock_page_memcg(page)
-#define vvp_unlock_page_memcg(page)
-#endif
-
#ifndef KMEM_CACHE_USERCOPY
#define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
usersize, ctor) \
unsigned int (*vvp_account_page_dirtied)(struct page *page,
struct address_space *mapping);
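+/* Resolved at runtime via kallsyms when folio_memcg_lock() is not exported */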
+#if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
+ defined(HAVE_KALLSYMS_LOOKUP_NAME)
+void (*vvp_folio_memcg_lock)(struct folio *folio);
+void (*vvp_folio_memcg_unlock)(struct folio *folio);
+#endif
/**
* A mutex serializing calls to vvp_inode_fini() under extreme memory
#endif
#endif
+#if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
+ defined(HAVE_KALLSYMS_LOOKUP_NAME)
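+	/* present but unexported on this kernel: resolve the symbols by name */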
+ vvp_folio_memcg_lock = (void *)
+ cfs_kallsyms_lookup_name("folio_memcg_lock");
+ LASSERT(vvp_folio_memcg_lock);
+
+ vvp_folio_memcg_unlock = (void *)
+ cfs_kallsyms_lookup_name("folio_memcg_unlock");
+ LASSERT(vvp_folio_memcg_unlock);
+#endif
+
return 0;
out_kmem:
struct address_space *mapping);
#endif
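+/*
+ * folio_memcg_[un]lock_page() map to whichever memcg locking primitive the
+ * kernel provides: folio_memcg_lock() directly when exported, a kallsyms-
+ * resolved pointer when not, lock_page_memcg() on older kernels, else no-op.
+ */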
+#ifdef HAVE_FOLIO_MEMCG_LOCK
+#ifdef FOLIO_MEMCG_LOCK_EXPORTED
+#define folio_memcg_lock_page(page) folio_memcg_lock(page_folio((page)))
+#define folio_memcg_unlock_page(page) folio_memcg_unlock(page_folio((page)))
+#elif defined(HAVE_KALLSYMS_LOOKUP_NAME)
+/* Use kallsyms_lookup_name to acquire folio_memcg_[un]lock */
+extern void (*vvp_folio_memcg_lock)(struct folio *folio);
+extern void (*vvp_folio_memcg_unlock)(struct folio *folio);
+#define folio_memcg_lock_page(page) \
+ vvp_folio_memcg_lock(page_folio((page)))
+#define folio_memcg_unlock_page(page) \
+ vvp_folio_memcg_unlock(page_folio((page)))
+#endif
+#elif defined(HAVE_LOCK_PAGE_MEMCG)
+#define folio_memcg_lock_page(page) lock_page_memcg((page))
+#define folio_memcg_unlock_page(page) unlock_page_memcg((page))
+#else
+#define folio_memcg_lock_page(page)
+#define folio_memcg_unlock_page(page)
+#endif
+
extern const struct file_operations vvp_dump_pgcache_file_ops;
#endif /* VVP_INTERNAL_H */
ClearPageReclaim(page);
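+		/* pin the page's memcg binding while dirty accounting runs */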
- vvp_lock_page_memcg(page);
+ folio_memcg_lock_page(page);
if (TestSetPageDirty(page)) {
/* page is already dirty .. no extra work needed
* set a flag for the i'th page to be skipped
*/
- vvp_unlock_page_memcg(page);
+ folio_memcg_unlock_page(page);
skip_pages |= (1ul << pgno++);
LASSERTF(pgno <= BITS_PER_LONG,
"Limit exceeded pgno: %d/%d\n", pgno,
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
ll_account_page_dirtied(page, mapping);
dirtied++;
- vvp_unlock_page_memcg(page);
+ folio_memcg_unlock_page(page);
}
}
ll_xa_unlock_irqrestore(&mapping->i_pages, flags);