LU-16113 build: Fix configure tests for lock_page_memcg 44/49144/8
author    Shaun Tancheff <shaun.tancheff@hpe.com>
          Tue, 15 Nov 2022 04:06:05 +0000 (22:06 -0600)
committer Oleg Drokin <green@whamcloud.com>
          Sat, 7 Jan 2023 07:54:20 +0000 (07:54 +0000)
Linux commit v5.15-12273-gab2f9d2d3626
   mm: unexport {,un}lock_page_memcg

The build fails when lock_page_memcg() exists but is not exported.

Adjust usage of [un]lock_page_memcg() to vvp_[un]lock_page_memcg() and
define the mapping accordingly to avoid the compile error.

Test-Parameters: trivial
HPE-bug-id: LUS-11189
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Change-Id: I18029d078a00a0b21a14721bcdf953939b4118a1
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/49144
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Reviewed-by: Jian Yu <yujian@whamcloud.com>
Reviewed-by: Petros Koutoupis <petros.koutoupis@hpe.com>
lustre/autoconf/lustre-core.m4
lustre/include/lustre_compat.h
lustre/llite/vvp_io.c

diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4
index 9063dd3..109a90c 100644
--- a/lustre/autoconf/lustre-core.m4
+++ b/lustre/autoconf/lustre-core.m4
@@ -1636,7 +1636,9 @@ EXTRA_KCFLAGS="$tmp_flags"
 #
 # LC_LOCK_PAGE_MEMCG
 #
-# Kernel version 4.6 adds lock_page_memcg
+# Kernel version 4.6 adds lock_page_memcg(page)
+# Linux commit v5.15-12273-gab2f9d2d3626
+#   mm: unexport {,un}lock_page_memcg
 #
 AC_DEFUN([LC_LOCK_PAGE_MEMCG], [
 LB_CHECK_COMPILE([if 'lock_page_memcg' is defined],
diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index 6857d81..e8c6128 100644
--- a/lustre/include/lustre_compat.h
+++ b/lustre/include/lustre_compat.h
@@ -426,9 +426,18 @@ static inline struct timespec current_time(struct inode *inode)
 #define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
 #endif
 
-#ifndef HAVE_LOCK_PAGE_MEMCG
-#define lock_page_memcg(page) do {} while (0)
-#define unlock_page_memcg(page) do {} while (0)
+/* Linux commit v5.15-12273-gab2f9d2d3626
+ *   mm: unexport {,un}lock_page_memcg
+ *
 + * Note that the functions are still defined or declared, which breaks
 + * the simple approach of just defining the missing functions here.
 + */
+#ifdef HAVE_LOCK_PAGE_MEMCG
+#define vvp_lock_page_memcg(page)      lock_page_memcg((page))
+#define vvp_unlock_page_memcg(page)    unlock_page_memcg((page))
+#else
+#define vvp_lock_page_memcg(page)
+#define vvp_unlock_page_memcg(page)
 #endif
 
 #ifndef KMEM_CACHE_USERCOPY
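
The wrapper mapping above is a plain preprocessor compat shim: call sites always use the vvp_*_page_memcg() names, which either forward to the kernel helpers or expand to nothing. Below is a minimal, self-contained sketch of how the mapping expands, for illustration only; the struct page forward declaration, the lock_page_memcg()/unlock_page_memcg() stubs, and the hand-set HAVE_LOCK_PAGE_MEMCG define are userspace stand-ins for the kernel API and the autoconf result, not the real thing.

#include <stdio.h>

struct page;				/* opaque stand-in for the kernel's struct page */

#ifdef HAVE_LOCK_PAGE_MEMCG
/* stubs standing in for the kernel helpers when the symbol is usable */
static void lock_page_memcg(struct page *page)
{
	(void)page;
	puts("lock_page_memcg()");
}

static void unlock_page_memcg(struct page *page)
{
	(void)page;
	puts("unlock_page_memcg()");
}

#define vvp_lock_page_memcg(page)	lock_page_memcg((page))
#define vvp_unlock_page_memcg(page)	unlock_page_memcg((page))
#else
/* symbol missing or unexported: the wrappers compile away to nothing */
#define vvp_lock_page_memcg(page)
#define vvp_unlock_page_memcg(page)
#endif

int main(void)
{
	struct page *page = NULL;

	(void)page;			/* unused when the wrappers expand to nothing */
	vvp_lock_page_memcg(page);
	/* ... TestSetPageDirty() and dirty accounting would go here ... */
	vvp_unlock_page_memcg(page);
	return 0;
}

Building with "cc -DHAVE_LOCK_PAGE_MEMCG sketch.c" versus "cc sketch.c" mirrors the two configure outcomes; the calling code is identical in both cases, which is the point of routing every call site through the vvp_ names.
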
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index dbb918a..3c500a5 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -1042,12 +1042,12 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
 
                ClearPageReclaim(page);
 
-               lock_page_memcg(page);
+               vvp_lock_page_memcg(page);
                if (TestSetPageDirty(page)) {
                        /* page is already dirty .. no extra work needed
                         * set a flag for the i'th page to be skipped
                         */
-                       unlock_page_memcg(page);
+                       vvp_unlock_page_memcg(page);
                        skip_pages |= (1 << i);
                }
        }
@@ -1075,7 +1075,7 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                ll_account_page_dirtied(page, mapping);
                dirtied++;
-               unlock_page_memcg(page);
+               vvp_unlock_page_memcg(page);
        }
        ll_xa_unlock_irqrestore(&mapping->i_pages, flags);