#
# LC_ACCOUNT_PAGE_DIRTIED_3ARGS
#
-# 4.2 kernel page dirtied takes 3 arguments
+# 4.2 [to 4.5] kernel page dirtied takes 3 arguments
#
AC_DEFUN([LC_ACCOUNT_PAGE_DIRTIED_3ARGS], [
LB_CHECK_COMPILE([if 'account_page_dirtied' with 3 args exists],
]) # LC_BI_STATUS
#
-# LC_UAPI_LINUX_MOUNT_H
-#
-# kernel 4.20 commit e262e32d6bde0f77fb0c95d977482fc872c51996
-# vfs: Suppress MS_* flag defs within the kernel ...
-#
-AC_DEFUN([LC_UAPI_LINUX_MOUNT_H], [
-tmp_flags="$EXTRA_KCFLAGS"
-EXTRA_KCFLAGS="-Werror"
-LB_CHECK_COMPILE([if MS_RDONLY was moved to uapi/linux/mount.h],
-uapi_linux_mount, [
- #include <uapi/linux/mount.h>
-],[
- int x = MS_RDONLY;
- (void)x;
-],[
- AC_DEFINE(HAVE_UAPI_LINUX_MOUNT_H, 1,
- [if MS_RDONLY was moved to uapi/linux/mount.h])
-])
-EXTRA_KCFLAGS="$tmp_flags"
-]) # LC_UAPI_LINUX_MOUNT_H
-
-#
-# LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
-#
-# kernel 4.20 commit 1863d77f15da0addcd293a1719fa5d3ef8cde3ca
-# SUNRPC: Replace the cache_detail->hash_lock with a regular spinlock
-#
-# Now that the reader functions are all RCU protected, use a regular
-# spinlock rather than a reader/writer lock.
-#
-AC_DEFUN([LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK], [
-tmp_flags="$EXTRA_KCFLAGS"
-EXTRA_KCFLAGS="-Werror"
-LB_CHECK_COMPILE([if cache_detail->hash_lock is a spinlock],
-hash_lock_isa_spinlock_t, [
- #include <linux/sunrpc/cache.h>
-],[
- spinlock_t *lock = &(((struct cache_detail *)0)->hash_lock);
- spin_lock(lock);
-],[
- AC_DEFINE(HAVE_CACHE_HASH_SPINLOCK, 1,
- [if cache_detail->hash_lock is a spinlock])
-])
-EXTRA_KCFLAGS="$tmp_flags"
-]) # LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
-
-#
# LC_BIO_INTEGRITY_ENABLED
#
# 4.13 removed bio_integrity_enabled
]) # LC_INODE_TIMESPEC64
#
+# LC___XA_SET_MARK
+#
+# kernel 4.20 commit v4.19-rc5-248-g9b89a0355144
+# xarray: Add XArray marks
+#
+AC_DEFUN([LC___XA_SET_MARK], [
+LB_CHECK_COMPILE([if '__xa_set_mark' exists],
+__xa_set_mark, [
+ #include <linux/xarray.h>
+ #include <linux/fs.h>
+],[
+ struct xarray *xa = NULL;
+
+ __xa_set_mark(xa, 0, PAGECACHE_TAG_DIRTY);
+],[
+ AC_DEFINE(HAVE___XA_SET_MARK, 1,
+ [__xa_set_mark exists])
+])
+]) # LC___XA_SET_MARK
+
+#
+# LC_UAPI_LINUX_MOUNT_H
+#
+# kernel 4.20 commit e262e32d6bde0f77fb0c95d977482fc872c51996
+# vfs: Suppress MS_* flag defs within the kernel ...
+#
+AC_DEFUN([LC_UAPI_LINUX_MOUNT_H], [
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if MS_RDONLY was moved to uapi/linux/mount.h],
+uapi_linux_mount, [
+ #include <uapi/linux/mount.h>
+],[
+ int x = MS_RDONLY;
+ (void)x;
+],[
+ AC_DEFINE(HAVE_UAPI_LINUX_MOUNT_H, 1,
+ [if MS_RDONLY was moved to uapi/linux/mount.h])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LC_UAPI_LINUX_MOUNT_H
+
+#
+# LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
+#
+# kernel 4.20 commit 1863d77f15da0addcd293a1719fa5d3ef8cde3ca
+# SUNRPC: Replace the cache_detail->hash_lock with a regular spinlock
+#
+# Now that the reader functions are all RCU protected, use a regular
+# spinlock rather than a reader/writer lock.
+#
+AC_DEFUN([LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK], [
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if cache_detail->hash_lock is a spinlock],
+hash_lock_isa_spinlock_t, [
+ #include <linux/sunrpc/cache.h>
+],[
+ spinlock_t *lock = &(((struct cache_detail *)0)->hash_lock);
+ spin_lock(lock);
+],[
+ AC_DEFINE(HAVE_CACHE_HASH_SPINLOCK, 1,
+ [if cache_detail->hash_lock is a spinlock])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
+
+#
# LC_HAS_LINUX_SELINUX_ENABLED
#
# kernel 5.1 commit 3d252529480c68bfd6a6774652df7c8968b28e41
LC_INODE_TIMESPEC64
# 4.20
- LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
-
- # 5.0
+ LC___XA_SET_MARK
LC_UAPI_LINUX_MOUNT_H
+ LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
# 5.1
LC_HAS_LINUX_SELINUX_ENABLED
RETURN(bytes > 0 ? bytes : rc);
}
+/*
+ * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
+ */
+static inline void ll_account_page_dirtied(struct page *page,
+ struct address_space *mapping)
+{
+#ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
+ struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
+
+ account_page_dirtied(page, mapping, memcg);
+ mem_cgroup_end_page_stat(memcg);
+#else
+ account_page_dirtied(page, mapping);
+#endif
+}
+
+/*
+ * From kernel v4.19-rc5-248-g9b89a0355144 use XArray
+ * Prior kernels use radix_tree for tags
+ */
+static inline void ll_page_tag_dirty(struct page *page,
+ struct address_space *mapping)
+{
+#ifdef HAVE___XA_SET_MARK
+ __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
+#else
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+#endif
+}
+
/* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
* Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
*
* Current with Linus tip of tree (7/13/2019):
* v5.2-rc4-224-ge01e060fe0
*
- * Backwards compat for 3.x, 4.x kernels relating to memcg handling
- * & rename of radix tree to xarray. */
+ * Backwards compat for 3.x, 5.x kernels relating to memcg handling
+ * & rename of radix tree to xarray.
+ */
void vvp_set_pagevec_dirty(struct pagevec *pvec)
{
struct page *page = pvec->pages[0];
struct address_space *mapping = page->mapping;
-#if defined HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
- struct mem_cgroup *memcg;
-#endif
unsigned long flags;
int count = pagevec_count(pvec);
int dirtied = 0;
"all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
page, page->mapping, mapping);
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-#ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
- memcg = mem_cgroup_begin_page_stat(page);
- account_page_dirtied(page, mapping, memcg);
- mem_cgroup_end_page_stat(memcg);
-#else
- account_page_dirtied(page, mapping);
-#endif
- radix_tree_tag_set(&mapping->page_tree, page_index(page),
- PAGECACHE_TAG_DIRTY);
+ ll_account_page_dirtied(page, mapping);
+ ll_page_tag_dirty(page, mapping);
dirtied++;
unlock_page_memcg(page);
}