EXTRA_KCFLAGS="$tmp_flags"
]) # LIBCFS_CACHE_DETAIL_WRITERS
+#
+# LIBCFS_HAVE_NR_UNSTABLE_NFS
+#
+# kernel v5.8-rc1~201^2~75
+# mm/writeback: discard NR_UNSTABLE_NFS, use NR_WRITEBACK instead
+#
+# Defines HAVE_NR_UNSTABLE_NFS when the NR_UNSTABLE_NFS zone-stat
+# item still exists in <linux/mm.h> (i.e. pre-v5.8 kernels).
+#
+AC_DEFUN([LIBCFS_HAVE_NR_UNSTABLE_NFS], [
+# Save the caller's flags and build with -Werror so that a reference
+# to the removed enum constant fails the compile test outright.
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if NR_UNSTABLE_NFS still in use],
+nr_unstable_nfs_exists, [
+	#include <linux/mm.h>
+
+	int i;
+],[
+	i = NR_UNSTABLE_NFS;
+],[
+	AC_DEFINE(HAVE_NR_UNSTABLE_NFS, 1,
+		[NR_UNSTABLE_NFS is still in use.])
+])
+# Restore the caller's flags.
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LIBCFS_HAVE_NR_UNSTABLE_NFS
#
# LIBCFS_PROG_LINUX
# 5.3
LIBCFS_LOOKUP_USER_KEY
LIBCFS_CACHE_DETAIL_WRITERS
+LIBCFS_HAVE_NR_UNSTABLE_NFS
]) # LIBCFS_PROG_LINUX
#
* In practice this can work pretty good because the pages in the same RPC
* are likely from the same page zone.
*/
+#ifdef HAVE_NR_UNSTABLE_NFS
+/* Old kernels use a separate counter for unstable pages,
+ * newer kernels treat them like any other writeback.
+ * On old kernels, alias NR_WRITEBACK to NR_UNSTABLE_NFS so the
+ * mod_zone_page_state() calls below can name NR_WRITEBACK
+ * unconditionally and still update the legacy counter.
+ */
+#define NR_WRITEBACK NR_UNSTABLE_NFS
+#endif
+
static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
struct osc_brw_async_args *aa,
int factor)
}
if (count > 0) {
- mod_zone_page_state(zone, NR_UNSTABLE_NFS,
+ mod_zone_page_state(zone, NR_WRITEBACK,
factor * count);
count = 0;
}
++count;
}
if (count > 0)
- mod_zone_page_state(zone, NR_UNSTABLE_NFS, factor * count);
+ mod_zone_page_state(zone, NR_WRITEBACK, factor * count);
}
static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc,