]) # LIBCFS_NEW_KERNEL_WRITE
#
+# LIBCFS_MM_TOTALRAM_PAGES_FUNC
+#
+# kernel 5.0 commit ca79b0c211af63fa3276f0e3fd7dd9ada2439839
+# mm: convert totalram_pages and totalhigh_pages variables to atomic
+#
+# Probe whether totalram_pages is an accessor function (kernel >= 5.0)
+# rather than a plain variable.  On success HAVE_TOTALRAM_PAGES_AS_FUNC
+# is defined for use by the cfs_totalram_pages() compat wrapper.
+#
+AC_DEFUN([LIBCFS_MM_TOTALRAM_PAGES_FUNC], [
+# -Werror turns the implicit declaration of totalram_pages_inc() into a
+# hard compile failure on pre-5.0 kernels, where that helper does not
+# exist; without it the probe could falsely succeed.
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if totalram_pages is a function],
+totalram_pages, [
+	#include <linux/mm.h>
+],[
+	totalram_pages_inc();
+],[
+	AC_DEFINE(HAVE_TOTALRAM_PAGES_AS_FUNC, 1,
+		[if totalram_pages is a function])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LIBCFS_MM_TOTALRAM_PAGES_FUNC
+
+#
# LIBCFS_NEW_KERNEL_WRITE
#
# 4.14 commit bdd1d2d3d251c65b74ac4493e08db18971c09240 changed
LIBCFS_WAIT_VAR_EVENT
# 4.17
LIBCFS_CLEAR_AND_WAKE_UP_BIT
+# 5.0
+LIBCFS_MM_TOTALRAM_PAGES_FUNC
]) # LIBCFS_PROG_LINUX
#
# endif /* !__CHECKER__ */
#endif /* !__must_hold */
+/*
+ * Since kernel 5.0 (commit ca79b0c211af) totalram_pages is an accessor
+ * function; on older kernels it is a plain variable.  cfs_totalram_pages()
+ * hides that difference so callers use a single spelling everywhere.
+ * The #ifndef guards let an external definition take precedence.
+ */
+#ifdef HAVE_TOTALRAM_PAGES_AS_FUNC
+ #ifndef cfs_totalram_pages
+ #define cfs_totalram_pages() totalram_pages()
+ #endif
+#else
+ #ifndef cfs_totalram_pages
+ #define cfs_totalram_pages() totalram_pages
+ #endif
+#endif
+
/* need both kernel and user-land acceptor */
#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
# define NUM_CACHEPAGES \
-	min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
+	/* NOTE(review): '<<' binds looser than '*' and '/', so this caps at
+	 * 1UL << ((30 - PAGE_SHIFT) * 3 / 4) pages, not 3/4 of 1 GiB.  Kept
+	 * as-is here since this change only renames the accessor — confirm
+	 * the intended cap with the maintainers. */
+	min(cfs_totalram_pages(), 1UL << (30 - PAGE_SHIFT) * 3 / 4)
#else
-# define NUM_CACHEPAGES totalram_pages
+# define NUM_CACHEPAGES cfs_totalram_pages()
#endif
static inline unsigned int memory_pressure_get(void)
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
+ int total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
cli->cl_dirty_max_pages = dirty_max;
}
- if (cli->cl_dirty_max_pages > totalram_pages / 8)
- cli->cl_dirty_max_pages = totalram_pages / 8;
+ if (cli->cl_dirty_max_pages > cfs_totalram_pages() / 8)
+ cli->cl_dirty_max_pages = cfs_totalram_pages() / 8;
/* This value is exported to userspace through the max_dirty_mb
* parameter. So we round up the number of pages to make it a round
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
- } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
+ } else if (cfs_totalram_pages() >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
+ } else if (cfs_totalram_pages() >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
+ } else if (cfs_totalram_pages() >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
if (osc_on_mdt(obddev->obd_name))
return rc;
pages_number = round_up(ra_max_mb, 1024 * 1024) >> PAGE_SHIFT;
- if (pages_number < 0 || pages_number > totalram_pages / 2) {
+ if (pages_number < 0 || pages_number > cfs_totalram_pages() / 2) {
/* 1/2 of RAM */
CERROR("%s: can't set max_readahead_mb=%llu > %luMB\n",
sbi->ll_fsname, PAGES_TO_MiB(pages_number),
- PAGES_TO_MiB(totalram_pages));
+ PAGES_TO_MiB(cfs_totalram_pages()));
return -ERANGE;
}
pages_number >>= PAGE_SHIFT;
- if (pages_number < 0 || pages_number > totalram_pages) {
+ if (pages_number < 0 || pages_number > cfs_totalram_pages()) {
CERROR("%s: can't set max cache more than %lu MB\n",
sbi->ll_fsname,
- PAGES_TO_MiB(totalram_pages));
+ PAGES_TO_MiB(cfs_totalram_pages()));
RETURN(-ERANGE);
}
/* Allow enough cache so clients can make well-formed RPCs */
pages_number = round_up(pages_number, 1024 * 1024) >> PAGE_SHIFT;
if (pages_number <= 0 ||
pages_number >= MiB_TO_PAGES(OSC_MAX_DIRTY_MB_MAX) ||
- pages_number > totalram_pages / 4) /* 1/4 of RAM */
+ pages_number > cfs_totalram_pages() / 4) /* 1/4 of RAM */
return -ERANGE;
spin_lock(&cli->cl_loi_list_lock);
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL). */
- if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
- obd_max_dirty_pages = totalram_pages / 4;
+ if (cfs_totalram_pages() <= 512 << (20 - PAGE_SHIFT))
+ obd_max_dirty_pages = cfs_totalram_pages() / 4;
else
- obd_max_dirty_pages = totalram_pages / 2;
+ obd_max_dirty_pages = cfs_totalram_pages() / 2;
err = obd_init_caches();
if (err)
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = totalram_pages;
+ cache_size = cfs_totalram_pages();
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
- if (val > ((totalram_pages / 10) * 9)) {
+ if (val > ((cfs_totalram_pages() / 10) * 9)) {
/* Somebody wants to assign too much memory to dirty pages */
return -EINVAL;
}
pages_number = MiB_TO_PAGES(max_dirty_mb);
if (pages_number >= MiB_TO_PAGES(OSC_MAX_DIRTY_MB_MAX) ||
- pages_number > totalram_pages / 4) /* 1/4 of RAM */
+ pages_number > cfs_totalram_pages() / 4) /* 1/4 of RAM */
return -ERANGE;
spin_lock(&cli->cl_loi_list_lock);
struct seq_file *m = file->private_data;
struct ptlrpc_service *svc = m->private;
unsigned long long val;
+ unsigned long long limit;
int bufpages;
int rc;
* will be upgraded */
bufpages = (roundup_pow_of_two(svc->srv_buf_size) + PAGE_SIZE - 1) >>
PAGE_SHIFT;
+ limit = cfs_totalram_pages() / (2 * bufpages);
/* do not allow history to consume more than half max number of rqbds */
- if ((svc->srv_nrqbds_max == 0 && val > totalram_pages / (2 * bufpages)) ||
+ if ((svc->srv_nrqbds_max == 0 && val > limit) ||
(svc->srv_nrqbds_max != 0 && val > svc->srv_nrqbds_max / 2))
return -ERANGE;
"max waitqueue depth: %u\n"
"max wait time ms: %lld\n"
"out of mem: %lu\n",
- totalram_pages, PAGES_PER_POOL,
+ cfs_totalram_pages(), PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_max_pools,
page_pools.epp_total_pages,
DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
enc_pools_shrink_count, enc_pools_shrink_scan);
- page_pools.epp_max_pages = totalram_pages / 8;
+ page_pools.epp_max_pages = cfs_totalram_pages() / 8;
if (enc_pool_max_memory_mb > 0 &&
- enc_pool_max_memory_mb <= (totalram_pages >> mult))
+ enc_pool_max_memory_mb <= (cfs_totalram_pages() >> mult))
page_pools.epp_max_pages = enc_pool_max_memory_mb << mult;
page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);