]) # LC_HAVE_INODE_GET_MTIME_SEC
#
+# LC_HAVE_SHRINKER_ALLOC
+#
+# Linux commit v6.6-rc4-53-gc42d50aefd17
+# mm: shrinker: add infrastructure for dynamically allocating shrinker
+#
+# Compile-only probe for shrinker_alloc(); the cached result is consumed
+# and reported by LC_HAVE_SHRINKER_ALLOC.  Built with -Werror so an
+# implicit-declaration warning counts as failure.
+AC_DEFUN([LC_SRC_HAVE_SHRINKER_ALLOC], [
+	LB2_LINUX_TEST_SRC([shrinker_alloc_exists], [
+		#include <linux/shrinker.h>
+	],[
+		struct shrinker *shrink __attribute__ ((unused));
+
+		shrink = shrinker_alloc(0, "%s", "whoami");
+	],[-Werror])
+])
+# Report the shrinker_alloc() probe result and define HAVE_SHRINKER_ALLOC
+# when the kernel provides the dynamic shrinker allocation API.
+AC_DEFUN([LC_HAVE_SHRINKER_ALLOC], [
+	LB2_MSG_LINUX_TEST_RESULT([if 'shrinker_alloc()' exists],
+	[shrinker_alloc_exists], [
+		AC_DEFINE(HAVE_SHRINKER_ALLOC, 1,
+			['shrinker_alloc()' exists])
+	])
+]) # LC_HAVE_SHRINKER_ALLOC
+
+#
# LC_PROG_LINUX
#
# Lustre linux kernel checks
# 6.7
LC_SRC_HAVE_INODE_GET_MTIME_SEC
+ LC_SRC_HAVE_SHRINKER_ALLOC
# kernel patch to extend integrity interface
LC_SRC_BIO_INTEGRITY_PREP_FN
# 6.7
LC_HAVE_INODE_GET_MTIME_SEC
+ LC_HAVE_SHRINKER_ALLOC
# kernel patch to extend integrity interface
LC_BIO_INTEGRITY_PREP_FN
#define migrate_folio migratepage
#endif
-#ifdef HAVE_REGISTER_SHRINKER_FORMAT_NAMED
-#define register_shrinker(_s) register_shrinker((_s), "%ps", (_s))
-#elif !defined(HAVE_REGISTER_SHRINKER_RET)
-#define register_shrinker(_s) (register_shrinker(_s), 0)
+/*
+ * Kernel-version-independent description of a shrinker, consumed by
+ * ll_shrinker_create() below.  Only the fields Lustre actually sets on
+ * a struct shrinker are mirrored here; the old register_shrinker()
+ * wrapper macros this replaces are no longer needed.
+ */
+struct ll_shrinker_ops {
+#ifdef HAVE_SHRINKER_COUNT
+	/* modern split count/scan callbacks */
+	unsigned long (*count_objects)(struct shrinker *,
+				       struct shrink_control *sc);
+	unsigned long (*scan_objects)(struct shrinker *,
+				       struct shrink_control *sc);
+#else
+	/* legacy combined shrink() callback */
+	int (*shrink)(struct shrinker *, struct shrink_control *sc);
+#endif
+	int seeks; /* seeks to recreate an obj */
+};
+
+#ifndef HAVE_SHRINKER_ALLOC
+/*
+ * Compat for kernels without shrinker_alloc()/shrinker_free(): emulate
+ * shrinker_free() by unregistering the shrinker and releasing the
+ * allocation made by ll_shrinker_create().
+ */
+static inline void shrinker_free(struct shrinker *shrinker)
+{
+	unregister_shrinker(shrinker);
+	OBD_FREE_PTR(shrinker);
+}
#endif
+/* allocate and register a shrinker, return should be checked with IS_ERR() */
+static inline struct shrinker *
+ll_shrinker_create(struct ll_shrinker_ops *ops, unsigned int flags,
+		   const char *fmt, ...)
+{
+	struct shrinker *shrinker;
+	int rc = 0;
+
+	/* the (fmt, ...) name is only consumed by kernels whose
+	 * allocation/registration API accepts a format string */
+#if defined(HAVE_REGISTER_SHRINKER_FORMAT_NAMED) || defined(HAVE_SHRINKER_ALLOC)
+	struct va_format vaf;
+	va_list args;
+#endif
+
+#ifdef HAVE_SHRINKER_ALLOC
+	/* kernel allocates the shrinker; forward our varargs via %pV */
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	shrinker = shrinker_alloc(flags, "%pV", &vaf);
+	va_end(args);
+#else
+	/* no alloc API: allocate it ourselves (presumably zero-filled by
+	 * OBD_ALLOC_PTR — confirm; unset struct shrinker fields rely on it) */
+	OBD_ALLOC_PTR(shrinker);
+#endif
+	if (!shrinker)
+		return ERR_PTR(-ENOMEM);
+
+	/* copy the caller-supplied ops into the real struct shrinker */
+#ifdef HAVE_SHRINKER_COUNT
+	shrinker->count_objects = ops->count_objects;
+	shrinker->scan_objects = ops->scan_objects;
+#else
+	shrinker->shrink = ops->shrink;
+#endif
+	shrinker->seeks = ops->seeks;
+
+#ifdef HAVE_SHRINKER_ALLOC
+	/* returns void; the name was already attached by shrinker_alloc() */
+	shrinker_register(shrinker);
+#else
+	#ifdef HAVE_REGISTER_SHRINKER_FORMAT_NAMED
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	rc = register_shrinker(shrinker, "%pV", &vaf);
+	va_end(args);
+	#elif defined(HAVE_REGISTER_SHRINKER_RET)
+	rc = register_shrinker(shrinker);
+	#else
+	/* oldest API: register_shrinker() returns void, rc stays 0 */
+	register_shrinker(shrinker);
+	#endif
+#endif
+	if (rc) {
+#ifdef HAVE_SHRINKER_ALLOC
+		/* NOTE(review): unreachable — rc remains 0 on this branch
+		 * since shrinker_register() cannot fail; kept for symmetry */
+		shrinker_free(shrinker);
+#else
+		OBD_FREE_PTR(shrinker);
+#endif
+		shrinker = ERR_PTR(rc);
+	}
+	return shrinker;
+}
+
#ifndef fallthrough
# if defined(__GNUC__) && __GNUC__ >= 7
# define fallthrough __attribute__((fallthrough)) /* fallthrough */
sc->gfp_mask);
}
-static struct shrinker ldlm_pools_srv_shrinker = {
+/* server-side pool shrinker ops, handed to ll_shrinker_create() */
+static struct ll_shrinker_ops ldlm_pools_srv_sh_ops = {
	.count_objects = ldlm_pools_srv_count,
	.scan_objects = ldlm_pools_srv_scan,
	.seeks = DEFAULT_SEEKS,
};
-static struct shrinker ldlm_pools_cli_shrinker = {
+static struct ll_shrinker_ops ldlm_pools_cli_sh_ops = {
.count_objects = ldlm_pools_cli_count,
.scan_objects = ldlm_pools_cli_scan,
.seeks = DEFAULT_SEEKS,
sc->nr_to_scan, sc->gfp_mask);
}
-static struct shrinker ldlm_pools_srv_shrinker = {
+/* shrink()-style server-side pool ops (non-HAVE_SHRINKER_COUNT build) */
+static struct ll_shrinker_ops ldlm_pools_srv_sh_ops = {
	.shrink = ldlm_pools_srv_shrink,
	.seeks = DEFAULT_SEEKS,
};
-static struct shrinker ldlm_pools_cli_shrinker = {
+/* shrink()-style client-side pool ops (non-HAVE_SHRINKER_COUNT build) */
+static struct ll_shrinker_ops ldlm_pools_cli_sh_ops = {
	.shrink = ldlm_pools_cli_shrink,
	.seeks = DEFAULT_SEEKS,
};
static bool ldlm_pools_init_done;
+/* allocated by ll_shrinker_create() in ldlm_pools_init(), released via
+ * shrinker_free() in ldlm_pools_fini() / the init error path */
+static struct shrinker *ldlm_pools_srv_shrinker;
+static struct shrinker *ldlm_pools_cli_shrinker;
+
int ldlm_pools_init(void)
{
	time64_t delay;
	int rc;
+	ENTRY;
+
#ifdef HAVE_SERVER_SUPPORT
	delay = min(LDLM_POOL_SRV_DEF_RECALC_PERIOD,
		    LDLM_POOL_CLI_DEF_RECALC_PERIOD);
#else
	delay = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
#endif
+	/* shrinkers are now allocated+registered in one step; failures
+	 * come back as ERR_PTR(), not as an int rc */
+	ldlm_pools_srv_shrinker = ll_shrinker_create(&ldlm_pools_srv_sh_ops, 0,
+						     "ldlm_pools_server");
+	if (IS_ERR(ldlm_pools_srv_shrinker))
+		GOTO(out, rc = PTR_ERR(ldlm_pools_srv_shrinker));
-	rc = register_shrinker(&ldlm_pools_srv_shrinker);
-	if (rc)
-		goto out;
-
-	rc = register_shrinker(&ldlm_pools_cli_shrinker);
-	if (rc)
-		goto out_shrinker;
+	ldlm_pools_cli_shrinker = ll_shrinker_create(&ldlm_pools_cli_sh_ops, 0,
+						     "ldlm_pools_client");
+	if (IS_ERR(ldlm_pools_cli_shrinker))
+		GOTO(out_shrinker, rc = PTR_ERR(ldlm_pools_cli_shrinker));
	schedule_delayed_work(&ldlm_pools_recalc_work, delay);
	ldlm_pools_init_done = true;
-	return 0;
+	RETURN(0);
out_shrinker:
+	/* cli creation failed: undo the srv shrinker.  (The deleted line
+	 * unregistered the *cli* shrinker here — a pre-existing bug this
+	 * patch also fixes.) */
-	unregister_shrinker(&ldlm_pools_cli_shrinker);
+	shrinker_free(ldlm_pools_srv_shrinker);
out:
-	return rc;
+	RETURN(rc);
}
void ldlm_pools_fini(void)
{
if (ldlm_pools_init_done) {
- unregister_shrinker(&ldlm_pools_srv_shrinker);
- unregister_shrinker(&ldlm_pools_cli_shrinker);
-
cancel_delayed_work_sync(&ldlm_pools_recalc_work);
+
+ shrinker_free(ldlm_pools_srv_shrinker);
+ shrinker_free(ldlm_pools_cli_shrinker);
}
ldlm_pools_init_done = false;
if (!group_info)
return 0;
- atomic_inc(&group_info->usage);
+ get_group_info(group_info);
rc = lustre_groups_search(group_info, grp);
- if (atomic_dec_and_test(&group_info->usage))
- groups_free(group_info);
+ put_group_info(group_info);
}
return rc;
}
}
#ifdef HAVE_SHRINKER_COUNT
-static struct shrinker lu_site_shrinker = {
+static struct ll_shrinker_ops lu_site_sh_ops = {
.count_objects = lu_cache_shrink_count,
.scan_objects = lu_cache_shrink_scan,
.seeks = DEFAULT_SEEKS,
return cached;
}
-static struct shrinker lu_site_shrinker = {
+/* shrink()-style lu_site cache ops (non-HAVE_SHRINKER_COUNT build) */
+static struct ll_shrinker_ops lu_site_sh_ops = {
	.shrink = lu_cache_shrink,
	.seeks = DEFAULT_SEEKS,
};
#endif /* HAVE_SHRINKER_COUNT */
+/* allocated by ll_shrinker_create() in lu_global_init() */
+static struct shrinker *lu_site_shrinker;
+
/* Initialization of global lu_* data. */
int lu_global_init(void)
{
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- result = register_shrinker(&lu_site_shrinker);
- if (result)
+ lu_site_shrinker = ll_shrinker_create(&lu_site_sh_ops, 0, "lu_site");
+ if (IS_ERR(lu_site_shrinker)) {
+ result = PTR_ERR(lu_site_shrinker);
goto out_env;
+ }
result = rhashtable_init(&lu_env_rhash, &lu_env_rhash_params);
return result;
out_shrinker:
- unregister_shrinker(&lu_site_shrinker);
+ shrinker_free(lu_site_shrinker);
out_env:
/* ordering here is explained in lu_global_fini() */
lu_context_key_degister(&lu_global_key);
/* Dual to lu_global_init(). */
void lu_global_fini(void)
{
- unregister_shrinker(&lu_site_shrinker);
+ shrinker_free(lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
bool osc_page_cache_shrink_enabled = true;
#ifdef HAVE_SHRINKER_COUNT
-static struct shrinker osc_cache_shrinker = {
+static struct ll_shrinker_ops osc_cache_sh_ops = {
.count_objects = osc_cache_shrink_count,
.scan_objects = osc_cache_shrink_scan,
.seeks = DEFAULT_SEEKS,
return osc_cache_shrink_count(shrinker, sc);
}
-static struct shrinker osc_cache_shrinker = {
+/* shrink()-style OSC cache ops (non-HAVE_SHRINKER_COUNT build) */
+static struct ll_shrinker_ops osc_cache_sh_ops = {
	.shrink = osc_cache_shrink,
	.seeks = DEFAULT_SEEKS,
};
#endif
+/* allocated by ll_shrinker_create() in osc_init() */
+static struct shrinker *osc_cache_shrinker;
+
static int __init osc_init(void)
{
unsigned int reqpool_size;
if (rc)
RETURN(rc);
- rc = register_shrinker(&osc_cache_shrinker);
- if (rc)
- GOTO(out_kmem, rc);
+ osc_cache_shrinker = ll_shrinker_create(&osc_cache_sh_ops, 0,
+ "osc_cache");
+ if (IS_ERR(osc_cache_shrinker))
+ GOTO(out_kmem, rc = PTR_ERR(osc_cache_shrinker));
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
out_req_pool:
ptlrpc_free_rq_pool(osc_rq_pool);
out_shrinker:
- unregister_shrinker(&osc_cache_shrinker);
+ shrinker_free(osc_cache_shrinker);
out_kmem:
lu_kmem_fini(osc_caches);
class_unregister_type(LUSTRE_OSC_NAME);
ptlrpc_free_rq_pool(osc_rq_pool);
osc_stop_grant_work();
- unregister_shrinker(&osc_cache_shrinker);
+ shrinker_free(osc_cache_shrinker);
lu_kmem_fini(osc_caches);
}
/*
* memory shrinker
*/
- struct shrinker pool_shrinker;
+ struct ll_shrinker_ops epp_shops;
+ struct shrinker *pool_shrinker;
struct mutex add_pages_mutex;
} **page_pools;
GOTO(fail, rc = -ENOMEM);
/* Pass pool number as part of pools_shrinker_seeks value */
#ifdef HAVE_SHRINKER_COUNT
- pool->pool_shrinker.count_objects = enc_pools_shrink_count;
- pool->pool_shrinker.scan_objects = enc_pools_shrink_scan;
+ pool->epp_shops.count_objects = enc_pools_shrink_count;
+ pool->epp_shops.scan_objects = enc_pools_shrink_scan;
#else
- pool->pool_shrinker.shrink = enc_pools_shrink;
+ pool->epp_shops.shrink = enc_pools_shrink;
#endif
- pool->pool_shrinker.seeks = INDEX_TO_SEEKS(pool_index);
+ pool->epp_shops.seeks = INDEX_TO_SEEKS(pool_index);
- rc = register_shrinker(&pool->pool_shrinker);
- if (rc)
- GOTO(fail, rc);
+ pool->pool_shrinker = ll_shrinker_create(&pool->epp_shops, 0,
+ "sptlrpc_enc_pool");
+ if (IS_ERR(pool->pool_shrinker))
+ GOTO(fail, rc = PTR_ERR(pool->pool_shrinker));
mutex_init(&pool->add_pages_mutex);
}
for (pool_index = 0; pool_index <= to_revert; pool_index++) {
pool = page_pools[pool_index];
if (pool) {
- if (pool->epp_pools)
+ if (pool->epp_pools)
enc_pools_free(pool_index);
OBD_FREE(pool, sizeof(**page_pools));
}
for (pool_index = 0; pool_index < POOLS_COUNT; pool_index++) {
pool = page_pools[pool_index];
- unregister_shrinker(&pool->pool_shrinker);
+ shrinker_free(pool->pool_shrinker);
LASSERT(pool->epp_pools);
LASSERT(pool->epp_total_pages == pool->epp_free_pages);