/*
* Shrinker
*/
-# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
- struct shrinker *shrinker, \
- struct shrink_control *sc
-# define shrink_param(sc, var) ((sc)->var)
-
-#ifdef HAVE_SHRINKER_COUNT
-struct shrinker_var {
- unsigned long (*count)(struct shrinker *,
- struct shrink_control *sc);
- unsigned long (*scan)(struct shrinker *,
- struct shrink_control *sc);
-};
-# define DEF_SHRINKER_VAR(name, shrink, count_obj, scan_obj) \
- struct shrinker_var name = { .count = count_obj, .scan = scan_obj }
-#else
-struct shrinker_var {
- int (*shrink)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-};
-# define DEF_SHRINKER_VAR(name, shrinker, count, scan) \
- struct shrinker_var name = { .shrink = shrinker }
+#ifndef SHRINK_STOP
# define SHRINK_STOP (~0UL)
#endif
-static inline
-struct shrinker *set_shrinker(int seek, struct shrinker_var *var)
-{
- struct shrinker *s;
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL)
- return (NULL);
-
-#ifdef HAVE_SHRINKER_COUNT
- s->count_objects = var->count;
- s->scan_objects = var->scan;
-#else
- s->shrink = var->shrink;
-#endif
- s->seeks = seek;
-
- register_shrinker(s);
-
- return s;
-}
-
-static inline
-void remove_shrinker(struct shrinker *shrinker)
-{
- if (shrinker == NULL)
- return;
-
- unregister_shrinker(shrinker);
- kfree(shrinker);
-}
-
#ifndef HAVE_MMAP_LOCK
static inline void mmap_write_lock(struct mm_struct *mm)
{
return atomic_read(&pl->pl_lock_volume_factor);
}
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-
/*
* count locks from all namespaces (if possible). Returns number of
* cached locks.
sc->gfp_mask);
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .count_objects = ldlm_pools_srv_count,
+ .scan_objects = ldlm_pools_srv_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .count_objects = ldlm_pools_cli_count,
+ .scan_objects = ldlm_pools_cli_scan,
+ .seeks = DEFAULT_SEEKS,
+};
#else
/*
* Cancel \a nr locks from all namespaces (if possible). Returns number of
return ldlm_pools_scan(client, nr, gfp_mask);
}
-static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int ldlm_pools_srv_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ sc->nr_to_scan, sc->gfp_mask);
}
-static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int ldlm_pools_cli_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ sc->nr_to_scan, sc->gfp_mask);
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .shrink = ldlm_pools_srv_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .shrink = ldlm_pools_cli_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
#endif /* HAVE_SHRINKER_COUNT */
static time64_t ldlm_pools_recalc_delay(enum ldlm_side side)
{
time64_t delay;
- DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
- ldlm_pools_srv_count, ldlm_pools_srv_scan);
- DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
- ldlm_pools_cli_count, ldlm_pools_cli_scan);
-
#ifdef HAVE_SERVER_SUPPORT
delay = min(LDLM_POOL_SRV_DEF_RECALC_PERIOD,
LDLM_POOL_CLI_DEF_RECALC_PERIOD);
#endif
schedule_delayed_work(&ldlm_pools_recalc_work, delay);
- ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS, &shsvar);
- ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS, &shcvar);
+ register_shrinker(&ldlm_pools_srv_shrinker);
+ register_shrinker(&ldlm_pools_cli_shrinker);
return 0;
}
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
- }
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
+
cancel_delayed_work_sync(&ldlm_pools_recalc_work);
}
}
EXPORT_SYMBOL(lu_env_find);
-static struct shrinker *lu_site_shrinker;
-
typedef struct lu_site_stats{
unsigned lss_populated;
unsigned lss_max_search;
return sc->nr_to_scan - remain;
}
-#ifndef HAVE_SHRINKER_COUNT
+#ifdef HAVE_SHRINKER_COUNT
+static struct shrinker lu_site_shrinker = {
+ .count_objects = lu_cache_shrink_count,
+ .scan_objects = lu_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+#else
/*
* There exists a potential lock inversion deadlock scenario when using
* Lustre on top of ZFS. This occurs between one of ZFS's
* objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
-static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int lu_cache_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- int cached = 0;
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
+ int cached = 0;
- CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
+ CDEBUG(D_INODE, "Shrink %lu objects\n", sc->nr_to_scan);
- if (scv.nr_to_scan != 0)
- lu_cache_shrink_scan(shrinker, &scv);
+ if (sc->nr_to_scan != 0)
+ lu_cache_shrink_scan(shrinker, sc);
- cached = lu_cache_shrink_count(shrinker, &scv);
+ cached = lu_cache_shrink_count(shrinker, sc);
return cached;
}
+static struct shrinker lu_site_shrinker = {
+ .shrink = lu_cache_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
#endif /* HAVE_SHRINKER_COUNT */
int lu_global_init(void)
{
int result;
- DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
- lu_cache_shrink_count, lu_cache_shrink_scan);
- CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
+ CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
- result = lu_ref_global_init();
- if (result != 0)
- return result;
+ result = lu_ref_global_init();
+ if (result != 0)
+ return result;
- LU_CONTEXT_KEY_INIT(&lu_global_key);
- result = lu_context_key_register(&lu_global_key);
- if (result != 0)
- return result;
+ LU_CONTEXT_KEY_INIT(&lu_global_key);
+ result = lu_context_key_register(&lu_global_key);
+ if (result != 0)
+ return result;
- /*
- * At this level, we don't know what tags are needed, so allocate them
- * conservatively. This should not be too bad, because this
- * environment is global.
- */
+ /*
+ * At this level, we don't know what tags are needed, so allocate them
+ * conservatively. This should not be too bad, because this
+ * environment is global.
+ */
down_write(&lu_sites_guard);
- result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
+ result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
up_write(&lu_sites_guard);
- if (result != 0)
- return result;
+ if (result != 0)
+ return result;
- /*
- * seeks estimation: 3 seeks to read a record from oi, one to read
- * inode, one for ea. Unfortunately setting this high value results in
- * lu_object/inode cache consuming all the memory.
- */
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
- if (lu_site_shrinker == NULL)
- return -ENOMEM;
+ /*
+ * seeks estimation: 3 seeks to read a record from oi, one to read
+ * inode, one for ea. Unfortunately setting this high value results in
+ * lu_object/inode cache consuming all the memory.
+ */
+ register_shrinker(&lu_site_shrinker);
result = rhashtable_init(&lu_env_rhash, &lu_env_rhash_params);
- return result;
+ return result;
}
/**
*/
void lu_global_fini(void)
{
- if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
- lu_site_shrinker = NULL;
- }
+ unregister_shrinker(&lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
- /*
- * Tear shrinker environment down _after_ de-registering
- * lu_global_key, because the latter has a value in the former.
- */
+ /*
+ * Tear shrinker environment down _after_ de-registering
+ * lu_global_key, because the latter has a value in the former.
+ */
down_write(&lu_sites_guard);
- lu_env_fini(&lu_shrink_env);
+ lu_env_fini(&lu_shrink_env);
up_write(&lu_sites_guard);
rhashtable_destroy(&lu_env_rhash);
- lu_ref_global_fini();
+ lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
.o_quotactl = osc_quotactl,
};
-static struct shrinker *osc_cache_shrinker;
LIST_HEAD(osc_shrink_list);
DEFINE_SPINLOCK(osc_shrink_lock);
-#ifndef HAVE_SHRINKER_COUNT
-static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+#ifdef HAVE_SHRINKER_COUNT
+static struct shrinker osc_cache_shrinker = {
+ .count_objects = osc_cache_shrink_count,
+ .scan_objects = osc_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+#else
+static int osc_cache_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
- (void)osc_cache_shrink_scan(shrinker, &scv);
+ (void)osc_cache_shrink_scan(shrinker, sc);
- return osc_cache_shrink_count(shrinker, &scv);
+ return osc_cache_shrink_count(shrinker, sc);
}
+
+static struct shrinker osc_cache_shrinker = {
+ .shrink = osc_cache_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
#endif
static int __init osc_init(void)
unsigned int reqpool_size;
unsigned int reqsize;
int rc;
- DEF_SHRINKER_VAR(osc_shvar, osc_cache_shrink,
- osc_cache_shrink_count, osc_cache_shrink_scan);
ENTRY;
/* print an address of _any_ initialized kernel symbol from this
if (rc)
GOTO(out_kmem, rc);
- osc_cache_shrinker = set_shrinker(DEFAULT_SEEKS, &osc_shvar);
+ register_shrinker(&osc_cache_shrinker);
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
static void __exit osc_exit(void)
{
osc_stop_grant_work();
- remove_shrinker(osc_cache_shrinker);
+ unregister_shrinker(&osc_cache_shrinker);
class_unregister_type(LUSTRE_OSC_NAME);
lu_kmem_fini(osc_caches);
ptlrpc_free_rq_pool(osc_rq_pool);
} page_pools;
/*
- * memory shrinker
- */
-static const int pools_shrinker_seeks = DEFAULT_SEEKS;
-static struct shrinker *pools_shrinker;
-
-
-/*
* /proc/fs/lustre/sptlrpc/encrypt_page_pools
*/
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
return sc->nr_to_scan;
}
-#ifndef HAVE_SHRINKER_COUNT
+#ifdef HAVE_SHRINKER_COUNT
+static struct shrinker pools_shrinker = {
+ .count_objects = enc_pools_shrink_count,
+ .scan_objects = enc_pools_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+#else
/*
* could be called frequently for query (@nr_to_scan == 0).
* we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
*/
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int enc_pools_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
- enc_pools_shrink_scan(shrinker, &scv);
+ enc_pools_shrink_scan(shrinker, sc);
- return enc_pools_shrink_count(shrinker, &scv);
+ return enc_pools_shrink_count(shrinker, sc);
}
+static struct shrinker pools_shrinker = {
+ .shrink = enc_pools_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
#endif /* HAVE_SHRINKER_COUNT */
static inline
int sptlrpc_enc_pool_init(void)
{
- DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
- enc_pools_shrink_count, enc_pools_shrink_scan);
-
page_pools.epp_max_pages = cfs_totalram_pages() / 8;
if (enc_pool_max_memory_mb > 0 &&
enc_pool_max_memory_mb <= (cfs_totalram_pages() >> mult))
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
- if (pools_shrinker == NULL) {
- enc_pools_free();
- return -ENOMEM;
- }
+ register_shrinker(&pools_shrinker);
return 0;
}
{
unsigned long cleaned, npools;
- LASSERT(pools_shrinker);
LASSERT(page_pools.epp_pools);
LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
- remove_shrinker(pools_shrinker);
+ unregister_shrinker(&pools_shrinker);
npools = npages_to_npools(page_pools.epp_total_pages);
cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);