/*
* Shrinker
*/
-# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
- struct shrinker *shrinker, \
- struct shrink_control *sc
-# define shrink_param(sc, var) ((sc)->var)
-
-#ifdef HAVE_SHRINKER_COUNT
-struct shrinker_var {
- unsigned long (*count)(struct shrinker *,
- struct shrink_control *sc);
- unsigned long (*scan)(struct shrinker *,
- struct shrink_control *sc);
-};
-# define DEF_SHRINKER_VAR(name, shrink, count_obj, scan_obj) \
- struct shrinker_var name = { .count = count_obj, .scan = scan_obj }
-#else
-struct shrinker_var {
- int (*shrink)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-};
-# define DEF_SHRINKER_VAR(name, shrinker, count, scan) \
- struct shrinker_var name = { .shrink = shrinker }
+#ifndef SHRINK_STOP
# define SHRINK_STOP (~0UL)
#endif
-static inline
-struct shrinker *set_shrinker(int seek, struct shrinker_var *var)
-{
- struct shrinker *s;
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL)
- return (NULL);
-
-#ifdef HAVE_SHRINKER_COUNT
- s->count_objects = var->count;
- s->scan_objects = var->scan;
-#else
- s->shrink = var->shrink;
-#endif
- s->seeks = seek;
-
- register_shrinker(s);
-
- return s;
-}
-
-static inline
-void remove_shrinker(struct shrinker *shrinker)
-{
- if (shrinker == NULL)
- return;
-
- unregister_shrinker(shrinker);
- kfree(shrinker);
-}
-
#ifndef HAVE_MMAP_LOCK
static inline void mmap_write_lock(struct mm_struct *mm)
{
]) # LC_KIOCB_KI_LEFT
#
+# LC_REGISTER_SHRINKER_RET
+#
+# v3.11-8748-g1d3d4437eae1 register_shrinker returns a status
+#
+AC_DEFUN([LC_REGISTER_SHRINKER_RET], [
+LB_CHECK_COMPILE([if register_shrinker() returns status],
+register_shrinker_ret, [
+ #include <linux/mm.h>
+],[
+ if (register_shrinker(NULL))
+ unregister_shrinker(NULL);
+],[
+ AC_DEFINE(HAVE_REGISTER_SHRINKER_RET, 1,
+ [register_shrinker() returns status])
+])
+]) # LC_REGISTER_SHRINKER_RET
+
+#
# LC_VFS_RENAME_5ARGS
#
# 3.13 has vfs_rename with 5 args
LC_OLDSIZE_TRUNCATE_PAGECACHE
LC_PTR_ERR_OR_ZERO_MISSING
LC_KIOCB_KI_LEFT
+ LC_REGISTER_SHRINKER_RET
# 3.13
LC_VFS_RENAME_5ARGS
#ifdef HAVE_REGISTER_SHRINKER_FORMAT_NAMED
#define register_shrinker(_s) register_shrinker((_s), "%ps", (_s))
+#elif !defined(HAVE_REGISTER_SHRINKER_RET)
+#define register_shrinker(_s) (register_shrinker(_s), 0)
#endif
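
With the wrappers above, callers can treat register_shrinker() as returning an int status on every supported kernel: kernels with the named-format API get the "%ps" variant, kernels where HAVE_REGISTER_SHRINKER_RET is set return their own status, and older void-returning kernels get a constant 0 from the comma expression. A minimal sketch of the resulting calling convention, assuming a kernel with the count/scan shrinker API; my_shrinker, my_count, my_scan and my_module_init are illustrative names, not part of this patch:

	#include <linux/mm.h>

	static struct shrinker my_shrinker = {
		.count_objects	= my_count,	/* hypothetical callbacks */
		.scan_objects	= my_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	static int __init my_module_init(void)
	{
		int rc;

		/* uniform error handling thanks to the compat macros */
		rc = register_shrinker(&my_shrinker);
		if (rc)
			return rc;

		return 0;
	}
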
#ifndef fallthrough
return atomic_read(&pl->pl_lock_volume_factor);
}
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-
/*
* count locks from all namespaces (if possible). Returns number of
* cached locks.
sc->gfp_mask);
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .count_objects = ldlm_pools_srv_count,
+ .scan_objects = ldlm_pools_srv_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .count_objects = ldlm_pools_cli_count,
+ .scan_objects = ldlm_pools_cli_scan,
+ .seeks = DEFAULT_SEEKS,
+};
#else
/*
* Cancel \a nr locks from all namespaces (if possible). Returns number of
return ldlm_pools_scan(client, nr, gfp_mask);
}
-static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int ldlm_pools_srv_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ sc->nr_to_scan, sc->gfp_mask);
}
-static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int ldlm_pools_cli_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ sc->nr_to_scan, sc->gfp_mask);
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .shrink = ldlm_pools_srv_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .shrink = ldlm_pools_cli_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
#endif /* HAVE_SHRINKER_COUNT */
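
For reference, the statically defined shrinkers above follow the kernel's count/scan contract: count_objects() reports how many cached objects could be freed, and scan_objects() tries to free up to sc->nr_to_scan of them, returning the number actually freed or SHRINK_STOP when it cannot make progress. A minimal sketch of that shape (my_count, my_scan, my_cached_count and my_free_some are hypothetical helpers, not functions from this patch):

	static unsigned long my_count(struct shrinker *shrinker,
				      struct shrink_control *sc)
	{
		/* number of freeable objects; may legitimately be 0 */
		return my_cached_count();
	}

	static unsigned long my_scan(struct shrinker *shrinker,
				     struct shrink_control *sc)
	{
		/* common pattern: refuse to recurse into the filesystem */
		if (!(sc->gfp_mask & __GFP_FS))
			return SHRINK_STOP;

		return my_free_some(sc->nr_to_scan);
	}
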
static time64_t ldlm_pools_recalc_delay(enum ldlm_side side)
schedule_delayed_work(&ldlm_pools_recalc_work, cfs_time_seconds(delay));
}
+static bool ldlm_pools_init_done;
+
int ldlm_pools_init(void)
{
time64_t delay;
-
- DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
- ldlm_pools_srv_count, ldlm_pools_srv_scan);
- DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
- ldlm_pools_cli_count, ldlm_pools_cli_scan);
+ int rc;
#ifdef HAVE_SERVER_SUPPORT
delay = min(LDLM_POOL_SRV_DEF_RECALC_PERIOD,
delay = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
#endif
- schedule_delayed_work(&ldlm_pools_recalc_work, delay);
- ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS, &shsvar);
- ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS, &shcvar);
+ rc = register_shrinker(&ldlm_pools_srv_shrinker);
+ if (rc)
+ goto out;
+
+ rc = register_shrinker(&ldlm_pools_cli_shrinker);
+ if (rc)
+ goto out_shrinker;
+	schedule_delayed_work(&ldlm_pools_recalc_work, delay);
+ ldlm_pools_init_done = true;
return 0;
+
+out_shrinker:
+	unregister_shrinker(&ldlm_pools_srv_shrinker);
+out:
+ return rc;
}
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
+ if (ldlm_pools_init_done) {
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
+
+ cancel_delayed_work_sync(&ldlm_pools_recalc_work);
}
- cancel_delayed_work_sync(&ldlm_pools_recalc_work);
+
+ ldlm_pools_init_done = false;
}
#else /* !HAVE_LRU_RESIZE_SUPPORT */
}
EXPORT_SYMBOL(lu_env_find);
-static struct shrinker *lu_site_shrinker;
-
typedef struct lu_site_stats{
unsigned lss_populated;
unsigned lss_max_search;
return sc->nr_to_scan - remain;
}
-#ifndef HAVE_SHRINKER_COUNT
+#ifdef HAVE_SHRINKER_COUNT
+static struct shrinker lu_site_shrinker = {
+ .count_objects = lu_cache_shrink_count,
+ .scan_objects = lu_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+#else
/*
* There exists a potential lock inversion deadlock scenario when using
* Lustre on top of ZFS. This occurs between one of ZFS's
* objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
-static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int lu_cache_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- int cached = 0;
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
+ int cached = 0;
- CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
+ CDEBUG(D_INODE, "Shrink %lu objects\n", sc->nr_to_scan);
- if (scv.nr_to_scan != 0)
- lu_cache_shrink_scan(shrinker, &scv);
+ if (sc->nr_to_scan != 0)
+ lu_cache_shrink_scan(shrinker, sc);
- cached = lu_cache_shrink_count(shrinker, &scv);
+ cached = lu_cache_shrink_count(shrinker, sc);
return cached;
}
+static struct shrinker lu_site_shrinker = {
+ .shrink = lu_cache_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
#endif /* HAVE_SHRINKER_COUNT */
int lu_global_init(void)
{
int result;
- DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
- lu_cache_shrink_count, lu_cache_shrink_scan);
- CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
+ CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
- result = lu_ref_global_init();
- if (result != 0)
- return result;
+ result = lu_ref_global_init();
+ if (result != 0)
+ return result;
- LU_CONTEXT_KEY_INIT(&lu_global_key);
- result = lu_context_key_register(&lu_global_key);
- if (result != 0)
- return result;
+ LU_CONTEXT_KEY_INIT(&lu_global_key);
+ result = lu_context_key_register(&lu_global_key);
+ if (result)
+ goto out_lu_ref;
- /*
- * At this level, we don't know what tags are needed, so allocate them
- * conservatively. This should not be too bad, because this
- * environment is global.
- */
+ /*
+ * At this level, we don't know what tags are needed, so allocate them
+ * conservatively. This should not be too bad, because this
+ * environment is global.
+ */
down_write(&lu_sites_guard);
- result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
+ result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
up_write(&lu_sites_guard);
- if (result != 0)
- return result;
+ if (result) {
+ lu_context_key_degister(&lu_global_key);
+ goto out_lu_ref;
+ }
- /*
- * seeks estimation: 3 seeks to read a record from oi, one to read
- * inode, one for ea. Unfortunately setting this high value results in
- * lu_object/inode cache consuming all the memory.
- */
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
- if (lu_site_shrinker == NULL)
- return -ENOMEM;
+ /*
+ * seeks estimation: 3 seeks to read a record from oi, one to read
+ * inode, one for ea. Unfortunately setting this high value results in
+ * lu_object/inode cache consuming all the memory.
+ */
+ result = register_shrinker(&lu_site_shrinker);
+ if (result)
+ goto out_env;
result = rhashtable_init(&lu_env_rhash, &lu_env_rhash_params);
- return result;
+ if (result)
+ goto out_shrinker;
+
+ return result;
+
+out_shrinker:
+ unregister_shrinker(&lu_site_shrinker);
+out_env:
+ /* ordering here is explained in lu_global_fini() */
+ lu_context_key_degister(&lu_global_key);
+ down_write(&lu_sites_guard);
+ lu_env_fini(&lu_shrink_env);
+ up_write(&lu_sites_guard);
+out_lu_ref:
+ lu_ref_global_fini();
+ return result;
}
/**
*/
void lu_global_fini(void)
{
- if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
- lu_site_shrinker = NULL;
- }
+ unregister_shrinker(&lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
- /*
- * Tear shrinker environment down _after_ de-registering
- * lu_global_key, because the latter has a value in the former.
- */
+ /*
+ * Tear shrinker environment down _after_ de-registering
+ * lu_global_key, because the latter has a value in the former.
+ */
down_write(&lu_sites_guard);
- lu_env_fini(&lu_shrink_env);
+ lu_env_fini(&lu_shrink_env);
up_write(&lu_sites_guard);
rhashtable_destroy(&lu_env_rhash);
- lu_ref_global_fini();
+ lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
.o_quotactl = osc_quotactl,
};
-static struct shrinker *osc_cache_shrinker;
LIST_HEAD(osc_shrink_list);
DEFINE_SPINLOCK(osc_shrink_lock);
-#ifndef HAVE_SHRINKER_COUNT
-static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+#ifdef HAVE_SHRINKER_COUNT
+static struct shrinker osc_cache_shrinker = {
+ .count_objects = osc_cache_shrink_count,
+ .scan_objects = osc_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+#else
+static int osc_cache_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
- (void)osc_cache_shrink_scan(shrinker, &scv);
+ (void)osc_cache_shrink_scan(shrinker, sc);
- return osc_cache_shrink_count(shrinker, &scv);
+ return osc_cache_shrink_count(shrinker, sc);
}
+
+static struct shrinker osc_cache_shrinker = {
+ .shrink = osc_cache_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
#endif
static int __init osc_init(void)
unsigned int reqpool_size;
unsigned int reqsize;
int rc;
- DEF_SHRINKER_VAR(osc_shvar, osc_cache_shrink,
- osc_cache_shrink_count, osc_cache_shrink_scan);
ENTRY;
/* print an address of _any_ initialized kernel symbol from this
if (rc)
GOTO(out_kmem, rc);
- osc_cache_shrinker = set_shrinker(DEFAULT_SEEKS, &osc_shvar);
+ rc = register_shrinker(&osc_cache_shrinker);
+ if (rc)
+ GOTO(out_type, rc);
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
- GOTO(out_type, rc = -EINVAL);
+ GOTO(out_shrinker, rc = -EINVAL);
reqpool_size = osc_reqpool_mem_max << 20;
ptlrpc_add_rqs_to_pool);
if (osc_rq_pool == NULL)
- GOTO(out_type, rc = -ENOMEM);
+ GOTO(out_shrinker, rc = -ENOMEM);
rc = osc_start_grant_work();
if (rc != 0)
out_req_pool:
ptlrpc_free_rq_pool(osc_rq_pool);
+out_shrinker:
+ unregister_shrinker(&osc_cache_shrinker);
out_type:
class_unregister_type(LUSTRE_OSC_NAME);
out_kmem:
static void __exit osc_exit(void)
{
osc_stop_grant_work();
- remove_shrinker(osc_cache_shrinker);
+ unregister_shrinker(&osc_cache_shrinker);
class_unregister_type(LUSTRE_OSC_NAME);
lu_kmem_fini(osc_caches);
ptlrpc_free_rq_pool(osc_rq_pool);
/*
* memory shrinker
*/
- struct shrinker *pool_shrinker;
+ struct shrinker pool_shrinker;
struct mutex add_pages_mutex;
} **page_pools;
* could be called frequently for query (@nr_to_scan == 0).
* we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
*/
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static int enc_pools_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
- enc_pools_shrink_scan(shrinker, &scv);
+ enc_pools_shrink_scan(shrinker, sc);
- return enc_pools_shrink_count(shrinker, &scv);
+ return enc_pools_shrink_count(shrinker, sc);
}
-
#endif /* HAVE_SHRINKER_COUNT */
static inline
int rc = 0;
struct ptlrpc_enc_page_pool *pool;
- DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
- enc_pools_shrink_count, enc_pools_shrink_scan);
-
ENTRY;
OBD_ALLOC(page_pools, POOLS_COUNT * sizeof(*page_pools));
if (page_pools == NULL)
CDEBUG(D_SEC, "Allocated pool %i\n", pool_index);
if (pool->epp_pools == NULL)
GOTO(fail, rc = -ENOMEM);
+#ifdef HAVE_SHRINKER_COUNT
+ pool->pool_shrinker.count_objects = enc_pools_shrink_count;
+ pool->pool_shrinker.scan_objects = enc_pools_shrink_scan;
+#else
+ pool->pool_shrinker.shrink = enc_pools_shrink;
+#endif
	/* Pass pool number as part of pools_shrinker_seeks value */
+	pool->pool_shrinker.seeks = INDEX_TO_SEEKS(pool_index);
- pool->pool_shrinker =
- set_shrinker(INDEX_TO_SEEKS(pool_index), &shvar);
- if (pool->pool_shrinker == NULL)
- GOTO(fail, rc = -ENOMEM);
-
+ rc = register_shrinker(&pool->pool_shrinker);
+ if (rc)
+ GOTO(fail, rc);
mutex_init(&pool->add_pages_mutex);
}
for (pool_index = 0; pool_index <= to_revert; pool_index++) {
pool = page_pools[pool_index];
if (pool) {
- if (pool->pool_shrinker)
- remove_shrinker(pool->pool_shrinker);
+ unregister_shrinker(&pool->pool_shrinker);
if (pool->epp_pools)
enc_pools_free(pool_index);
OBD_FREE(pool, sizeof(**page_pools));
for (pool_index = 0; pool_index < POOLS_COUNT; pool_index++) {
pool = page_pools[pool_index];
- LASSERT(pool->pool_shrinker);
- remove_shrinker(pool->pool_shrinker);
+ unregister_shrinker(&pool->pool_shrinker);
LASSERT(pool->epp_pools);
LASSERT(pool->epp_total_pages == pool->epp_free_pages);