}
LUSTRE_RW_ATTR(grant_shrink_interval);
+/* Report whether the OSC page cache shrinker is enabled (1) or disabled (0). */
+static ssize_t enable_page_cache_shrink_show(struct kobject *kobj,
+					     struct attribute *attr,
+					     char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			 osc_page_cache_shrink_enabled ? 1 : 0);
+}
+
+/*
+ * Enable or disable the OSC page cache shrinker from user space.
+ * Accepts any value kstrtobool() understands ("0"/"1", "y"/"n", ...);
+ * rejects anything else with the error code from kstrtobool().
+ */
+static ssize_t enable_page_cache_shrink_store(struct kobject *kobj,
+					      struct attribute *attr,
+					      const char *buffer,
+					      size_t count)
+{
+	bool enable;
+	int rc = kstrtobool(buffer, &enable);
+
+	if (rc)
+		return rc;
+
+	osc_page_cache_shrink_enabled = enable;
+
+	return count;
+}
+LUSTRE_RW_ATTR(enable_page_cache_shrink);
+
static ssize_t checksums_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
static struct attribute *osc_attrs[] = {
&lustre_attr_active.attr,
+ &lustre_attr_enable_page_cache_shrink.attr,
&lustre_attr_checksums.attr,
&lustre_attr_checksum_dump.attr,
&lustre_attr_cur_dirty_bytes.attr,
extern struct list_head osc_shrink_list;
/** spin lock to protect osc_shrink_list */
extern spinlock_t osc_shrink_lock;
+/** Whether the page cache shrinker is enabled */
+extern bool osc_page_cache_shrink_enabled;
+
extern unsigned long osc_cache_shrink_count(struct shrinker *sk,
struct shrink_control *sc);
extern unsigned long osc_cache_shrink_scan(struct shrinker *sk,
struct client_obd *cli;
unsigned long cached = 0;
+ if (!osc_page_cache_shrink_enabled)
+ return 0;
+
spin_lock(&osc_shrink_lock);
list_for_each_entry(cli, &osc_shrink_list, cl_shrink_list)
cached += atomic_long_read(&cli->cl_lru_in_list);
LIST_HEAD(osc_shrink_list);
DEFINE_SPINLOCK(osc_shrink_lock);
+bool osc_page_cache_shrink_enabled = true;
#ifdef HAVE_SHRINKER_COUNT
static struct shrinker osc_cache_shrinker = {