int nr, unsigned int gfp_mask)
{
__u32 limit;
- ENTRY;
/*
* VM is asking how many entries may be potentially freed.
*/
if (nr == 0)
- RETURN(atomic_read(&pl->pl_granted));
+ return atomic_read(&pl->pl_granted);
/*
* Client already canceled locks but server is already in shrinker
* We did not really free any memory here so far, it only will be
* freed later may be, so that we return 0 to not confuse VM.
*/
- RETURN(0);
+ return 0;
}
/**
* It may be called when SLV has changed much, this is why we do not
* take into account pl->pl_recalc_time here.
*/
- RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_ASYNC,
+ RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC,
LDLM_CANCEL_LRUR));
}
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
{
- ENTRY;
+ struct ldlm_namespace *ns;
+ int canceled = 0, unused;
+
+ ns = ldlm_pl2ns(pl);
/*
* Do not cancel locks in case lru resize is disabled for this ns.
*/
- if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
+ if (!ns_connect_lru_resize(ns))
RETURN(0);
/*
*/
ldlm_cli_pool_pop_slv(pl);
+ spin_lock(&ns->ns_unused_lock);
+ unused = ns->ns_nr_unused;
+ spin_unlock(&ns->ns_unused_lock);
+
+ if (nr) {
+ canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC,
+ LDLM_CANCEL_SHRINK);
+ }
+#ifdef __KERNEL__
/*
- * Find out how many locks may be released according to shrink
- * policy.
- */
- if (nr == 0)
- RETURN(ldlm_cancel_lru_estimate(ldlm_pl2ns(pl), 0, 0,
- LDLM_CANCEL_SHRINK));
-
- /*
- * Cancel @nr locks accoding to shrink policy.
+ * Return the number of potentially reclaimable locks.
*/
- RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), nr, LDLM_SYNC,
- LDLM_CANCEL_SHRINK));
+ return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
+#else
+ return unused - canceled;
+#endif
}
struct ldlm_pool_ops ldlm_srv_pool_ops = {