#define DEBUG_SUBSYSTEM S_LDLM
-#ifdef __KERNEL__
-# include <lustre_dlm.h>
-#else
-# include <liblustre.h>
-#endif
-
+#include <lustre_dlm.h>
#include <cl_object.h>
-
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"
*/
#define LDLM_POOL_SLV_SHIFT (10)
-#ifdef __KERNEL__
extern struct proc_dir_entry *ldlm_ns_proc_dir;
-#endif
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
(t >> LDLM_POOL_GSP_STEP_SHIFT));
}
+/* Return the current number of granted locks in \a pl; wraps the
+ * atomic counter read so all call sites below share one accessor. */
+static inline int ldlm_pool_granted(struct ldlm_pool *pl)
+{
+	return atomic_read(&pl->pl_granted);
+}
+
/**
* Recalculates next grant limit on passed \a pl.
*
int granted, grant_step, limit;
limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ granted = ldlm_pool_granted(pl);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
grant_step = ((limit - granted) * grant_step) / 100;
slv = pl->pl_server_lock_volume;
grant_plan = pl->pl_grant_plan;
limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ granted = ldlm_pool_granted(pl);
round_up = granted < limit;
grant_usage = max_t(int, limit - (granted - grant_plan), 1);
{
int grant_plan = pl->pl_grant_plan;
__u64 slv = pl->pl_server_lock_volume;
- int granted = atomic_read(&pl->pl_granted);
+ int granted = ldlm_pool_granted(pl);
int grant_rate = atomic_read(&pl->pl_grant_rate);
int cancel_rate = atomic_read(&pl->pl_cancel_rate);
* VM is asking how many entries may be potentially freed.
*/
if (nr == 0)
- return atomic_read(&pl->pl_granted);
+ return ldlm_pool_granted(pl);
/*
* Client already canceled locks but server is already in shrinker
* and can't cancel anything. Let's catch this race.
*/
- if (atomic_read(&pl->pl_granted) == 0)
+ if (ldlm_pool_granted(pl) == 0)
RETURN(0);
spin_lock(&pl->pl_lock);
unused = ns->ns_nr_unused;
spin_unlock(&ns->ns_lock);
-#ifdef __KERNEL__
if (nr == 0)
return (unused / 100) * sysctl_vfs_cache_pressure;
else
return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
-#else
- return unused - (nr ? ldlm_cancel_lru(ns, nr, LCF_ASYNC,
- LDLM_CANCEL_SHRINK) : 0);
-#endif
}
struct ldlm_pool_ops ldlm_srv_pool_ops = {
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
/* Prevent too frequent recalculation. */
- CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
- "too short period(%ld)",
- recalc_interval_sec,
- pl->pl_recalc_period);
recalc_interval_sec = 1;
}
}
EXPORT_SYMBOL(ldlm_pool_setup);
-#ifdef __KERNEL__
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
int granted, grant_rate, cancel_rate, grant_step;
clv = pl->pl_client_lock_volume;
limit = ldlm_pool_get_limit(pl);
grant_plan = pl->pl_grant_plan;
- granted = atomic_read(&pl->pl_granted);
+ granted = ldlm_pool_granted(pl);
grant_rate = atomic_read(&pl->pl_grant_rate);
cancel_rate = atomic_read(&pl->pl_cancel_rate);
grant_speed = grant_rate - cancel_rate;
LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file, const char *buf,
+static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
+ const char __user *buf,
size_t len, loff_t *off)
{
struct seq_file *seq = file->private_data;
pl->pl_proc_dir = NULL;
}
}
-#else /* !__KERNEL__*/
-#define ldlm_pool_proc_init(pl) (0)
-#define ldlm_pool_proc_fini(pl) while (0) {}
-#endif
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client)
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);
-#ifdef __KERNEL__
-static unsigned int ldlm_pool_granted(struct ldlm_pool *pl)
-{
- return atomic_read(&pl->pl_granted);
-}
-
static struct ptlrpc_thread *ldlm_pools_thread;
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
*/
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
- int total = 0, nr_ns;
+ unsigned long total = 0;
+ int nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
static int ldlm_pools_shrink(ldlm_side_t client, int nr,
gfp_t gfp_mask)
{
- unsigned int total = 0;
+ unsigned long total = 0;
if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
!(gfp_mask & __GFP_FS))
int ldlm_pools_recalc(ldlm_side_t client)
{
- __u32 nr_l = 0, nr_p = 0, l;
+ unsigned long nr_l = 0, nr_p = 0, l;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL;
int nr, equal = 0;
*/
if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
CWARN("\"Modest\" pools eat out 2/3 of server locks "
- "limit (%d of %lu). This means that you have too "
+ "limit (%lu of %lu). This means that you have too "
"many clients for this amount of server RAM. "
"Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
equal = 1;
ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
-#endif /* __KERNEL__ */
#else /* !HAVE_LRU_RESIZE_SUPPORT */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)