* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* This controls the speed of reaching LDLM_POOL_MAX_GSP
* with increasing thread period.
*/
-#define LDLM_POOL_GSP_STEP (4)
+#define LDLM_POOL_GSP_STEP_SHIFT (2)
/*
* LDLM_POOL_GSP% of all locks is default GP.
*/
#define LDLM_POOL_MAX_AGE (36000)
+/*
+ * The granularity of SLV calculation.
+ */
+#define LDLM_POOL_SLV_SHIFT (10)
+
#ifdef __KERNEL__
extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir;
#endif
-#define avg(src, add) \
- ((src) = ((src) + (add)) / 2)
-
-static inline __u64 dru(__u64 val, __u32 div)
+static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
- __u64 ret = val + (div - 1);
- do_div(ret, div);
- return ret;
+ return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
static inline __u64 ldlm_pool_slv_max(__u32 L)
* Allow to have all locks for 1 client for 10 hrs.
* Formula is the following: limit * 10h / 1 client.
*/
- __u64 lim = L * LDLM_POOL_MAX_AGE / 1;
+ __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
return lim;
}
* Calculates suggested grant_step in % of available locks for passed
* \a period. This is later used in grant_plan calculations.
*/
-static inline int ldlm_pool_t2gsp(int t)
+static inline int ldlm_pool_t2gsp(unsigned int t)
{
/*
- * This yeilds 1% grant step for anything below LDLM_POOL_GSP_STEP
+ * This yields 1% grant step for anything below (1 << LDLM_POOL_GSP_STEP_SHIFT)
- * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
+ * and up to 30% for anything higher than (1 << LDLM_POOL_GSP_STEP_SHIFT).
*
* How this will affect execution is the following:
*
- * - for thread peroid 1s we will have grant_step 1% which good from
+ * - for thread period 1s we will have grant_step 1% which is good from
* pov of taking some load off from server and push it out to clients.
* This is like that because 1% for grant_step means that server will
- * not allow clients to get lots of locks inshort period of time and
+ * not allow clients to get lots of locks in short period of time and
* keep all old locks in their caches. Clients will always have to
* get some locks back if they want to take some new;
*
* plan is reached.
*/
return LDLM_POOL_MAX_GSP -
- (LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) /
- (1 << (t / LDLM_POOL_GSP_STEP));
+ ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
+ (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
/**
*/
static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
- int grant_usage, granted, grant_plan;
- __u64 slv, slv_factor;
+ int granted;
+ int grant_plan;
+ int round_up;
+ __u64 slv;
+ __u64 slv_factor;
+ __u64 grant_usage;
__u32 limit;
slv = pl->pl_server_lock_volume;
grant_plan = pl->pl_grant_plan;
limit = ldlm_pool_get_limit(pl);
granted = cfs_atomic_read(&pl->pl_granted);
+ round_up = granted < limit;
- grant_usage = limit - (granted - grant_plan);
- if (grant_usage <= 0)
- grant_usage = 1;
+ grant_usage = max_t(int, limit - (granted - grant_plan), 1);
/*
* Find out SLV change factor which is the ratio of grant usage
* from limit. SLV changes as fast as the ratio of grant plan
- * consumtion. The more locks from grant plan are not consumed
+ * consumption. The more locks from grant plan are not consumed
* by clients in last interval (idle time), the faster grows
* SLV. And the opposite, the more grant plan is over-consumed
* (load time) the faster drops SLV.
*/
- slv_factor = (grant_usage * 100) / limit;
+ slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ do_div(slv_factor, limit);
if (2 * abs(granted - limit) > limit) {
slv_factor *= slv_factor;
- slv_factor = dru(slv_factor, 100);
+ slv_factor = dru(slv_factor, LDLM_POOL_SLV_SHIFT, round_up);
}
slv = slv * slv_factor;
- slv = dru(slv, 100);
+ slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
if (slv > ldlm_pool_slv_max(limit)) {
slv = ldlm_pool_slv_max(limit);
/**
* This function is used on server side as main entry point for memory
- * preasure handling. It decreases SLV on \a pl according to passed
+ * pressure handling. It decreases SLV on \a pl according to passed
* \a nr and \a gfp_mask.
*
- * Our goal here is to decrease SLV such a way that clients hold \a nr
+ * Our goal here is to decrease SLV in such a way that clients hold \a nr
cfs_spin_lock(&pl->pl_lock);
/*
- * We want shrinker to possibly cause cancelation of @nr locks from
+ * We want shrinker to possibly cause cancellation of @nr locks from
* clients or grant approximately @nr locks smaller next intervals.
*
- * This is why we decresed SLV by @nr. This effect will only be as
+ * This is why we decreased SLV by @nr. This effect will only be as
* long as one re-calc interval (1s these days) and this should be
* enough to pass this decreased SLV to all clients. On next recalc
* interval pool will either increase SLV if locks load is not high
struct obd_device *obd;
/*
- * Get new SLV and Limit from obd which is updated with comming
+ * Get new SLV and Limit from obd which is updated with coming
* RPCs.
*/
obd = ldlm_pl2ns(pl)->ns_obd;
}
/**
- * Recalculates client sise pool \a pl according to current SLV and Limit.
+ * Recalculates client side pool \a pl according to current SLV and Limit.
*/
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
}
/**
- * This function is main entry point for memory preasure handling on client side.
- * Main goal of this function is to cancel some number of locks on passed \a pl
- * according to \a nr and \a gfp_mask.
+ * This function is main entry point for memory pressure handling on client
+ * side. Main goal of this function is to cancel some number of locks on
+ * passed \a pl according to \a nr and \a gfp_mask.
*/
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
}
#ifdef __KERNEL__
/*
- * Retrun the number of potentially reclaimable locks.
+ * Return the number of potentially reclaimable locks.
*/
return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
#else
pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
} else {
ldlm_pool_set_limit(pl, 1);
- pl->pl_server_lock_volume = 1;
+ pl->pl_server_lock_volume = 0;
pl->pl_ops = &ldlm_cli_pool_ops;
pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
}
/*
* Set the modest pools limit equal to their avg granted
- * locks + 5%.
+ * locks + ~6%.
*/
- l += dru(l * LDLM_POOLS_MODEST_MARGIN, 100);
+ l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
ldlm_pool_setup(&ns->ns_pool, l);
nr_l += l;
nr_p++;