* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
*
* \pre ->pl_lock is locked.
*/
-static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
+static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
int granted, grant_step, limit;
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
grant_step = ((limit - granted) * grant_step) / 100;
pl->pl_grant_plan = granted + grant_step;
+ /* Cap the plan: never plan to grant more than 125% (5/4) of limit. */
+ limit = (limit * 5) >> 2;
+ if (pl->pl_grant_plan > limit)
+ pl->pl_grant_plan = limit;
}
/**
*
* \pre ->pl_lock is locked.
*/
-static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
+static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
int granted;
int grant_plan;
*/
slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
do_div(slv_factor, limit);
- if (2 * abs(granted - limit) > limit) {
- slv_factor *= slv_factor;
- slv_factor = dru(slv_factor, LDLM_POOL_SLV_SHIFT, round_up);
- }
slv = slv * slv_factor;
slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
*
* \pre ->pl_lock is locked.
*/
-static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
+static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
int grant_plan = pl->pl_grant_plan;
__u64 slv = pl->pl_server_lock_volume;
time_t recalc_interval_sec;
ENTRY;
- cfs_spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
- if (recalc_interval_sec >= pl->pl_recalc_period) {
- /*
- * Recalc SLV after last period. This should be done
- * _before_ recalculating new grant plan.
- */
- ldlm_pool_recalc_slv(pl);
+ if (recalc_interval_sec < pl->pl_recalc_period)
+ RETURN(0);
- /*
- * Make sure that pool informed obd of last SLV changes.
- */
- ldlm_srv_pool_push_slv(pl);
+ cfs_spin_lock(&pl->pl_lock);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ cfs_spin_unlock(&pl->pl_lock);
+ RETURN(0);
+ }
+ /*
+ * Recalc SLV after last period. This should be done
+ * _before_ recalculating new grant plan.
+ */
+ ldlm_pool_recalc_slv(pl);
- /*
- * Update grant_plan for new period.
- */
- ldlm_pool_recalc_grant_plan(pl);
+ /*
+ * Make sure that pool informed obd of last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
- pl->pl_recalc_time = cfs_time_current_sec();
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
- recalc_interval_sec);
- }
+ /*
+ * Update grant_plan for new period.
+ */
+ ldlm_pool_recalc_grant_plan(pl);
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
cfs_spin_unlock(&pl->pl_lock);
RETURN(0);
}
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
struct obd_device *obd;
- ENTRY;
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL && obd != LP_POISON);
cfs_write_unlock(&obd->obd_pool_lock);
ldlm_pool_set_limit(pl, limit);
- RETURN(0);
+ return 0;
}
/**
time_t recalc_interval_sec;
ENTRY;
+ /*
+ * Lockless fast path: if the recalc period has not elapsed, bail out
+ * without taking ->pl_lock.  NOTE(review): presumably re-checked under
+ * the lock below to close the race -- confirm against the full hunk.
+ */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period)
+ RETURN(0);
+
cfs_spin_lock(&pl->pl_lock);
/*
* Check if we need to recalc lists now.
time_t recalc_interval_sec;
int count;
+ /*
+ * Lockless fast path: skip the rate-counter reset when no time has
+ * passed since the last recalc; the interval is re-read under
+ * ->pl_lock below before any state is modified.
+ */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec <= 0)
+ goto recalc;
+
cfs_spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
*/
cfs_atomic_set(&pl->pl_grant_rate, 0);
cfs_atomic_set(&pl->pl_cancel_rate, 0);
- cfs_atomic_set(&pl->pl_grant_speed, 0);
}
cfs_spin_unlock(&pl->pl_lock);
+ recalc:
if (pl->pl_ops->po_recalc != NULL) {
count = pl->pl_ops->po_recalc(pl);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
*/
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
- ENTRY;
if (pl->pl_ops->po_setup != NULL)
- RETURN(pl->pl_ops->po_setup(pl, limit));
- RETURN(0);
+ return(pl->pl_ops->po_setup(pl, limit));
+ return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);
grant_plan = pl->pl_grant_plan;
granted = cfs_atomic_read(&pl->pl_granted);
grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
- lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
- grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
+ grant_speed = grant_rate - cancel_rate;
+ lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
cfs_spin_unlock(&pl->pl_lock);
return nr;
}
+/**
+ * Proc read handler for the pool's "grant_speed" file.
+ *
+ * Replaces the removed ->pl_grant_speed counter: the value is now
+ * derived on demand as grant rate minus cancel rate, with both atomics
+ * read under ->pl_lock so the pair is consistent with ldlm_pool_recalc.
+ */
+static int lprocfs_rd_grant_speed(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct ldlm_pool *pl = data;
+ int grant_speed;
+
+ cfs_spin_lock(&pl->pl_lock);
+ /* serialize with ldlm_pool_recalc */
+ grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
+ cfs_atomic_read(&pl->pl_cancel_rate);
+ cfs_spin_unlock(&pl->pl_lock);
+ return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
+}
+
LDLM_POOL_PROC_READER(grant_plan, int);
LDLM_POOL_PROC_READER(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
- pool_vars[0].data = &pl->pl_grant_speed;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
+ pool_vars[0].data = pl;
+ pool_vars[0].read_fptr = lprocfs_rd_grant_speed;
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
snprintf(var_name, MAX_STRING_SIZE, "cancel_rate");
cfs_atomic_set(&pl->pl_grant_rate, 0);
cfs_atomic_set(&pl->pl_cancel_rate, 0);
- cfs_atomic_set(&pl->pl_grant_speed, 0);
pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
*/
if (lock->l_resource->lr_type == LDLM_FLOCK)
return;
- ENTRY;
- LDLM_DEBUG(lock, "add lock to pool");
cfs_atomic_inc(&pl->pl_granted);
cfs_atomic_inc(&pl->pl_grant_rate);
- cfs_atomic_inc(&pl->pl_grant_speed);
-
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
/*
* Do not do pool recalc for client side as all locks which
*/
if (ns_is_server(ldlm_pl2ns(pl)))
ldlm_pool_recalc(pl);
- EXIT;
}
EXPORT_SYMBOL(ldlm_pool_add);
*/
if (lock->l_resource->lr_type == LDLM_FLOCK)
return;
- ENTRY;
- LDLM_DEBUG(lock, "del lock from pool");
LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
cfs_atomic_dec(&pl->pl_granted);
cfs_atomic_inc(&pl->pl_cancel_rate);
- cfs_atomic_dec(&pl->pl_grant_speed);
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
if (ns_is_server(ldlm_pl2ns(pl)))
ldlm_pool_recalc(pl);
- EXIT;
}
EXPORT_SYMBOL(ldlm_pool_del);
struct ldlm_namespace *ns;
void *cookie;
- if (nr != 0 && !(gfp_mask & __GFP_FS))
+ if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
+ !(gfp_mask & __GFP_FS))
return -1;
CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",