* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* This controls the speed of reaching LDLM_POOL_MAX_GSP
* with increasing thread period.
*/
-#define LDLM_POOL_GSP_STEP (4)
+#define LDLM_POOL_GSP_STEP_SHIFT (2)
/*
* LDLM_POOL_GSP% of all locks is default GP.
*/
#define LDLM_POOL_MAX_AGE (36000)
+/*
+ * The granularity of SLV calculation.
+ */
+#define LDLM_POOL_SLV_SHIFT (10)
+
#ifdef __KERNEL__
extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir;
#endif
-#define avg(src, add) \
- ((src) = ((src) + (add)) / 2)
-
-static inline __u64 dru(__u64 val, __u32 div)
+static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
- __u64 ret = val + (div - 1);
- do_div(ret, div);
- return ret;
+ return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
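The reworked dru() ("divide, round up") trades the old do_div()-based division for a power-of-two shift and makes the rounding optional. A minimal userspace sketch (the harness and sample values are illustrative, not part of the patch) of what the new helper computes:

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as the kernel helper above, in plain C types. */
    static inline uint64_t dru(uint64_t val, uint32_t shift, int round_up)
    {
            return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
    }

    int main(void)
    {
            assert(dru(100, 4, 1) == 7);   /* ceil(100 / 16) */
            assert(dru(100, 4, 0) == 6);   /* floor(100 / 16) */
            assert(dru(96, 4, 1) == 6);    /* exact multiples are unchanged */
            return 0;
    }

The round_up flag lets ldlm_pool_recalc_slv() below round toward a larger SLV only while the pool is still under its limit.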
static inline __u64 ldlm_pool_slv_max(__u32 L)
 * Allow one client to hold all locks for 10 hrs.
 * The formula is: limit * 10h / 1 client.
*/
- __u64 lim = L * LDLM_POOL_MAX_AGE / 1;
+ __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
return lim;
}
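The (__u64) cast matters: L and LDLM_POOL_MAX_AGE would otherwise be multiplied in 32 bits and wrap for limits above roughly 119k locks (2^32 / 36000). A hypothetical demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    #define LDLM_POOL_MAX_AGE (36000)

    int main(void)
    {
            uint32_t L = 200000;                    /* a plausible lock limit */
            uint64_t wraps = L * LDLM_POOL_MAX_AGE;           /* 32-bit multiply */
            uint64_t exact = (uint64_t)L * LDLM_POOL_MAX_AGE; /* 64-bit multiply */

            /* Prints wraps=2905032704 exact=7200000000. */
            printf("wraps=%llu exact=%llu\n",
                   (unsigned long long)wraps, (unsigned long long)exact);
            return 0;
    }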
* Calculates suggested grant_step in % of available locks for passed
* \a period. This is later used in grant_plan calculations.
*/
-static inline int ldlm_pool_t2gsp(int t)
+static inline int ldlm_pool_t2gsp(unsigned int t)
{
/*
- * This yeilds 1% grant step for anything below LDLM_POOL_GSP_STEP
- * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
+ * This yields 1% grant step for anything below 1 << LDLM_POOL_GSP_STEP_SHIFT
+ * and up to 30% for anything higher.
*
* How this will affect execution is the following:
*
- * - for thread peroid 1s we will have grant_step 1% which good from
- * pov of taking some load off from server and push it out to clients.
+ * - for thread period 1s we will have grant_step 1%, which is good from
+ * the pov of taking some load off the server and pushing it out to clients.
* This is like that because 1% for grant_step means that server will
- * not allow clients to get lots of locks inshort period of time and
+ * not allow clients to get lots of locks in a short period of time and
* keep all old locks in their caches. Clients will always have to
 * give some locks back if they want to take new ones;
*
* plan is reached.
*/
return LDLM_POOL_MAX_GSP -
- (LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) /
- (1 << (t / LDLM_POOL_GSP_STEP));
+ ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
+ (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
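With the shift rewrite the function is division-free but keeps the same shape: the grant step starts at LDLM_POOL_MIN_GSP and saturates at LDLM_POOL_MAX_GSP as the recalc period grows. A standalone sketch, assuming LDLM_POOL_MIN_GSP is 1 and LDLM_POOL_MAX_GSP is 30 (their usual values in this tree; treat them as assumptions here):

    #include <stdio.h>

    #define LDLM_POOL_MIN_GSP        (1)   /* assumed */
    #define LDLM_POOL_MAX_GSP        (30)  /* assumed */
    #define LDLM_POOL_GSP_STEP_SHIFT (2)

    static int t2gsp(unsigned int t)
    {
            return LDLM_POOL_MAX_GSP -
                   ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
                    (t >> LDLM_POOL_GSP_STEP_SHIFT));
    }

    int main(void)
    {
            unsigned int t;

            /* Prints 1% at t=1s, 16% at t=4s, 23% at t=8s, 30% from t=20s on. */
            for (t = 1; t <= 32; t <<= 1)
                    printf("period %2us -> grant_step %d%%\n", t, t2gsp(t));
            return 0;
    }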
/**
int granted, grant_step, limit;
limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ granted = cfs_atomic_read(&pl->pl_granted);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
grant_step = ((limit - granted) * grant_step) / 100;
*/
static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
- int grant_usage, granted, grant_plan;
- __u64 slv, slv_factor;
+ int granted;
+ int grant_plan;
+ int round_up;
+ __u64 slv;
+ __u64 slv_factor;
+ __u64 grant_usage;
__u32 limit;
slv = pl->pl_server_lock_volume;
grant_plan = pl->pl_grant_plan;
limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ granted = cfs_atomic_read(&pl->pl_granted);
+ round_up = granted < limit;
- grant_usage = limit - (granted - grant_plan);
- if (grant_usage <= 0)
- grant_usage = 1;
+ grant_usage = max_t(int, limit - (granted - grant_plan), 1);
/*
- * Find out SLV change factor which is the ratio of grant usage
- * from limit. SLV changes as fast as the ratio of grant plan
- * consumtion. The more locks from grant plan are not consumed
- * by clients in last interval (idle time), the faster grows
- * SLV. And the opposite, the more grant plan is over-consumed
- * (load time) the faster drops SLV.
+ * Find out the SLV change factor, which is the ratio of grant usage
+ * to limit. SLV changes as fast as the ratio of grant plan
+ * consumption. The more locks from the grant plan are not consumed
+ * by clients in the last interval (idle time), the faster SLV
+ * grows. Conversely, the more the grant plan is over-consumed
+ * (load time), the faster SLV drops.
*/
- slv_factor = (grant_usage * 100) / limit;
+ slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ do_div(slv_factor, limit);
if (2 * abs(granted - limit) > limit) {
slv_factor *= slv_factor;
- slv_factor = dru(slv_factor, 100);
+ slv_factor = dru(slv_factor, LDLM_POOL_SLV_SHIFT, round_up);
}
slv = slv * slv_factor;
- slv = dru(slv, 100);
+ slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
if (slv > ldlm_pool_slv_max(limit)) {
slv = ldlm_pool_slv_max(limit);
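The recalculation now works in units of 1/2^LDLM_POOL_SLV_SHIFT (1/1024) instead of percent, so scaling SLV back down is a shift rather than a second 64-bit division, and round_up biases the rounding upward only while granted < limit. A rough standalone sketch of the fixed-point step (plain C stand-ins for the kernel types and do_div(); the squaring branch for large imbalances is omitted):

    #include <stdint.h>

    #define LDLM_POOL_SLV_SHIFT (10)

    /* Scale slv by grant_usage/limit in fixed point: the factor lives in
     * units of 1/1024, so dividing the product back out is just a shift. */
    static uint64_t recalc_slv(uint64_t slv, uint64_t grant_usage,
                               uint32_t limit, int round_up)
    {
            uint64_t slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT) / limit;
            uint64_t bias = round_up ? (1 << LDLM_POOL_SLV_SHIFT) - 1 : 0;

            return (slv * slv_factor + bias) >> LDLM_POOL_SLV_SHIFT;
    }

With grant_usage == limit the factor is exactly 1 << 10 and SLV is unchanged up to rounding; an under-consumed grant plan pushes the factor above 1024 and grows SLV, while over-consumption shrinks it.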
{
int grant_plan = pl->pl_grant_plan;
__u64 slv = pl->pl_server_lock_volume;
- int granted = atomic_read(&pl->pl_granted);
- int grant_rate = atomic_read(&pl->pl_grant_rate);
- int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ int granted = cfs_atomic_read(&pl->pl_granted);
+ int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+ int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
slv);
*/
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL);
- write_lock(&obd->obd_pool_lock);
+ cfs_write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = pl->pl_server_lock_volume;
- write_unlock(&obd->obd_pool_lock);
+ cfs_write_unlock(&obd->obd_pool_lock);
}
/**
time_t recalc_interval_sec;
ENTRY;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec >= pl->pl_recalc_period) {
/*
recalc_interval_sec);
}
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
RETURN(0);
}
/**
* This function is used on server side as main entry point for memory
- * preasure handling. It decreases SLV on \a pl according to passed
+ * pressure handling. It decreases SLV on \a pl according to passed
* \a nr and \a gfp_mask.
*
 * Our goal here is to decrease SLV in such a way that clients hold \a nr
* VM is asking how many entries may be potentially freed.
*/
if (nr == 0)
- return atomic_read(&pl->pl_granted);
+ return cfs_atomic_read(&pl->pl_granted);
/*
 * The client already canceled locks but the server is in the shrinker
* and can't cancel anything. Let's catch this race.
*/
- if (atomic_read(&pl->pl_granted) == 0)
+ if (cfs_atomic_read(&pl->pl_granted) == 0)
RETURN(0);
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
/*
- * We want shrinker to possibly cause cancelation of @nr locks from
+ * We want shrinker to possibly cause cancellation of @nr locks from
* clients or grant approximately @nr locks smaller next intervals.
*
- * This is why we decresed SLV by @nr. This effect will only be as
+ * This is why we decreased SLV by @nr. This effect will only be as
* long as one re-calc interval (1s these days) and this should be
* enough to pass this decreased SLV to all clients. On next recalc
* interval pool will either increase SLV if locks load is not high
* Make sure that pool informed obd of last SLV changes.
*/
ldlm_srv_pool_push_slv(pl);
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
/*
 * We have not really freed any memory here so far; it will only be
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL && obd != LP_POISON);
LASSERT(obd->obd_type != LP_POISON);
- write_lock(&obd->obd_pool_lock);
+ cfs_write_lock(&obd->obd_pool_lock);
obd->obd_pool_limit = limit;
- write_unlock(&obd->obd_pool_lock);
+ cfs_write_unlock(&obd->obd_pool_lock);
ldlm_pool_set_limit(pl, limit);
RETURN(0);
struct obd_device *obd;
/*
- * Get new SLV and Limit from obd which is updated with comming
+ * Get new SLV and Limit from obd which is updated with incoming
* RPCs.
*/
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL);
- read_lock(&obd->obd_pool_lock);
+ cfs_read_lock(&obd->obd_pool_lock);
pl->pl_server_lock_volume = obd->obd_pool_slv;
ldlm_pool_set_limit(pl, obd->obd_pool_limit);
- read_unlock(&obd->obd_pool_lock);
+ cfs_read_unlock(&obd->obd_pool_lock);
}
/**
- * Recalculates client sise pool \a pl according to current SLV and Limit.
+ * Recalculates client side pool \a pl according to current SLV and Limit.
*/
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
ENTRY;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
/*
* Check if we need to recalc lists now.
*/
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
RETURN(0);
}
pl->pl_recalc_time = cfs_time_current_sec();
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
/*
* Do not cancel locks in case lru resize is disabled for this ns.
}
/**
- * This function is main entry point for memory preasure handling on client side.
- * Main goal of this function is to cancel some number of locks on passed \a pl
- * according to \a nr and \a gfp_mask.
+ * This function is the main entry point for memory pressure handling on
+ * the client side. Its main goal is to cancel some number of locks from
+ * the passed \a pl according to \a nr and \a gfp_mask.
*/
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
*/
ldlm_cli_pool_pop_slv(pl);
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_unused_lock);
unused = ns->ns_nr_unused;
- spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_unused_lock);
if (nr) {
canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC,
}
#ifdef __KERNEL__
/*
- * Retrun the number of potentially reclaimable locks.
+ * Return the number of potentially reclaimable locks.
*/
return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
#else
time_t recalc_interval_sec;
int count;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
/*
/*
* Zero out all rates and speed for the last period.
*/
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
+ cfs_atomic_set(&pl->pl_grant_rate, 0);
+ cfs_atomic_set(&pl->pl_cancel_rate, 0);
+ cfs_atomic_set(&pl->pl_grant_speed, 0);
}
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
if (pl->pl_ops->po_recalc != NULL) {
count = pl->pl_ops->po_recalc(pl);
__u64 slv, clv;
__u32 limit;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
clv = pl->pl_client_lock_volume;
limit = ldlm_pool_get_limit(pl);
grant_plan = pl->pl_grant_plan;
- granted = atomic_read(&pl->pl_granted);
- grant_rate = atomic_read(&pl->pl_grant_rate);
- lvf = atomic_read(&pl->pl_lock_volume_factor);
- grant_speed = atomic_read(&pl->pl_grant_speed);
- cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ granted = cfs_atomic_read(&pl->pl_granted);
+ grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+ lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
+ grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
+ cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
pl->pl_name);
int rc;
ENTRY;
- spin_lock_init(&pl->pl_lock);
- atomic_set(&pl->pl_granted, 0);
+ cfs_spin_lock_init(&pl->pl_lock);
+ cfs_atomic_set(&pl->pl_granted, 0);
pl->pl_recalc_time = cfs_time_current_sec();
- atomic_set(&pl->pl_lock_volume_factor, 1);
+ cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
+ cfs_atomic_set(&pl->pl_grant_rate, 0);
+ cfs_atomic_set(&pl->pl_cancel_rate, 0);
+ cfs_atomic_set(&pl->pl_grant_speed, 0);
pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
} else {
ldlm_pool_set_limit(pl, 1);
- pl->pl_server_lock_volume = 1;
+ pl->pl_server_lock_volume = 0;
pl->pl_ops = &ldlm_cli_pool_ops;
pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
}
ENTRY;
LDLM_DEBUG(lock, "add lock to pool");
- atomic_inc(&pl->pl_granted);
- atomic_inc(&pl->pl_grant_rate);
- atomic_inc(&pl->pl_grant_speed);
+ cfs_atomic_inc(&pl->pl_granted);
+ cfs_atomic_inc(&pl->pl_grant_rate);
+ cfs_atomic_inc(&pl->pl_grant_speed);
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
/*
ENTRY;
LDLM_DEBUG(lock, "del lock from pool");
- LASSERT(atomic_read(&pl->pl_granted) > 0);
- atomic_dec(&pl->pl_granted);
- atomic_inc(&pl->pl_cancel_rate);
- atomic_dec(&pl->pl_grant_speed);
+ LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
+ cfs_atomic_dec(&pl->pl_granted);
+ cfs_atomic_inc(&pl->pl_cancel_rate);
+ cfs_atomic_dec(&pl->pl_grant_speed);
lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
__u64 slv;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);
*/
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
pl->pl_server_lock_volume = slv;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
__u64 slv;
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
slv = pl->pl_client_lock_volume;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);
*/
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
- spin_lock(&pl->pl_lock);
+ cfs_spin_lock(&pl->pl_lock);
pl->pl_client_lock_volume = clv;
- spin_unlock(&pl->pl_lock);
+ cfs_spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);
*/
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_limit);
+ return cfs_atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);
*/
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
- atomic_set(&pl->pl_limit, limit);
+ cfs_atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
*/
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_lock_volume_factor);
+ return cfs_atomic_read(&pl->pl_lock_volume_factor);
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);
#ifdef __KERNEL__
static int ldlm_pool_granted(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_granted);
+ return cfs_atomic_read(&pl->pl_granted);
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-static struct completion ldlm_pools_comp;
+static struct cfs_shrinker *ldlm_pools_srv_shrinker;
+static struct cfs_shrinker *ldlm_pools_cli_shrinker;
+static cfs_completion_t ldlm_pools_comp;
/*
* Cancel \a nr locks from all namespaces (if possible). Returns number of
/*
* Find out how many resources we may release.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ cfs_mutex_up(ldlm_namespace_lock(client));
cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
ldlm_namespace_put(ns, 1);
}
/*
* Shrink at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
int cancel, nr_locks;
/*
* Do not call shrink under ldlm_namespace_lock(client)
*/
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ cfs_mutex_up(ldlm_namespace_lock(client));
/*
* If list is empty, we can't return any @cached > 0,
* that probably would cause needless shrinker
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
cancel = 1 + nr_locks * nr / total;
/*
* Check all modest namespaces first.
*/
- mutex_down(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
{
if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
continue;
/*
* Set the modest pools limit equal to their avg granted
- * locks + 5%.
+ * locks + ~6%.
*/
- l += dru(l * LDLM_POOLS_MODEST_MARGIN, 100);
+ l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
ldlm_pool_setup(&ns->ns_pool, l);
nr_l += l;
nr_p++;
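Since dru() now takes a shift, the modest margin becomes a power-of-two fraction: assuming LDLM_POOLS_MODEST_MARGIN_SHIFT is 4 (consistent with the "~6%" in the updated comment), the old exact 5% becomes l/16 = 6.25%. A tiny illustration:

    #include <assert.h>
    #include <stdint.h>

    #define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)  /* assumed value, gives ~6% */

    int main(void)
    {
            uint32_t l = 1600;

            /* dru(l, SHIFT, 0) truncates, so the margin is l >> 4 = l/16. */
            l += l >> LDLM_POOLS_MODEST_MARGIN_SHIFT;
            assert(l == 1700);
            return 0;
    }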
/*
* The rest is given to greedy namespaces.
*/
- list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
+ cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
{
if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
continue;
* for _all_ pools.
*/
l = LDLM_POOL_HOST_L /
- atomic_read(ldlm_namespace_nr(client));
+ cfs_atomic_read(
+ ldlm_namespace_nr(client));
} else {
/*
* All the rest of greedy pools will have
* all locks in equal parts.
*/
l = (LDLM_POOL_HOST_L - nr_l) /
- (atomic_read(ldlm_namespace_nr(client)) -
+ (cfs_atomic_read(
+ ldlm_namespace_nr(client)) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
}
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
}
/*
* Recalc at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+ for (nr = cfs_atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
int skip;
/*
* Lock the list, get first @ns in the list, getref, move it
* rid of potential deadlock on client nodes when canceling
* locks synchronously.
*/
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_down(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ cfs_mutex_up(ldlm_namespace_lock(client));
break;
}
ns = ldlm_namespace_first_locked(client);
- spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_hash_lock);
/*
* skip ns which is being freed, and we don't want to increase
* its refcount again, not even temporarily. bz21519.
skip = 0;
ldlm_namespace_get_locked(ns);
}
- spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_hash_lock);
ldlm_namespace_move_locked(ns, client);
- mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_up(ldlm_namespace_lock(client));
/*
* After setup is done - recalc the pool.
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
t_name, cfs_curproc_pid());
- complete_and_exit(&ldlm_pools_comp, 0);
+ cfs_complete_and_exit(&ldlm_pools_comp, 0);
}
static int ldlm_pools_thread_start(void)
if (ldlm_pools_thread == NULL)
RETURN(-ENOMEM);
- init_completion(&ldlm_pools_comp);
+ cfs_init_completion(&ldlm_pools_comp);
cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
/*
* This fixes possible race and oops due to accessing freed memory
* in pools thread.
*/
- wait_for_completion(&ldlm_pools_comp);
+ cfs_wait_for_completion(&ldlm_pools_comp);
OBD_FREE_PTR(ldlm_pools_thread);
ldlm_pools_thread = NULL;
EXIT;
rc = ldlm_pools_thread_start();
if (rc == 0) {
- ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
+ ldlm_pools_srv_shrinker =
+ cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+ ldlm_pools_srv_shrink);
+ ldlm_pools_cli_shrinker =
+ cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+ ldlm_pools_cli_shrink);
}
RETURN(rc);
}
void ldlm_pools_fini(void)
{
if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
+ cfs_remove_shrinker(ldlm_pools_srv_shrinker);
ldlm_pools_srv_shrinker = NULL;
}
if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
+ cfs_remove_shrinker(ldlm_pools_cli_shrinker);
ldlm_pools_cli_shrinker = NULL;
}
ldlm_pools_thread_stop();