/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (c) 2007 Cluster File Systems, Inc.
- * Author: Yury Umanets <umka@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_pool.c
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
*/
-/* Idea of this code is rather simple. Each second, for each server namespace
+/*
+ * The idea of this code is rather simple. Each second, for each server namespace
* we have SLV - server lock volume which is calculated on current number of
* granted locks, grant speed for past period, etc - that is, locking load.
* This SLV number may be thought as a flow definition for simplicity. It is
* pl_cancel_rate - Number of canceled locks for last T (calculated);
* pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
* pl_grant_plan - Planned number of granted locks for next T (calculated);
- *
- * pl_grant_step - Grant plan step, that is how ->pl_grant_plan
- * will change in next T (tunable);
- *
* pl_server_lock_volume - Current server lock volume (calculated);
*
* As it may be seen from list above, we have few possible tunables which may
# include <lustre_dlm.h>
#else
# include <liblustre.h>
-# include <libcfs/kp30.h>
#endif
+#include <cl_object.h>
+
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"
#ifdef HAVE_LRU_RESIZE_SUPPORT
-/* 50 ldlm locks for 1MB of RAM. */
-#define LDLM_POOL_HOST_L ((num_physpages >> (20 - PAGE_SHIFT)) * 50)
+/*
+ * 50 ldlm locks for 1MB of RAM.
+ */
+#define LDLM_POOL_HOST_L ((num_physpages >> (20 - CFS_PAGE_SHIFT)) * 50)
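+/*
+ * For example (an illustration only, assuming 4K pages, i.e.
+ * CFS_PAGE_SHIFT == 12): a host with 4GB of RAM has
+ * num_physpages == 1048576, which gives (1048576 >> 8) * 50 == 204800
+ * locks for LDLM_POOL_HOST_L.
+ */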
-/* Default step in % for grant plan. */
-#define LDLM_POOL_GSP (5)
+/*
+ * Maximal possible grant plan step in %.
+ */
+#define LDLM_POOL_MAX_GSP (30)
-/* LDLM_POOL_GSP% of all locks is default GP. */
-#define LDLM_POOL_GP(L) ((L) * LDLM_POOL_GSP / 100)
+/*
+ * Minimal possible grant plan step in %.
+ */
+#define LDLM_POOL_MIN_GSP (1)
-/* Max age for locks on clients. */
+/*
+ * This controls the speed of reaching LDLM_POOL_MAX_GSP
+ * with increasing thread period.
+ */
+#define LDLM_POOL_GSP_STEP (4)
+
+/*
+ * LDLM_POOL_MAX_GSP% of all locks is the default GP.
+ */
+#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
+
+/*
+ * Max age for locks on clients.
+ */
#define LDLM_POOL_MAX_AGE (36000)
#ifdef __KERNEL__
static inline __u64 ldlm_pool_slv_max(__u32 L)
{
- /* Allow to have all locks for 1 client for 10 hrs.
- * Formula is the following: limit * 10h / 1 client. */
+ /*
+ * Allow to have all locks for 1 client for 10 hrs.
+ * Formula is the following: limit * 10h / 1 client.
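+         * Here LDLM_POOL_MAX_AGE == 36000 seconds, i.e. 10 hours.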
+ */
__u64 lim = L * LDLM_POOL_MAX_AGE / 1;
return lim;
}
}
enum {
- LDLM_POOL_GRANTED_STAT = 0,
+ LDLM_POOL_FIRST_STAT = 0,
+ LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
+ LDLM_POOL_GRANT_STAT,
+ LDLM_POOL_CANCEL_STAT,
LDLM_POOL_GRANT_RATE_STAT,
LDLM_POOL_CANCEL_RATE_STAT,
LDLM_POOL_GRANT_PLAN_STAT,
LDLM_POOL_SLV_STAT,
+ LDLM_POOL_SHRINK_REQTD_STAT,
+ LDLM_POOL_SHRINK_FREED_STAT,
+ LDLM_POOL_RECALC_STAT,
+ LDLM_POOL_TIMING_STAT,
LDLM_POOL_LAST_STAT
};
return container_of(pl, struct ldlm_namespace, ns_pool);
}
-/* Should be called under ->pl_lock taken */
+/**
+ * Calculates suggested grant_step in % of available locks for the passed
+ * period \a t. This is later used in grant_plan calculations.
+ */
+static inline int ldlm_pool_t2gsp(int t)
+{
+ /*
+         * This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
+         * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
+         *
+         * How this will affect execution is the following:
+         *
+         * - for a thread period of 1s we will have a 1% grant_step, which
+         * is good from the pov of taking some load off the server and
+         * pushing it out to clients. This is because a 1% grant_step means
+         * that the server will not allow clients to get lots of locks in a
+         * short period of time and keep all old locks in their caches.
+         * Clients will always have to give some locks back if they want to
+         * take new ones;
+         *
+         * - for a thread period of 10s (the default) we will have 23%, which
+         * means that clients will have enough room to take new locks
+         * without giving any back. All locks from this 23% which were not
+         * taken by clients in the current period will contribute to SLV
+         * growth. SLV growth means more locks cached on clients until the
+         * limit or grant plan is reached.
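+         *
+         * For example, with LDLM_POOL_GSP_STEP == 4 and the default
+         * period t == 10: 1 << (10 / 4) == 4, so the result is
+         * 30 - (30 - 1) / 4 == 23 (%), the 23% mentioned above.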
+ */
+ return LDLM_POOL_MAX_GSP -
+ (LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) /
+ (1 << (t / LDLM_POOL_GSP_STEP));
+}
+
+/**
+ * Recalculates next grant plan on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
- int grant_plan, granted;
- __u32 limit;
-
+ int granted, grant_step, limit;
+
limit = ldlm_pool_get_limit(pl);
granted = atomic_read(&pl->pl_granted);
- grant_plan = granted + ((limit - granted) *
- atomic_read(&pl->pl_grant_step)) / 100;
- atomic_set(&pl->pl_grant_plan, grant_plan);
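+        /*
+         * For example (illustrative numbers only): with limit == 100,
+         * granted == 60 and the default 10s recalc period (grant_step
+         * == 23%), grant_step becomes (40 * 23) / 100 == 9 and the new
+         * grant_plan is 69.
+         */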
+ grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+ grant_step = ((limit - granted) * grant_step) / 100;
+ pl->pl_grant_plan = granted + grant_step;
}
-/* Should be called under ->pl_lock taken */
+/**
+ * Recalculates next SLV on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
- int slv_factor, granted, grant_plan;
+ int grant_usage, granted, grant_plan;
+ __u64 slv, slv_factor;
__u32 limit;
- __u64 slv;
- slv = ldlm_pool_get_slv(pl);
+ slv = pl->pl_server_lock_volume;
+ grant_plan = pl->pl_grant_plan;
limit = ldlm_pool_get_limit(pl);
granted = atomic_read(&pl->pl_granted);
- grant_plan = atomic_read(&pl->pl_grant_plan);
- if ((slv_factor = limit - (granted - grant_plan)) <= 0)
- slv_factor = 1;
-
- slv = (slv * ((slv_factor * 100) / limit));
+ grant_usage = limit - (granted - grant_plan);
+ if (grant_usage <= 0)
+ grant_usage = 1;
+
+ /*
+         * Find out the SLV change factor, which is the ratio of grant
+         * usage to the limit. SLV changes as fast as the ratio of grant
+         * plan consumption. The more locks from the grant plan are left
+         * unconsumed by clients in the last interval (idle time), the
+         * faster SLV grows. Conversely, the more the grant plan is
+         * over-consumed (load time), the faster SLV drops.
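+         *
+         * For example (illustrative numbers only): limit == 100,
+         * granted == 90 and grant_plan == 95 give grant_usage == 105
+         * and slv_factor == 105, i.e. SLV grows by 5% this period.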
+ */
+ slv_factor = (grant_usage * 100) / limit;
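+        /*
+         * When granted differs from the limit by more than half of the
+         * limit, square the factor (scaling back by 100) so that SLV
+         * converges faster.
+         */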
+ if (2 * abs(granted - limit) > limit) {
+ slv_factor *= slv_factor;
+ slv_factor = dru(slv_factor, 100);
+ }
+ slv = slv * slv_factor;
slv = dru(slv, 100);
if (slv > ldlm_pool_slv_max(limit)) {
slv = ldlm_pool_slv_min(limit);
}
- ldlm_pool_set_slv(pl, slv);
+ pl->pl_server_lock_volume = slv;
}
+/**
+ * Recalculates next stats on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
- __u64 slv = ldlm_pool_get_slv(pl);
- __u32 granted = atomic_read(&pl->pl_granted);
- __u32 grant_rate = atomic_read(&pl->pl_grant_rate);
- __u32 grant_plan = atomic_read(&pl->pl_grant_plan);
- __u32 cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ int grant_plan = pl->pl_grant_plan;
+ __u64 slv = pl->pl_server_lock_volume;
+ int granted = atomic_read(&pl->pl_granted);
+ int grant_rate = atomic_read(&pl->pl_grant_rate);
+ int cancel_rate = atomic_read(&pl->pl_cancel_rate);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
slv);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
granted);
cancel_rate);
}
+/**
+ * Sets the current SLV into the obd accessible via ldlm_pl2ns(pl)->ns_obd.
+ */
+static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
+{
+ struct obd_device *obd;
+
+ /*
+         * Set the new SLV in the obd field for later use without accessing
+         * the pool. This is required to avoid a race between sending a
+         * reply to a client with the new SLV and cleaning up the server
+         * stack, in which case we can't guarantee that the namespace is
+         * still alive. We only know that the obd is alive as long as a
+         * valid export is alive.
+ */
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL);
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_slv = pl->pl_server_lock_volume;
+ write_unlock(&obd->obd_pool_lock);
+}
+
+/**
+ * Recalculates all pool fields on passed \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
ENTRY;
spin_lock(&pl->pl_lock);
- recalc_interval_sec = cfs_duration_sec(cfs_time_current() -
- pl->pl_update_time);
- if (recalc_interval_sec > 0) {
- /* Update statistics */
- ldlm_pool_recalc_stats(pl);
-
- /* Recalc SLV after last period. This should be done
- * _before_ recalculating new grant plan. */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec >= pl->pl_recalc_period) {
+ /*
+ * Recalc SLV after last period. This should be done
+ * _before_ recalculating new grant plan.
+ */
ldlm_pool_recalc_slv(pl);
- /* Update grant_plan for new period. */
+ /*
+ * Make sure that pool informed obd of last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
+
+ /*
+ * Update grant_plan for new period.
+ */
ldlm_pool_recalc_grant_plan(pl);
- pl->pl_update_time = cfs_time_current();
- /* Zero out all rates and speed for the last period. */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
}
+
spin_unlock(&pl->pl_lock);
RETURN(0);
}
-/* Our goal here is to decrease SLV the way to make a client hold
- * @nr locks smaller in next 10h. */
+/**
+ * This function is used on the server side as the main entry point for
+ * memory pressure handling. It decreases SLV on \a pl according to the
+ * passed \a nr and \a gfp_mask.
+ *
+ * Our goal here is to decrease SLV in such a way that clients hold
+ * \a nr fewer locks over the next 10h.
+ */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
{
- __u32 granted, limit;
- __u64 slv_delta;
- ENTRY;
+ __u32 limit;
- /* Client already canceled locks but server is already in shrinker and
- * can't cancel anything. Let's catch this race. */
- if ((granted = atomic_read(&pl->pl_granted)) == 0)
+ /*
+ * VM is asking how many entries may be potentially freed.
+ */
+ if (nr == 0)
+ return atomic_read(&pl->pl_granted);
+
+ /*
+         * The client has already canceled locks but the server is already
+         * in the shrinker and can't cancel anything. Let's catch this race.
+ */
+ if (atomic_read(&pl->pl_granted) == 0)
RETURN(0);
spin_lock(&pl->pl_lock);
- /* Simple proportion but it gives impression on how much should be
- * SLV changed for request @nr of locks to be canceled.*/
- slv_delta = nr * ldlm_pool_get_slv(pl);
- limit = ldlm_pool_get_limit(pl);
- do_div(slv_delta, granted);
-
- /* As SLV has some dependence on historical data, that is new value
- * is based on old one, this decreasing will make clients get some
- * locks back to the server and after some time it will stabilize.*/
- if (slv_delta < ldlm_pool_get_slv(pl))
- ldlm_pool_set_slv(pl, ldlm_pool_get_slv(pl) - slv_delta);
- else
- ldlm_pool_set_slv(pl, ldlm_pool_slv_min(limit));
+ /*
+         * We want the shrinker to possibly cause cancellation of @nr
+         * locks from clients, or to grant approximately @nr fewer locks
+         * in the next intervals.
+         *
+         * This is why we decrease SLV by @nr. This effect will only last
+         * for one recalc interval (1s these days) and this should be
+         * enough to pass the decreased SLV to all clients. On the next
+         * recalc interval the pool will either increase SLV if the lock
+         * load is not high, keep it at the same level, or decrease it
+         * again; thus, the shrinker-decreased SLV will affect the next
+         * recalc intervals and this way will lower the locking load.
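+         *
+         * For example (illustrative numbers only): SLV == 10000 and
+         * nr == 300 give a new SLV of 9700 for the next intervals.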
+ */
+ if (nr < pl->pl_server_lock_volume) {
+ pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
+ } else {
+ limit = ldlm_pool_get_limit(pl);
+ pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
+ }
+
+ /*
+ * Make sure that pool informed obd of last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
spin_unlock(&pl->pl_lock);
- /* We did not really free any memory here so far, it only will be
- * freed later may be, so that we return 0 to not confuse VM. */
+ /*
+         * We did not really free any memory here so far; it may only be
+         * freed later. So we return 0 to not confuse the VM.
+ */
+ return 0;
+}
+
+/**
+ * Setup server side pool \a pl with passed \a limit.
+ */
+static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
+{
+ struct obd_device *obd;
+ ENTRY;
+
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL && obd != LP_POISON);
+ LASSERT(obd->obd_type != LP_POISON);
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_limit = limit;
+ write_unlock(&obd->obd_pool_lock);
+
+ ldlm_pool_set_limit(pl, limit);
RETURN(0);
}
+/**
+ * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
+ */
+static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
+{
+ struct obd_device *obd;
+
+ /*
+         * Get the new SLV and Limit from the obd, which is updated with
+         * incoming RPCs.
+ */
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL);
+ read_lock(&obd->obd_pool_lock);
+ pl->pl_server_lock_volume = obd->obd_pool_slv;
+ ldlm_pool_set_limit(pl, obd->obd_pool_limit);
+ read_unlock(&obd->obd_pool_lock);
+}
+
+/**
+ * Recalculates client side pool \a pl according to current SLV and Limit.
+ */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
ENTRY;
spin_lock(&pl->pl_lock);
+ /*
+ * Check if we need to recalc lists now.
+ */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
+ RETURN(0);
+ }
- recalc_interval_sec = cfs_duration_sec(cfs_time_current() -
- pl->pl_update_time);
- if (recalc_interval_sec > 0) {
- /* Update statistics only every T */
- ldlm_pool_recalc_stats(pl);
+ /*
+ * Make sure that pool knows last SLV and Limit from obd.
+ */
+ ldlm_cli_pool_pop_slv(pl);
- /* Zero out grant/cancel rates and speed for last period. */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
- }
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
spin_unlock(&pl->pl_lock);
- /* Recalc client pool is done without taking into account pl_update_time
- * as this may be called voluntary in the case of emergency. Client
- * recalc does not calculate anything, we do not risk to have skew
- * of some pool param. */
- ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_ASYNC);
- RETURN(0);
+ /*
+ * Do not cancel locks in case lru resize is disabled for this ns.
+ */
+ if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
+ RETURN(0);
+
+ /*
+         * When canceling locks on the client we do not need to maintain
+         * sharp timing; we only want to cancel locks asap according to the
+         * new SLV. This may be called when SLV has changed a lot, which is
+         * why we do not take pl->pl_recalc_time into account here.
+ */
+ RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC,
+ LDLM_CANCEL_LRUR));
}
+/**
+ * This function is the main entry point for memory pressure handling on the
+ * client side. Its main goal is to cancel some number of locks on the passed
+ * \a pl according to \a nr and \a gfp_mask.
+ */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
{
- ENTRY;
- RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), nr, LDLM_SYNC));
+ struct ldlm_namespace *ns;
+ int canceled = 0, unused;
+
+ ns = ldlm_pl2ns(pl);
+
+ /*
+ * Do not cancel locks in case lru resize is disabled for this ns.
+ */
+ if (!ns_connect_lru_resize(ns))
+ RETURN(0);
+
+ /*
+ * Make sure that pool knows last SLV and Limit from obd.
+ */
+ ldlm_cli_pool_pop_slv(pl);
+
+ spin_lock(&ns->ns_unused_lock);
+ unused = ns->ns_nr_unused;
+ spin_unlock(&ns->ns_unused_lock);
+
+ if (nr) {
+ canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC,
+ LDLM_CANCEL_SHRINK);
+ }
+#ifdef __KERNEL__
+ /*
+        * Return the number of potentially reclaimable locks.
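+        *
+        * For example (illustrative numbers only): unused == 1000,
+        * canceled == 100 and sysctl_vfs_cache_pressure == 100 (the
+        * kernel default) give (900 / 100) * 100 == 900.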
+ */
+ return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
+#else
+ return unused - canceled;
+#endif
}
+struct ldlm_pool_ops ldlm_srv_pool_ops = {
+ .po_recalc = ldlm_srv_pool_recalc,
+ .po_shrink = ldlm_srv_pool_shrink,
+ .po_setup = ldlm_srv_pool_setup
+};
+
+struct ldlm_pool_ops ldlm_cli_pool_ops = {
+ .po_recalc = ldlm_cli_pool_recalc,
+ .po_shrink = ldlm_cli_pool_shrink
+};
+
+/**
+ * Pool recalc wrapper. Will call either the client or server pool recalc
+ * callback depending on what kind of pool \a pl is.
+ */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
- if (pl->pl_recalc != NULL && pool_recalc_enabled(pl))
- return pl->pl_recalc(pl);
+ time_t recalc_interval_sec;
+ int count;
+
+ spin_lock(&pl->pl_lock);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec > 0) {
+ /*
+ * Update pool statistics every 1s.
+ */
+ ldlm_pool_recalc_stats(pl);
+
+ /*
+ * Zero out all rates and speed for the last period.
+ */
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ atomic_set(&pl->pl_grant_speed, 0);
+ }
+ spin_unlock(&pl->pl_lock);
+
+ if (pl->pl_ops->po_recalc != NULL) {
+ count = pl->pl_ops->po_recalc(pl);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
+ count);
+ return count;
+ }
+
return 0;
}
EXPORT_SYMBOL(ldlm_pool_recalc);
+/**
+ * Pool shrink wrapper. Will call either the client or server pool shrink
+ * callback depending on what kind of pool \a pl is.
+ */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
unsigned int gfp_mask)
{
- if (pl->pl_shrink != NULL && pool_shrink_enabled(pl)) {
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks\n",
- pl->pl_name, nr);
- return pl->pl_shrink(pl, nr, gfp_mask);
+ int cancel = 0;
+
+ if (pl->pl_ops->po_shrink != NULL) {
+ cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
+ if (nr > 0) {
+ lprocfs_counter_add(pl->pl_stats,
+ LDLM_POOL_SHRINK_REQTD_STAT,
+ nr);
+ lprocfs_counter_add(pl->pl_stats,
+ LDLM_POOL_SHRINK_FREED_STAT,
+ cancel);
+ CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
+ "shrunk %d\n", pl->pl_name, nr, cancel);
+ }
}
- return 0;
+ return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);
-/* The purpose of this function is to re-setup limit and maximal allowed
- * slv according to the passed limit. */
-int ldlm_pool_setup(struct ldlm_pool *pl, __u32 limit)
+/**
+ * Pool setup wrapper. Will call either the client or server pool setup
+ * callback depending on what kind of pool \a pl is.
+ *
+ * Sets passed \a limit into pool \a pl.
+ */
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
ENTRY;
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_SERVER) {
- spin_lock(&pl->pl_lock);
- ldlm_pool_set_limit(pl, limit);
- spin_unlock(&pl->pl_lock);
- }
+ if (pl->pl_ops->po_setup != NULL)
+ RETURN(pl->pl_ops->po_setup(pl, limit));
RETURN(0);
}
EXPORT_SYMBOL(ldlm_pool_setup);
static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- __u32 granted, grant_rate, cancel_rate, grant_step;
- int nr = 0, grant_speed, grant_plan;
+ int granted, grant_rate, cancel_rate, grant_step;
+ int nr = 0, grant_speed, grant_plan, lvf;
struct ldlm_pool *pl = data;
+ __u64 slv, clv;
__u32 limit;
- __u64 slv;
spin_lock(&pl->pl_lock);
- slv = ldlm_pool_get_slv(pl);
+ slv = pl->pl_server_lock_volume;
+ clv = pl->pl_client_lock_volume;
limit = ldlm_pool_get_limit(pl);
+ grant_plan = pl->pl_grant_plan;
granted = atomic_read(&pl->pl_granted);
grant_rate = atomic_read(&pl->pl_grant_rate);
- grant_plan = atomic_read(&pl->pl_grant_plan);
- grant_step = atomic_read(&pl->pl_grant_step);
+ lvf = atomic_read(&pl->pl_lock_volume_factor);
grant_speed = atomic_read(&pl->pl_grant_speed);
cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
spin_unlock(&pl->pl_lock);
nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
pl->pl_name);
nr += snprintf(page + nr, count - nr, " SLV: "LPU64"\n", slv);
-
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_CLIENT) {
- nr += snprintf(page + nr, count - nr, " LVF: %d\n",
- atomic_read(&pl->pl_lock_volume_factor));
+ nr += snprintf(page + nr, count - nr, " CLV: "LPU64"\n", clv);
+ nr += snprintf(page + nr, count - nr, " LVF: %d\n", lvf);
+
+ if (ns_is_server(ldlm_pl2ns(pl))) {
+ nr += snprintf(page + nr, count - nr, " GSP: %d%%\n",
+ grant_step);
+ nr += snprintf(page + nr, count - nr, " GP: %d\n",
+ grant_plan);
}
- nr += snprintf(page + nr, count - nr, " GSP: %d%%\n",
- grant_step);
- nr += snprintf(page + nr, count - nr, " GP: %d\n",
- grant_plan);
nr += snprintf(page + nr, count - nr, " GR: %d\n",
grant_rate);
nr += snprintf(page + nr, count - nr, " CR: %d\n",
return nr;
}
+LDLM_POOL_PROC_READER(grant_plan, int);
+LDLM_POOL_PROC_READER(recalc_period, int);
+LDLM_POOL_PROC_WRITER(recalc_period, int);
+
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
struct ldlm_namespace *ns = ldlm_pl2ns(pl);
pool_vars[0].read_fptr = lprocfs_rd_atomic;
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
- snprintf(var_name, MAX_STRING_SIZE, "control");
- pool_vars[0].data = &pl->pl_control;
- pool_vars[0].read_fptr = lprocfs_rd_uint;
- pool_vars[0].write_fptr = lprocfs_wr_uint;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
pool_vars[0].data = &pl->pl_grant_speed;
pool_vars[0].read_fptr = lprocfs_rd_atomic;
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
snprintf(var_name, MAX_STRING_SIZE, "grant_plan");
- pool_vars[0].data = &pl->pl_grant_plan;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
+ pool_vars[0].data = pl;
+ pool_vars[0].read_fptr = lprocfs_rd_grant_plan;
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
- snprintf(var_name, MAX_STRING_SIZE, "grant_step");
- pool_vars[0].data = &pl->pl_grant_step;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- if (ns->ns_client == LDLM_NAMESPACE_SERVER)
- pool_vars[0].write_fptr = lprocfs_wr_atomic;
+ snprintf(var_name, MAX_STRING_SIZE, "recalc_period");
+ pool_vars[0].data = pl;
+ pool_vars[0].read_fptr = lprocfs_rd_recalc_period;
+ pool_vars[0].write_fptr = lprocfs_wr_recalc_period;
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
- if (ns->ns_client == LDLM_NAMESPACE_CLIENT) {
- snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
- pool_vars[0].data = &pl->pl_lock_volume_factor;
- pool_vars[0].read_fptr = lprocfs_rd_uint;
- pool_vars[0].write_fptr = lprocfs_wr_uint;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
- }
+ snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
+ pool_vars[0].data = &pl->pl_lock_volume_factor;
+ pool_vars[0].read_fptr = lprocfs_rd_atomic;
+ pool_vars[0].write_fptr = lprocfs_wr_atomic;
+ lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
snprintf(var_name, MAX_STRING_SIZE, "state");
pool_vars[0].data = pl;
lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
- LDLM_POOL_GRANTED_STAT);
+ LDLM_POOL_FIRST_STAT, 0);
if (!pl->pl_stats)
GOTO(out_free_name, rc = -ENOMEM);
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
"granted", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "grant", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "cancel", "locks");
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
"grant_rate", "locks/s");
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
"slv", "slv");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "shrink_request", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "shrink_freed", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "recalc_freed", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "recalc_timing", "sec");
lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
EXIT;
spin_lock_init(&pl->pl_lock);
atomic_set(&pl->pl_granted, 0);
- pl->pl_update_time = cfs_time_current();
+ pl->pl_recalc_time = cfs_time_current_sec();
atomic_set(&pl->pl_lock_volume_factor, 1);
atomic_set(&pl->pl_grant_rate, 0);
atomic_set(&pl->pl_cancel_rate, 0);
atomic_set(&pl->pl_grant_speed, 0);
- pl->pl_control = LDLM_POOL_CTL_FULL;
- atomic_set(&pl->pl_grant_step, LDLM_POOL_GSP);
- atomic_set(&pl->pl_grant_plan, LDLM_POOL_GP(LDLM_POOL_HOST_L));
+ pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
ns->ns_name, idx);
if (client == LDLM_NAMESPACE_SERVER) {
- pl->pl_recalc = ldlm_srv_pool_recalc;
- pl->pl_shrink = ldlm_srv_pool_shrink;
+ pl->pl_ops = &ldlm_srv_pool_ops;
ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
- ldlm_pool_set_slv(pl, ldlm_pool_slv_max(LDLM_POOL_HOST_L));
+ pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
+ pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
} else {
- ldlm_pool_set_slv(pl, 1);
ldlm_pool_set_limit(pl, 1);
- pl->pl_recalc = ldlm_cli_pool_recalc;
- pl->pl_shrink = ldlm_cli_pool_shrink;
+ pl->pl_server_lock_volume = 1;
+ pl->pl_ops = &ldlm_cli_pool_ops;
+ pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
}
-
+ pl->pl_client_lock_volume = 0;
rc = ldlm_pool_proc_init(pl);
if (rc)
RETURN(rc);
{
ENTRY;
ldlm_pool_proc_fini(pl);
- pl->pl_recalc = NULL;
- pl->pl_shrink = NULL;
+
+ /*
+         * Pool should not be used after this point. We can't free it here
+         * as it lives in struct ldlm_namespace, but we are still interested
+         * in catching any abnormal use.
+ */
+ POISON(pl, 0x5a, sizeof(*pl));
EXIT;
}
EXPORT_SYMBOL(ldlm_pool_fini);
+/**
+ * Add new taken ldlm lock \a lock into pool \a pl accounting.
+ */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
+ /*
+         * FLOCK locks are special in the sense that they are almost never
+         * canceled; instead, a special kind of lock is used to drop them.
+         * Also, there is no LRU for flock locks, so there is no point in
+         * tracking them anyway.
+ */
+ if (lock->l_resource->lr_type == LDLM_FLOCK)
+ return;
ENTRY;
+
+ LDLM_DEBUG(lock, "add lock to pool");
atomic_inc(&pl->pl_granted);
atomic_inc(&pl->pl_grant_rate);
atomic_inc(&pl->pl_grant_speed);
- /* No need to recalc client pools here as this is already done
- * on enqueue/cancel and locks to cancel already packed to the
- * rpc. */
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_SERVER)
+ lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
+ /*
+         * Do not do pool recalc for the client side as all locks which
+         * potentially may be canceled have already been packed into the
+         * enqueue/cancel RPC. Also, we do not want to run out of stack
+         * with too-long call paths.
+ */
+ if (ns_is_server(ldlm_pl2ns(pl)))
ldlm_pool_recalc(pl);
EXIT;
}
EXPORT_SYMBOL(ldlm_pool_add);
+/**
+ * Remove ldlm lock \a lock from pool \a pl accounting.
+ */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
+ /*
+         * Filter out FLOCK locks. See the comment in ldlm_pool_add() above.
+ */
+ if (lock->l_resource->lr_type == LDLM_FLOCK)
+ return;
ENTRY;
+
+ LDLM_DEBUG(lock, "del lock from pool");
LASSERT(atomic_read(&pl->pl_granted) > 0);
atomic_dec(&pl->pl_granted);
atomic_inc(&pl->pl_cancel_rate);
atomic_dec(&pl->pl_grant_speed);
-
- /* Same as in ldlm_pool_add() */
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_SERVER)
+
+ lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
+
+ if (ns_is_server(ldlm_pl2ns(pl)))
ldlm_pool_recalc(pl);
EXIT;
}
EXPORT_SYMBOL(ldlm_pool_del);
-/* ->pl_lock should be taken. */
+/**
+ * Returns current \a pl SLV.
+ *
+ * \pre ->pl_lock is not locked.
+ */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
- return pl->pl_server_lock_volume;
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_server_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);
-/* ->pl_lock should be taken. */
+/**
+ * Sets passed \a slv to \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
+ spin_lock(&pl->pl_lock);
pl->pl_server_lock_volume = slv;
+ spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
+/**
+ * Returns current \a pl CLV.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
+{
+        __u64 clv;
+        spin_lock(&pl->pl_lock);
+        clv = pl->pl_client_lock_volume;
+        spin_unlock(&pl->pl_lock);
+        return clv;
+}
+EXPORT_SYMBOL(ldlm_pool_get_clv);
+
+/**
+ * Sets passed \a clv to \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
+{
+ spin_lock(&pl->pl_lock);
+ pl->pl_client_lock_volume = clv;
+ spin_unlock(&pl->pl_lock);
+}
+EXPORT_SYMBOL(ldlm_pool_set_clv);
+
+/**
+ * Returns current \a pl limit.
+ */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);
+/**
+ * Sets passed \a limit to \a pl.
+ */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
-/* Server side is only enabled for kernel space for now. */
+/**
+ * Returns current LVF from \a pl.
+ */
+__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
+{
+ return atomic_read(&pl->pl_lock_volume_factor);
+}
+EXPORT_SYMBOL(ldlm_pool_get_lvf);
+
#ifdef __KERNEL__
static int ldlm_pool_granted(struct ldlm_pool *pl)
{
static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
-void ldlm_pools_wakeup(void)
-{
- ENTRY;
- if (ldlm_pools_thread == NULL)
- return;
- ldlm_pools_thread->t_flags |= SVC_EVENT;
- cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
- EXIT;
-}
-EXPORT_SYMBOL(ldlm_pools_wakeup);
-
-/* Cancel @nr locks from all namespaces (if possible). Returns number of
+/*
+ * Cancel \a nr locks from all namespaces (if possible). Returns number of
* cached locks after shrink is finished. All namespaces are asked to
- * cancel approximately equal amount of locks. */
-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
+ * cancel an approximately equal amount of locks to keep them balanced.
+ */
+static int ldlm_pools_shrink(ldlm_side_t client, int nr,
unsigned int gfp_mask)
{
int total = 0, cached = 0, nr_ns;
struct ldlm_namespace *ns;
+ void *cookie;
if (nr != 0 && !(gfp_mask & __GFP_FS))
return -1;
- CDEBUG(D_DLMTRACE, "request to shrink %d %s locks from all pools\n",
+ CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
- /* Find out how many resources we may release. */
- mutex_down(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
- total += ldlm_pool_granted(&ns->ns_pool);
- mutex_up(ldlm_namespace_lock(client));
+ cookie = cl_env_reenter();
+
+ /*
+ * Find out how many resources we may release.
+ */
+ for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ nr_ns > 0; nr_ns--)
+ {
+ mutex_down(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_up(ldlm_namespace_lock(client));
+ cl_env_reexit(cookie);
+ return 0;
+ }
+ ns = ldlm_namespace_first_locked(client);
+ ldlm_namespace_get(ns);
+ ldlm_namespace_move_locked(ns, client);
+ mutex_up(ldlm_namespace_lock(client));
+ total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
+ ldlm_namespace_put(ns, 1);
+ }
- if (nr == 0 || total == 0)
+ if (nr == 0 || total == 0) {
+ cl_env_reexit(cookie);
return total;
+ }
- /* Shrink at least ldlm_namespace_nr(client) namespaces. */
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
- nr_ns > 0; nr_ns--)
+ /*
+ * Shrink at least ldlm_namespace_nr(client) namespaces.
+ */
+ for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ nr_ns > 0; nr_ns--)
{
int cancel, nr_locks;
- /* Do not call shrink under ldlm_namespace_lock(client) */
+ /*
+ * Do not call shrink under ldlm_namespace_lock(client)
+ */
mutex_down(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_up(ldlm_namespace_lock(client));
- /* If list is empty, we can't return any @cached > 0,
+ /*
+ * If list is empty, we can't return any @cached > 0,
* that probably would cause needless shrinker
- * call. */
+ * call.
+ */
cached = 0;
break;
}
- ns = ldlm_namespace_first(client);
+ ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
- ldlm_namespace_move(ns, client);
+ ldlm_namespace_move_locked(ns, client);
mutex_up(ldlm_namespace_lock(client));
-
+
nr_locks = ldlm_pool_granted(&ns->ns_pool);
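+                /*
+                 * Each namespace is asked to cancel a share proportional
+                 * to its granted count. For example (illustrative numbers
+                 * only): nr == 100, total == 1000 and nr_locks == 200
+                 * give cancel == 1 + 200 * 100 / 1000 == 21.
+                 */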
cancel = 1 + nr_locks * nr / total;
ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
cached += ldlm_pool_granted(&ns->ns_pool);
ldlm_namespace_put(ns, 1);
}
+ cl_env_reexit(cookie);
return cached;
}
{
__u32 nr_l = 0, nr_p = 0, l;
struct ldlm_namespace *ns;
- int rc, nr, equal = 0;
+ int nr, equal = 0;
- /* Check all modest namespaces. */
- mutex_down(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
- if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
- continue;
+ /*
+        * No need to set up a pool limit for client pools.
+ */
+ if (client == LDLM_NAMESPACE_SERVER) {
+ /*
+ * Check all modest namespaces first.
+ */
+ mutex_down(ldlm_namespace_lock(client));
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
+ continue;
- if (client == LDLM_NAMESPACE_SERVER) {
l = ldlm_pool_granted(&ns->ns_pool);
if (l == 0)
l = 1;
- /* Set the modest pools limit equal to their avg granted
- * locks + 5%. */
+ /*
+ * Set the modest pools limit equal to their avg granted
+ * locks + 5%.
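+                        *
+                        * For example (assuming LDLM_POOLS_MODEST_MARGIN
+                        * == 5, i.e. the 5% above): l == 1000 becomes
+                        * 1000 + dru(5000, 100) == 1050.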
+ */
l += dru(l * LDLM_POOLS_MODEST_MARGIN, 100);
ldlm_pool_setup(&ns->ns_pool, l);
nr_l += l;
nr_p++;
}
- }
- /* Make sure that modest namespaces did not eat more that 2/3 of limit */
- if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
- CWARN("Modest pools eat out 2/3 of locks limit. %d of %lu. "
- "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
- equal = 1;
- }
+ /*
+                 * Make sure that modest namespaces did not eat more than
+                 * 2/3 of the limit.
+ */
+ if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
+ CWARN("\"Modest\" pools eat out 2/3 of server locks "
+ "limit (%d of %lu). This means that you have too "
+ "many clients for this amount of server RAM. "
+ "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
+ equal = 1;
+ }
- /* The rest is given to greedy namespaces. */
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
- if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
- continue;
+ /*
+ * The rest is given to greedy namespaces.
+ */
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
+ continue;
- if (client == LDLM_NAMESPACE_SERVER) {
if (equal) {
- /* In the case 2/3 locks are eaten out by
+ /*
+ * In the case 2/3 locks are eaten out by
* modest pools, we re-setup equal limit
- * for _all_ pools. */
+ * for _all_ pools.
+ */
l = LDLM_POOL_HOST_L /
atomic_read(ldlm_namespace_nr(client));
} else {
- /* All the rest of greedy pools will have
- * all locks in equal parts.*/
+ /*
+                                 * The rest of the greedy pools will have
+                                 * the remaining locks in equal parts.
+ */
l = (LDLM_POOL_HOST_L - nr_l) /
(atomic_read(ldlm_namespace_nr(client)) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
}
+ mutex_up(ldlm_namespace_lock(client));
}
- mutex_up(ldlm_namespace_lock(client));
- /* Recalc at least ldlm_namespace_nr(client) namespaces. */
+ /*
+ * Recalc at least ldlm_namespace_nr(client) namespaces.
+ */
for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
- /* Lock the list, get first @ns in the list, getref, move it
+ /*
+ * Lock the list, get first @ns in the list, getref, move it
* to the tail, unlock and call pool recalc. This way we avoid
* calling recalc under @ns lock what is really good as we get
* rid of potential deadlock on client nodes when canceling
- * locks synchronously. */
+ * locks synchronously.
+ */
mutex_down(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_up(ldlm_namespace_lock(client));
break;
}
- ns = ldlm_namespace_first(client);
+ ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
- ldlm_namespace_move(ns, client);
+ ldlm_namespace_move_locked(ns, client);
mutex_up(ldlm_namespace_lock(client));
- /* After setup is done - recalc the pool. */
- rc = ldlm_pool_recalc(&ns->ns_pool);
- if (rc)
- CERROR("%s: pool recalculation error "
- "%d\n", ns->ns_pool.pl_name, rc);
-
+ /*
+ * After setup is done - recalc the pool.
+ */
+ ldlm_pool_recalc(&ns->ns_pool);
ldlm_namespace_put(ns, 1);
}
}
while (1) {
struct l_wait_info lwi;
- /* Recal all pools on this tick. */
- ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
+ /*
+                 * Recalc all pools on this tick.
+ */
ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
-
- /* Wait until the next check time, or until we're
- * stopped. */
+ ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
+
+ /*
+ * Wait until the next check time, or until we're
+ * stopped.
+ */
lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
NULL, NULL);
l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
complete_and_exit(&ldlm_pools_comp, 0);
}
-static int ldlm_pools_thread_start(ldlm_side_t client)
+static int ldlm_pools_thread_start(void)
{
struct l_wait_info lwi = { 0 };
int rc;
if (ldlm_pools_thread == NULL)
RETURN(-ENOMEM);
- ldlm_pools_thread->t_id = client;
init_completion(&ldlm_pools_comp);
cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
- /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
- * just drop the VM and FILES in ptlrpc_daemonize() right away. */
+ /*
+ * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
+ * just drop the VM and FILES in ptlrpc_daemonize() right away.
+ */
rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
CLONE_VM | CLONE_FILES);
if (rc < 0) {
ldlm_pools_thread->t_flags = SVC_STOPPING;
cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
- /* Make sure that pools thread is finished before freeing @thread.
+ /*
+ * Make sure that pools thread is finished before freeing @thread.
* This fixes possible race and oops due to accessing freed memory
- * in pools thread. */
+ * in pools thread.
+ */
wait_for_completion(&ldlm_pools_comp);
OBD_FREE_PTR(ldlm_pools_thread);
ldlm_pools_thread = NULL;
EXIT;
}
-int ldlm_pools_init(ldlm_side_t client)
+int ldlm_pools_init(void)
{
int rc;
ENTRY;
- rc = ldlm_pools_thread_start(client);
+ rc = ldlm_pools_thread_start();
if (rc == 0) {
ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
ldlm_pools_srv_shrink);
#endif /* __KERNEL__ */
#else /* !HAVE_LRU_RESIZE_SUPPORT */
-int ldlm_pool_setup(struct ldlm_pool *pl, __u32 limit)
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
return 0;
}
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
+__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
+{
+ return 1;
+}
+EXPORT_SYMBOL(ldlm_pool_get_clv);
+
+void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
+{
+ return;
+}
+EXPORT_SYMBOL(ldlm_pool_set_clv);
+
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
return 0;
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
-int ldlm_pools_init(ldlm_side_t client)
+__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
return 0;
}
-EXPORT_SYMBOL(ldlm_pools_init);
+EXPORT_SYMBOL(ldlm_pool_get_lvf);
-void ldlm_pools_fini(void)
+int ldlm_pools_init(void)
{
- return;
+ return 0;
}
-EXPORT_SYMBOL(ldlm_pools_fini);
+EXPORT_SYMBOL(ldlm_pools_init);
-void ldlm_pools_wakeup(void)
+void ldlm_pools_fini(void)
{
return;
}
-EXPORT_SYMBOL(ldlm_pools_wakeup);
+EXPORT_SYMBOL(ldlm_pools_fini);
void ldlm_pools_recalc(ldlm_side_t client)
{