-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
*
- * Copyright (c) 2007 Cluster File Systems, Inc.
- * Author: Yury Umanets <umka@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_pool.c
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
*/
-/* Idea of this code is rather simple. Each second, for each server namespace
+/*
+ * The idea of this code is rather simple. Each second, for each server namespace
* we have SLV - server lock volume which is calculated on current number of
* granted locks, grant speed for past period, etc - that is, locking load.
* This SLV number may be thought of as a flow definition for simplicity. It is
* pl_cancel_rate - Number of canceled locks for last T (calculated);
* pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
* pl_grant_plan - Planned number of granted locks for next T (calculated);
- *
- * pl_grant_step - Grant plan step, that is how ->pl_grant_plan
- * will change in next T (tunable);
- *
* pl_server_lock_volume - Current server lock volume (calculated);
*
* As may be seen from the list above, we have a few possible tunables which may
#define DEBUG_SUBSYSTEM S_LDLM
-#ifdef __KERNEL__
-# include <lustre_dlm.h>
-#else
-# include <liblustre.h>
-# include <libcfs/kp30.h>
-#endif
-
+#include <lustre_dlm.h>
+#include <cl_object.h>
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"
#ifdef HAVE_LRU_RESIZE_SUPPORT
-/* 50 ldlm locks for 1MB of RAM. */
-#define LDLM_POOL_HOST_L ((num_physpages >> (20 - PAGE_SHIFT)) * 50)
+/*
+ * 50 ldlm locks for 1MB of RAM.
+ */
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+
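A quick worked instance of this constant (illustrative numbers only): NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT) is the cacheable RAM expressed in MB, so assuming 4 KB pages (PAGE_CACHE_SHIFT == 12) and 4 GB of RAM (NUM_CACHEPAGES == 1 << 20):

	(1 << 20) >> (20 - 12) == 4096    /* MB of cacheable RAM    */
	4096 * 50              == 204800  /* LDLM_POOL_HOST_L locks */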
+/*
+ * Maximal possible grant step plan in %.
+ */
+#define LDLM_POOL_MAX_GSP (30)
+
+/*
+ * Minimal possible grant step plan in %.
+ */
+#define LDLM_POOL_MIN_GSP (1)
-/* Default step in % for grant plan. */
-#define LDLM_POOL_GSP (5)
+/*
+ * This controls the speed of reaching LDLM_POOL_MAX_GSP
+ * with increasing thread period.
+ */
+#define LDLM_POOL_GSP_STEP_SHIFT (2)
-/* LDLM_POOL_GSP% of all locks is default GP. */
-#define LDLM_POOL_GP(L) ((L) * LDLM_POOL_GSP / 100)
+/*
+ * LDLM_POOL_GSP% of all locks is default GP.
+ */
+#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
-/* Max age for locks on clients. */
+/*
+ * Max age for locks on clients.
+ */
#define LDLM_POOL_MAX_AGE (36000)
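This is the "10 hrs" used by ldlm_pool_slv_max() below; checking the units:

	36000 s / 3600 (s/h) == 10 h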
-#ifdef __KERNEL__
-extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir;
-#endif
+/*
+ * The granularity of SLV calculation.
+ */
+#define LDLM_POOL_SLV_SHIFT (10)
-#define avg(src, add) \
- ((src) = ((src) + (add)) / 2)
+extern struct proc_dir_entry *ldlm_ns_proc_dir;
-static inline __u64 dru(__u64 val, __u32 div)
+static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
- __u64 ret = val + (div - 1);
- do_div(ret, div);
- return ret;
+ return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
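A worked example of the new shift-based dru() ("divide and round up/down"), with illustrative values:

	dru(1000, 10, 1) == (1000 + 1023) >> 10 == 1  /* rounded up   */
	dru(1000, 10, 0) ==  1000         >> 10 == 0  /* rounded down */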
static inline __u64 ldlm_pool_slv_max(__u32 L)
{
- /* Allow to have all locks for 1 client for 10 hrs.
- * Formula is the following: limit * 10h / 1 client. */
- __u64 lim = L * LDLM_POOL_MAX_AGE / 1;
+ /*
+ * Allow one client to hold all locks for 10 hrs.
+ * The formula is the following: limit * 10h / 1 client.
+ */
+ __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
return lim;
}
}
enum {
- LDLM_POOL_GRANTED_STAT = 0,
+ LDLM_POOL_FIRST_STAT = 0,
+ LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
+ LDLM_POOL_GRANT_STAT,
+ LDLM_POOL_CANCEL_STAT,
LDLM_POOL_GRANT_RATE_STAT,
LDLM_POOL_CANCEL_RATE_STAT,
LDLM_POOL_GRANT_PLAN_STAT,
LDLM_POOL_SLV_STAT,
+ LDLM_POOL_SHRINK_REQTD_STAT,
+ LDLM_POOL_SHRINK_FREED_STAT,
+ LDLM_POOL_RECALC_STAT,
+ LDLM_POOL_TIMING_STAT,
LDLM_POOL_LAST_STAT
};
return container_of(pl, struct ldlm_namespace, ns_pool);
}
-/* Should be called under ->pl_lock taken */
-static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
+/**
+ * Calculates the suggested grant_step in % of available locks for the
+ * passed period \a t. This is later used in grant_plan calculations.
+ */
+static inline int ldlm_pool_t2gsp(unsigned int t)
{
- int grant_plan, granted;
- __u32 limit;
-
- limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
+ /*
+ * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
+ * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
+ *
+ * How this affects execution is the following:
+ *
+ * - for a thread period of 1s we will have a grant_step of 1%, which
+ * is good from the pov of taking some load off the server and pushing
+ * it out to clients. A grant_step of 1% means that the server will
+ * not allow clients to get lots of locks in a short period of time
+ * while keeping all their old locks cached. Clients will always have
+ * to give some locks back if they want to take some new ones;
+ *
+ * - for a thread period of 10s (the default) we will have 23%, which
+ * means that clients will have enough room to take some new locks
+ * without giving any back. All locks from this 23% which were not
+ * taken by clients in the current period will contribute to SLV
+ * growth. Growing SLV means more locks cached on clients until the
+ * limit or grant plan is reached.
+ */
+ return LDLM_POOL_MAX_GSP -
+ ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
+ (t >> LDLM_POOL_GSP_STEP_SHIFT));
+}
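The percentages quoted above can be confirmed by evaluating the formula by hand, using LDLM_POOL_MAX_GSP == 30, LDLM_POOL_MIN_GSP == 1 and LDLM_POOL_GSP_STEP_SHIFT == 2 as defined earlier:

	t = 1:  30 - ((30 - 1) >> (1 >> 2))  == 30 - (29 >> 0) == 1   /* % */
	t = 10: 30 - ((30 - 1) >> (10 >> 2)) == 30 - (29 >> 2) == 23  /* % */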
- grant_plan = granted + ((limit - granted) *
- atomic_read(&pl->pl_grant_step)) / 100;
- atomic_set(&pl->pl_grant_plan, grant_plan);
+static inline int ldlm_pool_granted(struct ldlm_pool *pl)
+{
+ return atomic_read(&pl->pl_granted);
}
-/* Should be called under ->pl_lock taken */
-static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
+/**
+ * Recalculates the next grant plan on the passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
+static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
- int slv_factor, granted, grant_plan;
- __u32 limit;
- __u64 slv;
+ int granted, grant_step, limit;
- slv = ldlm_pool_get_slv(pl);
- limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
- grant_plan = atomic_read(&pl->pl_grant_plan);
+ limit = ldlm_pool_get_limit(pl);
+ granted = ldlm_pool_granted(pl);
- if ((slv_factor = limit - (granted - grant_plan)) <= 0)
- slv_factor = 1;
+ grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+ grant_step = ((limit - granted) * grant_step) / 100;
+ pl->pl_grant_plan = granted + grant_step;
+ limit = (limit * 5) >> 2;
+ if (pl->pl_grant_plan > limit)
+ pl->pl_grant_plan = limit;
+}
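A worked pass through the plan calculation (illustrative numbers): with limit == 1000, granted == 400 and the default 10s recalc period (so grant_step == 23%):

	grant_step = ((1000 - 400) * 23) / 100 == 138
	grant_plan = 400 + 138                 == 538
	cap        = (1000 * 5) >> 2           == 1250  /* not reached */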
- slv = (slv * ((slv_factor * 100) / limit));
- slv = dru(slv, 100);
+/**
+ * Recalculates next SLV on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
+static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
+{
+ int granted;
+ int grant_plan;
+ int round_up;
+ __u64 slv;
+ __u64 slv_factor;
+ __u64 grant_usage;
+ __u32 limit;
+
+ slv = pl->pl_server_lock_volume;
+ grant_plan = pl->pl_grant_plan;
+ limit = ldlm_pool_get_limit(pl);
+ granted = ldlm_pool_granted(pl);
+ round_up = granted < limit;
+
+ grant_usage = max_t(int, limit - (granted - grant_plan), 1);
+
+ /*
+	 * Find out the SLV change factor, which is the ratio of grant usage
+	 * to limit. SLV changes as fast as the ratio of grant plan
+	 * consumption. The more locks from the grant plan are left unconsumed
+	 * by clients in the last interval (idle time), the faster SLV grows.
+	 * Conversely, the more the grant plan is over-consumed (load time),
+	 * the faster SLV drops.
+ */
+ slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ do_div(slv_factor, limit);
+ slv = slv * slv_factor;
+ slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
if (slv > ldlm_pool_slv_max(limit)) {
slv = ldlm_pool_slv_max(limit);
slv = ldlm_pool_slv_min(limit);
}
- ldlm_pool_set_slv(pl, slv);
+ pl->pl_server_lock_volume = slv;
}
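A worked pass through the SLV update (illustrative numbers): with limit == 1000, granted == 1200 and grant_plan == 1100, i.e. over-consumption, so round_up == 0:

	grant_usage = max(1000 - (1200 - 1100), 1) == 900
	slv_factor  = (900 << 10) / 1000           == 921
	slv         = (slv * 921) >> 10            /* ~0.9 * slv, SLV drops */

Under-consumption (say granted == 800, grant_plan == 900) gives grant_usage == 1100, a factor above 1 << 10, and thus a growing SLV.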
-static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
+/**
+ * Records the current pool state into the stats of the passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
+static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
- __u64 slv = ldlm_pool_get_slv(pl);
- __u32 granted = atomic_read(&pl->pl_granted);
- __u32 grant_rate = atomic_read(&pl->pl_grant_rate);
- __u32 grant_plan = atomic_read(&pl->pl_grant_plan);
- __u32 cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ int grant_plan = pl->pl_grant_plan;
+ __u64 slv = pl->pl_server_lock_volume;
+ int granted = ldlm_pool_granted(pl);
+ int grant_rate = atomic_read(&pl->pl_grant_rate);
+ int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
+ slv);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
+ granted);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
+ grant_rate);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
+ grant_plan);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
+ cancel_rate);
+}
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
- slv);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
- granted);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
- grant_rate);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
- grant_plan);
- lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
- cancel_rate);
+/**
+ * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
+ */
+static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
+{
+ struct obd_device *obd;
+
+ /*
+	 * Set the new SLV in an obd field so it can be used later without
+	 * accessing the pool. This is required to avoid a race between
+	 * sending a reply to a client with the new SLV and cleanup of the
+	 * server stack, in which we can't guarantee that the namespace is
+	 * still alive. We only know that the obd is alive as long as a
+	 * valid export is alive.
+ */
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL);
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_slv = pl->pl_server_lock_volume;
+ write_unlock(&obd->obd_pool_lock);
}
+/**
+ * Recalculates all pool fields on passed \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
ENTRY;
- spin_lock(&pl->pl_lock);
- recalc_interval_sec = cfs_duration_sec(cfs_time_current() -
- pl->pl_update_time);
- if (recalc_interval_sec > 0) {
- /* Update statistics */
- ldlm_pool_recalc_stats(pl);
-
- /* Recalc SLV after last period. This should be done
- * _before_ recalculating new grant plan. */
- ldlm_pool_recalc_slv(pl);
-
- /* Update grant_plan for new period. */
- ldlm_pool_recalc_grant_plan(pl);
- pl->pl_update_time = cfs_time_current();
-
- /* Zero out all rates and speed for the last period. */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
- }
- spin_unlock(&pl->pl_lock);
- RETURN(0);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period)
+ RETURN(0);
+
+ spin_lock(&pl->pl_lock);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
+ RETURN(0);
+ }
+ /*
+	 * Recalc SLV after the last period. This should be done
+	 * _before_ recalculating the new grant plan.
+ */
+ ldlm_pool_recalc_slv(pl);
+
+ /*
+	 * Make sure the pool has informed the obd of the last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
+
+ /*
+ * Update grant_plan for new period.
+ */
+ ldlm_pool_recalc_grant_plan(pl);
+
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
+ spin_unlock(&pl->pl_lock);
+ RETURN(0);
}
-/* Our goal here is to decrease SLV the way to make a client hold
- * @nr locks smaller in next 10h. */
+/**
+ * This function is used on the server side as the main entry point for
+ * memory pressure handling. It decreases SLV on \a pl according to the
+ * passed \a nr and \a gfp_mask.
+ *
+ * Our goal here is to decrease SLV in such a way that clients hold \a nr
+ * fewer locks over the next 10h.
+ */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask)
+ int nr, gfp_t gfp_mask)
{
- __u32 granted, limit;
- __u64 slv_delta;
- ENTRY;
+ __u32 limit;
+
+ /*
+ * VM is asking how many entries may be potentially freed.
+ */
+ if (nr == 0)
+ return ldlm_pool_granted(pl);
+
+ /*
+	 * The client has already canceled its locks while the server is
+	 * already in the shrinker and can't cancel anything. Catch this race.
+ */
+ if (ldlm_pool_granted(pl) == 0)
+ RETURN(0);
+
+ spin_lock(&pl->pl_lock);
+
+ /*
+	 * We want the shrinker either to cause cancellation of @nr locks on
+	 * clients or to grant approximately @nr fewer locks in the next
+	 * intervals.
+	 *
+	 * This is why we decrease SLV by @nr. The effect only lasts for one
+	 * re-calc interval (1s these days), which should be enough to pass
+	 * the decreased SLV to all clients. On the next recalc interval the
+	 * pool will either increase SLV if the locking load is not high, or
+	 * keep it at the same level, or even decrease it again. Thus, the
+	 * shrinker-decreased SLV will affect the next recalc intervals and
+	 * this way lower the locking load.
+ */
+ if (nr < pl->pl_server_lock_volume) {
+ pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
+ } else {
+ limit = ldlm_pool_get_limit(pl);
+ pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
+ }
- /* Client already canceled locks but server is already in shrinker and
- * can't cancel anything. Let's catch this race. */
- if ((granted = atomic_read(&pl->pl_granted)) == 0)
- RETURN(0);
+ /*
+	 * Make sure the pool has informed the obd of the last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
+ spin_unlock(&pl->pl_lock);
+
+ /*
+	 * We did not really free any memory here so far; it may only be
+	 * freed later, so return 0 to avoid confusing the VM.
+ */
+ return 0;
+}
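The SLV drop itself is a plain subtraction clamped from below, e.g. (illustrative) slv == 10000 and nr == 300:

	10000 - 300 == 9700  /* nr <  slv                                  */
	                     /* nr >= slv clamps to ldlm_pool_slv_min(limit) */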
- spin_lock(&pl->pl_lock);
+/**
+ * Setup server side pool \a pl with passed \a limit.
+ */
+static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
+{
+ struct obd_device *obd;
- /* Simple proportion but it gives impression on how much should be
- * SLV changed for request @nr of locks to be canceled.*/
- slv_delta = nr * ldlm_pool_get_slv(pl);
- limit = ldlm_pool_get_limit(pl);
- do_div(slv_delta, granted);
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL && obd != LP_POISON);
+ LASSERT(obd->obd_type != LP_POISON);
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_limit = limit;
+ write_unlock(&obd->obd_pool_lock);
- /* As SLV has some dependence on historical data, that is new value
- * is based on old one, this decreasing will make clients get some
- * locks back to the server and after some time it will stabilize.*/
- if (slv_delta < ldlm_pool_get_slv(pl))
- ldlm_pool_set_slv(pl, ldlm_pool_get_slv(pl) - slv_delta);
- else
- ldlm_pool_set_slv(pl, ldlm_pool_slv_min(limit));
- spin_unlock(&pl->pl_lock);
+ ldlm_pool_set_limit(pl, limit);
+ return 0;
+}
- /* We did not really free any memory here so far, it only will be
- * freed later may be, so that we return 0 to not confuse VM. */
- RETURN(0);
+/**
+ * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to the passed \a pl.
+ */
+static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
+{
+ struct obd_device *obd;
+
+ /*
+	 * Get the new SLV and Limit from the obd, which is updated by
+	 * incoming RPCs.
+ */
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL);
+ read_lock(&obd->obd_pool_lock);
+ pl->pl_server_lock_volume = obd->obd_pool_slv;
+ ldlm_pool_set_limit(pl, obd->obd_pool_limit);
+ read_unlock(&obd->obd_pool_lock);
}
+/**
+ * Recalculates the client side pool \a pl according to the current SLV and
+ * Limit.
+ */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
+ int ret;
ENTRY;
- spin_lock(&pl->pl_lock);
-
- recalc_interval_sec = cfs_duration_sec(cfs_time_current() -
- pl->pl_update_time);
- if (recalc_interval_sec > 0) {
- /* Update statistics only every T */
- ldlm_pool_recalc_stats(pl);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period)
+ RETURN(0);
- /* Zero out grant/cancel rates and speed for last period. */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
+ spin_lock(&pl->pl_lock);
+ /*
+	 * Re-check under the lock whether we still need to recalc now.
+ */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
+ RETURN(0);
}
- spin_unlock(&pl->pl_lock);
- /* Recalc client pool is done without taking into account pl_update_time
- * as this may be called voluntary in the case of emergency. Client
- * recalc does not calculate anything, we do not risk to have skew
- * of some pool param. */
- ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_ASYNC);
- RETURN(0);
+ /*
+	 * Make sure the pool knows the last SLV and Limit from the obd.
+ */
+ ldlm_cli_pool_pop_slv(pl);
+ spin_unlock(&pl->pl_lock);
+
+ /*
+	 * Do not cancel locks if LRU resize is disabled for this ns.
+ */
+ if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
+ GOTO(out, ret = 0);
+
+ /*
+	 * When canceling locks on the client we do not need to maintain
+	 * sharp timing; we only want to cancel locks asap according to the
+	 * new SLV. This may be called when SLV has changed a lot, which is
+	 * why we do not take pl->pl_recalc_time into account here.
+ */
+ ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
+ LDLM_CANCEL_LRUR);
+
+out:
+ spin_lock(&pl->pl_lock);
+ /*
+	 * LRU resizing may take longer than the recalc period, so update
+	 * the time after LRU resizing rather than before it.
+ */
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
+ spin_unlock(&pl->pl_lock);
+ RETURN(ret);
}
+/**
+ * This function is the main entry point for memory pressure handling on the
+ * client side. Its main goal is to cancel some number of locks on the
+ * passed \a pl according to \a nr and \a gfp_mask.
+ */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask)
+ int nr, gfp_t gfp_mask)
{
- ENTRY;
- RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), nr, LDLM_SYNC));
+ struct ldlm_namespace *ns;
+ int unused;
+
+ ns = ldlm_pl2ns(pl);
+
+ /*
+	 * Do not cancel locks if LRU resize is disabled for this ns.
+ */
+ if (!ns_connect_lru_resize(ns))
+ RETURN(0);
+
+ /*
+	 * Make sure the pool knows the last SLV and Limit from the obd.
+ */
+ ldlm_cli_pool_pop_slv(pl);
+
+ spin_lock(&ns->ns_lock);
+ unused = ns->ns_nr_unused;
+ spin_unlock(&ns->ns_lock);
+
+ if (nr == 0)
+ return (unused / 100) * sysctl_vfs_cache_pressure;
+ else
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
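For the nr == 0 query branch the estimate scales with the standard VFS pressure knob; with an illustrative unused == 20000 cached locks:

	sysctl_vfs_cache_pressure == 100: (20000 / 100) * 100 == 20000
	sysctl_vfs_cache_pressure == 150: (20000 / 100) * 150 == 30000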
+struct ldlm_pool_ops ldlm_srv_pool_ops = {
+ .po_recalc = ldlm_srv_pool_recalc,
+ .po_shrink = ldlm_srv_pool_shrink,
+ .po_setup = ldlm_srv_pool_setup
+};
+
+struct ldlm_pool_ops ldlm_cli_pool_ops = {
+ .po_recalc = ldlm_cli_pool_recalc,
+ .po_shrink = ldlm_cli_pool_shrink
+};
+
+/**
+ * Pool recalc wrapper. Calls either the client or the server pool recalc
+ * callback depending on what kind of pool \a pl is.
+ */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
- if (pl->pl_recalc != NULL && pool_recalc_enabled(pl))
- return pl->pl_recalc(pl);
- return 0;
+ time_t recalc_interval_sec;
+ int count;
+
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec <= 0)
+ goto recalc;
+
+ spin_lock(&pl->pl_lock);
+ if (recalc_interval_sec > 0) {
+ /*
+ * Update pool statistics every 1s.
+ */
+ ldlm_pool_recalc_stats(pl);
+
+ /*
+ * Zero out all rates and speed for the last period.
+ */
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ }
+ spin_unlock(&pl->pl_lock);
+
+ recalc:
+ if (pl->pl_ops->po_recalc != NULL) {
+ count = pl->pl_ops->po_recalc(pl);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
+ count);
+ }
+ recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
+ pl->pl_recalc_period;
+ if (recalc_interval_sec <= 0) {
+ /* Prevent too frequent recalculation. */
+ recalc_interval_sec = 1;
+ }
+
+ return recalc_interval_sec;
}
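The return value is the number of seconds until this pool next wants recalculation, clamped to at least 1s. A minimal caller sketch using only names already present in this file (the real consumer is ldlm_pools_thread_main() below, which sleeps for the minimum over the server and client answers):

	int delay = ldlm_pool_recalc(&ns->ns_pool); /* secs to next recalc */
	lwi = LWI_TIMEOUT(cfs_time_seconds(delay), NULL, NULL);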
-EXPORT_SYMBOL(ldlm_pool_recalc);
-int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
- unsigned int gfp_mask)
+/**
+ * Pool shrink wrapper. Calls either the client or the server pool shrink
+ * callback depending on what kind of pool \a pl is.
+ */
+int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
- if (pl->pl_shrink != NULL && pool_shrink_enabled(pl)) {
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks\n",
- pl->pl_name, nr);
- return pl->pl_shrink(pl, nr, gfp_mask);
+ int cancel = 0;
+
+ if (pl->pl_ops->po_shrink != NULL) {
+ cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
+ if (nr > 0) {
+ lprocfs_counter_add(pl->pl_stats,
+ LDLM_POOL_SHRINK_REQTD_STAT,
+ nr);
+ lprocfs_counter_add(pl->pl_stats,
+ LDLM_POOL_SHRINK_FREED_STAT,
+ cancel);
+ CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
+ "shrunk %d\n", pl->pl_name, nr, cancel);
+ }
}
- return 0;
+ return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);
-/* The purpose of this function is to re-setup limit and maximal allowed
- * slv according to the passed limit. */
-int ldlm_pool_setup(struct ldlm_pool *pl, __u32 limit)
+/**
+ * Pool setup wrapper. Calls either the client or the server pool setup
+ * callback depending on what kind of pool \a pl is.
+ *
+ * Sets the passed \a limit into the pool \a pl.
+ */
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
- ENTRY;
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_SERVER)
- ldlm_pool_set_limit(pl, limit);
- RETURN(0);
+ if (pl->pl_ops->po_setup != NULL)
+ return(pl->pl_ops->po_setup(pl, limit));
+ return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);
-#ifdef __KERNEL__
-static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
- __u32 granted, grant_rate, cancel_rate, grant_step;
- int nr = 0, grant_speed, grant_plan;
- struct ldlm_pool *pl = data;
- __u32 limit;
- __u64 slv;
+ int granted, grant_rate, cancel_rate, grant_step;
+ int grant_speed, grant_plan, lvf;
+ struct ldlm_pool *pl = m->private;
+ __u64 slv, clv;
+ __u32 limit;
+
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_server_lock_volume;
+ clv = pl->pl_client_lock_volume;
+ limit = ldlm_pool_get_limit(pl);
+ grant_plan = pl->pl_grant_plan;
+ granted = ldlm_pool_granted(pl);
+ grant_rate = atomic_read(&pl->pl_grant_rate);
+ cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ grant_speed = grant_rate - cancel_rate;
+ lvf = atomic_read(&pl->pl_lock_volume_factor);
+ grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+ spin_unlock(&pl->pl_lock);
+
+ seq_printf(m, "LDLM pool state (%s):\n"
+ " SLV: "LPU64"\n"
+ " CLV: "LPU64"\n"
+ " LVF: %d\n",
+ pl->pl_name, slv, clv, lvf);
+
+ if (ns_is_server(ldlm_pl2ns(pl))) {
+ seq_printf(m, " GSP: %d%%\n"
+ " GP: %d\n",
+ grant_step, grant_plan);
+ }
+ seq_printf(m, " GR: %d\n" " CR: %d\n" " GS: %d\n"
+ " G: %d\n" " L: %d\n",
+ grant_rate, cancel_rate, grant_speed,
+ granted, limit);
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
- spin_lock(&pl->pl_lock);
- slv = ldlm_pool_get_slv(pl);
- limit = ldlm_pool_get_limit(pl);
- granted = atomic_read(&pl->pl_granted);
- grant_rate = atomic_read(&pl->pl_grant_rate);
- grant_plan = atomic_read(&pl->pl_grant_plan);
- grant_step = atomic_read(&pl->pl_grant_step);
- grant_speed = atomic_read(&pl->pl_grant_speed);
- cancel_rate = atomic_read(&pl->pl_cancel_rate);
- spin_unlock(&pl->pl_lock);
-
- nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
- pl->pl_name);
- nr += snprintf(page + nr, count - nr, " SLV: "LPU64"\n", slv);
-
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_CLIENT) {
- nr += snprintf(page + nr, count - nr, " LVF: %d\n",
- atomic_read(&pl->pl_lock_volume_factor));
- }
- nr += snprintf(page + nr, count - nr, " GSP: %d%%\n",
- grant_step);
- nr += snprintf(page + nr, count - nr, " GP: %d\n",
- grant_plan);
- nr += snprintf(page + nr, count - nr, " GR: %d\n",
- grant_rate);
- nr += snprintf(page + nr, count - nr, " CR: %d\n",
- cancel_rate);
- nr += snprintf(page + nr, count - nr, " GS: %d\n",
- grant_speed);
- nr += snprintf(page + nr, count - nr, " G: %d\n",
- granted);
- nr += snprintf(page + nr, count - nr, " L: %d\n",
- limit);
- return nr;
+static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
+{
+ struct ldlm_pool *pl = m->private;
+ int grant_speed;
+
+ spin_lock(&pl->pl_lock);
+ /* serialize with ldlm_pool_recalc */
+ grant_speed = atomic_read(&pl->pl_grant_rate) -
+ atomic_read(&pl->pl_cancel_rate);
+ spin_unlock(&pl->pl_lock);
+ return lprocfs_uint_seq_show(m, &grant_speed);
}
-static int ldlm_pool_proc_init(struct ldlm_pool *pl)
+LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
+LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
+
+LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
+LDLM_POOL_PROC_WRITER(recalc_period, int);
+static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *off)
{
- struct ldlm_namespace *ns = ldlm_pl2ns(pl);
- struct proc_dir_entry *parent_ns_proc;
- struct lprocfs_vars pool_vars[2];
- char *var_name = NULL;
- int rc = 0;
- ENTRY;
+ struct seq_file *seq = file->private_data;
- OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
- if (!var_name)
- RETURN(-ENOMEM);
+ return lprocfs_wr_recalc_period(file, buf, len, seq->private);
+}
+LPROC_SEQ_FOPS(lprocfs_recalc_period);
- parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
- if (parent_ns_proc == NULL) {
- CERROR("%s: proc entry is not initialized\n",
- ns->ns_name);
- GOTO(out_free_name, rc = -EINVAL);
- }
- pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
- NULL, NULL);
- if (IS_ERR(pl->pl_proc_dir)) {
- CERROR("LProcFS failed in ldlm-pool-init\n");
- rc = PTR_ERR(pl->pl_proc_dir);
- GOTO(out_free_name, rc);
- }
+LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
+LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
+LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);
- var_name[MAX_STRING_SIZE] = '\0';
- memset(pool_vars, 0, sizeof(pool_vars));
- pool_vars[0].name = var_name;
-
- snprintf(var_name, MAX_STRING_SIZE, "server_lock_volume");
- pool_vars[0].data = &pl->pl_server_lock_volume;
- pool_vars[0].read_fptr = lprocfs_rd_u64;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "limit");
- pool_vars[0].data = &pl->pl_limit;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- pool_vars[0].write_fptr = lprocfs_wr_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "granted");
- pool_vars[0].data = &pl->pl_granted;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "control");
- pool_vars[0].data = &pl->pl_control;
- pool_vars[0].read_fptr = lprocfs_rd_uint;
- pool_vars[0].write_fptr = lprocfs_wr_uint;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
- pool_vars[0].data = &pl->pl_grant_speed;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "cancel_rate");
- pool_vars[0].data = &pl->pl_cancel_rate;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "grant_rate");
- pool_vars[0].data = &pl->pl_grant_rate;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "grant_plan");
- pool_vars[0].data = &pl->pl_grant_plan;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- snprintf(var_name, MAX_STRING_SIZE, "grant_step");
- pool_vars[0].data = &pl->pl_grant_step;
- pool_vars[0].read_fptr = lprocfs_rd_atomic;
- if (ns->ns_client == LDLM_NAMESPACE_SERVER)
- pool_vars[0].write_fptr = lprocfs_wr_atomic;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
-
- if (ns->ns_client == LDLM_NAMESPACE_CLIENT) {
- snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
- pool_vars[0].data = &pl->pl_lock_volume_factor;
- pool_vars[0].read_fptr = lprocfs_rd_uint;
- pool_vars[0].write_fptr = lprocfs_wr_uint;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
- }
+LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
- snprintf(var_name, MAX_STRING_SIZE, "state");
- pool_vars[0].data = pl;
- pool_vars[0].read_fptr = lprocfs_rd_pool_state;
- lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
+static int ldlm_pool_proc_init(struct ldlm_pool *pl)
+{
+ struct ldlm_namespace *ns = ldlm_pl2ns(pl);
+ struct proc_dir_entry *parent_ns_proc;
+ struct lprocfs_seq_vars pool_vars[2];
+ char *var_name = NULL;
+ int rc = 0;
+ ENTRY;
+
+ OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
+ if (!var_name)
+ RETURN(-ENOMEM);
+
+ parent_ns_proc = ns->ns_proc_dir_entry;
+ if (parent_ns_proc == NULL) {
+ CERROR("%s: proc entry is not initialized\n",
+ ldlm_ns_name(ns));
+ GOTO(out_free_name, rc = -EINVAL);
+ }
+ pl->pl_proc_dir = lprocfs_seq_register("pool", parent_ns_proc,
+ NULL, NULL);
+ if (IS_ERR(pl->pl_proc_dir)) {
+ rc = PTR_ERR(pl->pl_proc_dir);
+ pl->pl_proc_dir = NULL;
+ CERROR("%s: cannot create 'pool' proc entry: rc = %d\n",
+ ldlm_ns_name(ns), rc);
+ GOTO(out_free_name, rc);
+ }
+
+ var_name[MAX_STRING_SIZE] = '\0';
+ memset(pool_vars, 0, sizeof(pool_vars));
+ pool_vars[0].name = var_name;
+
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "server_lock_volume",
+ &pl->pl_server_lock_volume, &ldlm_pool_u64_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "limit", &pl->pl_limit,
+ &ldlm_pool_rw_atomic_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "granted",
+ &pl->pl_granted, &ldlm_pool_atomic_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_speed", pl,
+ &lprocfs_grant_speed_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "cancel_rate",
+ &pl->pl_cancel_rate, &ldlm_pool_atomic_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_rate",
+ &pl->pl_grant_rate, &ldlm_pool_atomic_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_plan", pl,
+ &lprocfs_grant_plan_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "recalc_period",
+ pl, &lprocfs_recalc_period_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "lock_volume_factor",
+ &pl->pl_lock_volume_factor, &ldlm_pool_rw_atomic_fops);
+ ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "state", pl,
+ &lprocfs_pool_state_fops);
pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
- LDLM_POOL_GRANTED_STAT, 0);
+ LDLM_POOL_FIRST_STAT, 0);
if (!pl->pl_stats)
GOTO(out_free_name, rc = -ENOMEM);
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
"granted", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "grant", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "cancel", "locks");
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
"grant_rate", "locks/s");
lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
"slv", "slv");
- lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "shrink_request", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "shrink_freed", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "recalc_freed", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "recalc_timing", "sec");
+ rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
EXIT;
out_free_name:
pl->pl_proc_dir = NULL;
}
}
-#else /* !__KERNEL__*/
-#define ldlm_pool_proc_init(pl) (0)
-#define ldlm_pool_proc_fini(pl) while (0) {}
-#endif
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client)
+ int idx, ldlm_side_t client)
{
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
- spin_lock_init(&pl->pl_lock);
- atomic_set(&pl->pl_granted, 0);
- pl->pl_update_time = cfs_time_current();
- atomic_set(&pl->pl_lock_volume_factor, 1);
+ spin_lock_init(&pl->pl_lock);
+ atomic_set(&pl->pl_granted, 0);
+ pl->pl_recalc_time = cfs_time_current_sec();
+ atomic_set(&pl->pl_lock_volume_factor, 1);
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
- atomic_set(&pl->pl_grant_speed, 0);
- pl->pl_control = LDLM_POOL_CTL_FULL;
- atomic_set(&pl->pl_grant_step, LDLM_POOL_GSP);
- atomic_set(&pl->pl_grant_plan, LDLM_POOL_GP(LDLM_POOL_HOST_L));
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
- snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
- ns->ns_name, idx);
+ snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
+ ldlm_ns_name(ns), idx);
if (client == LDLM_NAMESPACE_SERVER) {
- pl->pl_recalc = ldlm_srv_pool_recalc;
- pl->pl_shrink = ldlm_srv_pool_shrink;
+ pl->pl_ops = &ldlm_srv_pool_ops;
ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
- ldlm_pool_set_slv(pl, ldlm_pool_slv_max(LDLM_POOL_HOST_L));
+ pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
+ pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
} else {
- ldlm_pool_set_slv(pl, 1);
ldlm_pool_set_limit(pl, 1);
- pl->pl_recalc = ldlm_cli_pool_recalc;
- pl->pl_shrink = ldlm_cli_pool_shrink;
+ pl->pl_server_lock_volume = 0;
+ pl->pl_ops = &ldlm_cli_pool_ops;
+ pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
}
-
+ pl->pl_client_lock_volume = 0;
rc = ldlm_pool_proc_init(pl);
if (rc)
RETURN(rc);
{
ENTRY;
ldlm_pool_proc_fini(pl);
- pl->pl_recalc = NULL;
- pl->pl_shrink = NULL;
+
+ /*
+	 * The pool should not be used after this point. We can't free it
+	 * here as it lives in struct ldlm_namespace, but we are still
+	 * interested in catching any abnormal uses.
+ */
+ POISON(pl, 0x5a, sizeof(*pl));
EXIT;
}
EXPORT_SYMBOL(ldlm_pool_fini);
+/**
+ * Add a newly taken ldlm lock \a lock into the pool \a pl accounting.
+ */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
- ENTRY;
- atomic_inc(&pl->pl_granted);
- atomic_inc(&pl->pl_grant_rate);
- atomic_inc(&pl->pl_grant_speed);
-
- /* No need to recalc client pools here as this is already done
- * on enqueue/cancel and locks to cancel already packed to the
- * rpc. */
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_SERVER)
- ldlm_pool_recalc(pl);
- EXIT;
+ /*
+	 * FLOCK locks are special in the sense that they are almost never
+	 * cancelled; instead, a special kind of lock is used to drop them.
+	 * Also there is no LRU for flock locks, so there is no point in
+	 * tracking them anyway.
+ */
+ if (lock->l_resource->lr_type == LDLM_FLOCK)
+ return;
+
+ atomic_inc(&pl->pl_granted);
+ atomic_inc(&pl->pl_grant_rate);
+ lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
+ /*
+	 * Do not do pool recalc on the client side, as all locks which
+	 * potentially may be canceled have already been packed into the
+	 * enqueue/cancel RPC. Also we do not want to run out of stack
+	 * with too long call paths.
+ */
+ if (ns_is_server(ldlm_pl2ns(pl)))
+ ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_add);
+/**
+ * Remove ldlm lock \a lock from pool \a pl accounting.
+ */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
- ENTRY;
- LASSERT(atomic_read(&pl->pl_granted) > 0);
- atomic_dec(&pl->pl_granted);
- atomic_inc(&pl->pl_cancel_rate);
- atomic_dec(&pl->pl_grant_speed);
-
- /* Same as in ldlm_pool_add() */
- if (ldlm_pl2ns(pl)->ns_client == LDLM_NAMESPACE_SERVER)
- ldlm_pool_recalc(pl);
- EXIT;
+ /*
+	 * Filter out FLOCK locks. See the comment in ldlm_pool_add() above.
+ */
+ if (lock->l_resource->lr_type == LDLM_FLOCK)
+ return;
+
+ LASSERT(atomic_read(&pl->pl_granted) > 0);
+ atomic_dec(&pl->pl_granted);
+ atomic_inc(&pl->pl_cancel_rate);
+
+ lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
+
+ if (ns_is_server(ldlm_pl2ns(pl)))
+ ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_del);
-/* ->pl_lock should be taken. */
+/**
+ * Returns current \a pl SLV.
+ *
+ * \pre ->pl_lock is not locked.
+ */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
- return pl->pl_server_lock_volume;
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_server_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);
-/* ->pl_lock should be taken. */
+/**
+ * Sets passed \a slv to \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
- pl->pl_server_lock_volume = slv;
+ spin_lock(&pl->pl_lock);
+ pl->pl_server_lock_volume = slv;
+ spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
+/**
+ * Returns current \a pl CLV.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
+{
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_client_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
+}
+EXPORT_SYMBOL(ldlm_pool_get_clv);
+
+/**
+ * Sets passed \a clv to \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
+{
+ spin_lock(&pl->pl_lock);
+ pl->pl_client_lock_volume = clv;
+ spin_unlock(&pl->pl_lock);
+}
+EXPORT_SYMBOL(ldlm_pool_set_clv);
+
+/**
+ * Returns current \a pl limit.
+ */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_limit);
+ return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);
+/**
+ * Sets passed \a limit to \a pl.
+ */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
- atomic_set(&pl->pl_limit, limit);
+ atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
-/* Server side is only enabled for kernel space for now. */
-#ifdef __KERNEL__
-static int ldlm_pool_granted(struct ldlm_pool *pl)
+/**
+ * Returns current LVF from \a pl.
+ */
+__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
- return atomic_read(&pl->pl_granted);
+ return atomic_read(&pl->pl_lock_volume_factor);
}
+EXPORT_SYMBOL(ldlm_pool_get_lvf);
static struct ptlrpc_thread *ldlm_pools_thread;
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
-void ldlm_pools_wakeup(void)
+/*
+ * Count locks from all namespaces (if possible). Returns the number of
+ * cached locks.
+ */
+static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
- ENTRY;
- if (ldlm_pools_thread == NULL)
- return;
- ldlm_pools_thread->t_flags |= SVC_EVENT;
- cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
- EXIT;
+ unsigned long total = 0;
+ int nr_ns;
+ struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL; /* loop detection */
+ void *cookie;
+
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return 0;
+
+ CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
+ client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+
+ cookie = cl_env_reenter();
+
+ /*
+ * Find out how many resources we may release.
+ */
+ for (nr_ns = ldlm_namespace_nr_read(client);
+ nr_ns > 0; nr_ns--) {
+ mutex_lock(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ cl_env_reexit(cookie);
+ return 0;
+ }
+ ns = ldlm_namespace_first_locked(client);
+
+ if (ns == ns_old) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
+ ldlm_namespace_get(ns);
+ ldlm_namespace_move_to_active_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
+ ldlm_namespace_put(ns);
+ }
+
+ cl_env_reexit(cookie);
+ return total;
+}
+
+static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
+ gfp_t gfp_mask)
+{
+ unsigned long freed = 0;
+ int tmp, nr_ns;
+ struct ldlm_namespace *ns;
+ void *cookie;
+
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return -1;
+
+ cookie = cl_env_reenter();
+
+ /*
+ * Shrink at least ldlm_namespace_nr_read(client) namespaces.
+ */
+ for (tmp = nr_ns = ldlm_namespace_nr_read(client);
+ tmp > 0; tmp--) {
+ int cancel, nr_locks;
+
+ /*
+ * Do not call shrink under ldlm_namespace_lock(client)
+ */
+ mutex_lock(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+ ns = ldlm_namespace_first_locked(client);
+ ldlm_namespace_get(ns);
+ ldlm_namespace_move_to_active_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+
+ nr_locks = ldlm_pool_granted(&ns->ns_pool);
+ /*
+		 * We used to shrink proportionally, but with the new shrinker
+		 * API we lost the total number of freeable locks.
+ */
+ cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
+ freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
+ ldlm_namespace_put(ns);
+ }
+ cl_env_reexit(cookie);
+ /*
+	 * We only decrease the SLV in the server pools' shrinker; return
+	 * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128.
+ */
+ return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
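The per-namespace quota is now a crude even split, since with the new shrinker API the total number of freeable locks is no longer known. With illustrative numbers nr == 128, nr_ns == 4 and nr_locks == 1000 in this pool:

	cancel = 1 + min(1000, 128 / 4) == 33  /* locks asked from this pool */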
-EXPORT_SYMBOL(ldlm_pools_wakeup);
-/* Cancel @nr locks from all namespaces (if possible). Returns number of
+#ifdef HAVE_SHRINKER_COUNT
+static unsigned long ldlm_pools_srv_count(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
+ sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_count(struct shrinker *s,
+					  struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
+ sc->gfp_mask);
+}
+
+#else
+/*
+ * Cancel \a nr locks from all namespaces (if possible). Returns number of
* cached locks after shrink is finished. All namespaces are asked to
- * cancel approximately equal amount of locks. */
-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
- unsigned int gfp_mask)
+ * cancel an approximately equal number of locks to keep things balanced.
+ */
+static int ldlm_pools_shrink(ldlm_side_t client, int nr,
+ gfp_t gfp_mask)
{
- int total = 0, cached = 0, nr_ns;
- struct ldlm_namespace *ns;
+ unsigned long total = 0;
- if (nr != 0 && !(gfp_mask & __GFP_FS))
- return -1;
-
- CDEBUG(D_DLMTRACE, "request to shrink %d %s locks from all pools\n",
- nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
-
- /* Find out how many resources we may release. */
- mutex_down(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
- total += ldlm_pool_granted(&ns->ns_pool);
- mutex_up(ldlm_namespace_lock(client));
-
- if (nr == 0 || total == 0)
- return total;
-
- /* Shrink at least ldlm_namespace_nr(client) namespaces. */
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
- nr_ns > 0; nr_ns--)
- {
- int cancel, nr_locks;
-
- /* Do not call shrink under ldlm_namespace_lock(client) */
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
- /* If list is empty, we can't return any @cached > 0,
- * that probably would cause needless shrinker
- * call. */
- cached = 0;
- break;
- }
- ns = ldlm_namespace_first(client);
- ldlm_namespace_get(ns);
- ldlm_namespace_move(ns, client);
- mutex_up(ldlm_namespace_lock(client));
-
- nr_locks = ldlm_pool_granted(&ns->ns_pool);
- cancel = 1 + nr_locks * nr / total;
- ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
- cached += ldlm_pool_granted(&ns->ns_pool);
- ldlm_namespace_put(ns, 1);
- }
- return cached;
+ if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
+ !(gfp_mask & __GFP_FS))
+ return -1;
+
+ CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
+ nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+
+ total = ldlm_pools_count(client, gfp_mask);
+
+ if (nr == 0 || total == 0)
+ return total;
+
+ return ldlm_pools_scan(client, nr, gfp_mask);
}
-static int ldlm_pools_srv_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr, gfp_mask);
+ return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
+ shrink_param(sc, nr_to_scan),
+ shrink_param(sc, gfp_mask));
}
-static int ldlm_pools_cli_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr, gfp_mask);
+ return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
+ shrink_param(sc, nr_to_scan),
+ shrink_param(sc, gfp_mask));
}
-void ldlm_pools_recalc(ldlm_side_t client)
+#endif /* HAVE_SHRINKER_COUNT */
+
+int ldlm_pools_recalc(ldlm_side_t client)
{
- __u32 nr_l = 0, nr_p = 0, l;
+ unsigned long nr_l = 0, nr_p = 0, l;
struct ldlm_namespace *ns;
- int rc, nr, equal = 0;
+ struct ldlm_namespace *ns_old = NULL;
+ int nr, equal = 0;
+ int time = 50; /* seconds of sleep if no active namespaces */
+
+ /*
+	 * No need to set up pool limits for client pools.
+ */
+ if (client == LDLM_NAMESPACE_SERVER) {
+ /*
+ * Check all modest namespaces first.
+ */
+ mutex_lock(ldlm_namespace_lock(client));
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
+ continue;
- /* Check all modest namespaces. */
- mutex_down(ldlm_namespace_lock(client));
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
- if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
- continue;
-
- if (client == LDLM_NAMESPACE_SERVER) {
l = ldlm_pool_granted(&ns->ns_pool);
if (l == 0)
l = 1;
- /* Set the modest pools limit equal to their avg granted
- * locks + 5%. */
- l += dru(l * LDLM_POOLS_MODEST_MARGIN, 100);
+ /*
+ * Set the modest pools limit equal to their avg granted
+ * locks + ~6%.
+ */
+ l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
ldlm_pool_setup(&ns->ns_pool, l);
nr_l += l;
nr_p++;
}
- }
- /* Make sure that modest namespaces did not eat more that 2/3 of limit */
- if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
- CWARN("Modest pools eat out 2/3 of locks limit. %d of %lu. "
- "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
- equal = 1;
- }
+ /*
+		 * Make sure that modest namespaces did not eat more than 2/3
+		 * of the limit.
+ */
+ if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
+ CWARN("\"Modest\" pools eat out 2/3 of server locks "
+ "limit (%lu of %lu). This means that you have too "
+ "many clients for this amount of server RAM. "
+ "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
+ equal = 1;
+ }
- /* The rest is given to greedy namespaces. */
- list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
- if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
- continue;
+ /*
+ * The rest is given to greedy namespaces.
+ */
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
+ continue;
- if (client == LDLM_NAMESPACE_SERVER) {
if (equal) {
- /* In the case 2/3 locks are eaten out by
+ /*
+				 * In the case where 2/3 of the locks are eaten up by
* modest pools, we re-setup equal limit
- * for _all_ pools. */
+ * for _all_ pools.
+ */
l = LDLM_POOL_HOST_L /
- atomic_read(ldlm_namespace_nr(client));
+ ldlm_namespace_nr_read(client);
} else {
- /* All the rest of greedy pools will have
- * all locks in equal parts.*/
+ /*
+				 * All the rest of the greedy pools will have
+				 * all locks in equal parts.
+ */
l = (LDLM_POOL_HOST_L - nr_l) /
- (atomic_read(ldlm_namespace_nr(client)) -
+ (ldlm_namespace_nr_read(client) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
}
+ mutex_unlock(ldlm_namespace_lock(client));
}
- mutex_up(ldlm_namespace_lock(client));
- /* Recalc at least ldlm_namespace_nr(client) namespaces. */
- for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
- /* Lock the list, get first @ns in the list, getref, move it
+ /*
+ * Recalc at least ldlm_namespace_nr(client) namespaces.
+ */
+ for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
+ int skip;
+ /*
+ * Lock the list, get first @ns in the list, getref, move it
* to the tail, unlock and call pool recalc. This way we avoid
* calling recalc under @ns lock, which is really good as we get
* rid of potential deadlock on client nodes when canceling
- * locks synchronously. */
- mutex_down(ldlm_namespace_lock(client));
- if (list_empty(ldlm_namespace_list(client))) {
- mutex_up(ldlm_namespace_lock(client));
- break;
- }
- ns = ldlm_namespace_first(client);
- ldlm_namespace_get(ns);
- ldlm_namespace_move(ns, client);
- mutex_up(ldlm_namespace_lock(client));
-
- /* After setup is done - recalc the pool. */
- rc = ldlm_pool_recalc(&ns->ns_pool);
- if (rc)
- CERROR("%s: pool recalculation error "
- "%d\n", ns->ns_pool.pl_name, rc);
-
- ldlm_namespace_put(ns, 1);
+ * locks synchronously.
+ */
+ mutex_lock(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+ ns = ldlm_namespace_first_locked(client);
+
+ if (ns_old == ns) { /* Full pass complete */
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ /* We got an empty namespace, need to move it back to inactive
+ * list.
+ * The race with parallel resource creation is fine:
+ * - If they do namespace_get before our check, we fail the
+ * check and they move this item to the end of the list anyway
+ * - If we do the check and then they do namespace_get, then
+ * we move the namespace to inactive and they will move
+ * it back to active (synchronised by the lock, so no clash
+ * there).
+ */
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
+ spin_lock(&ns->ns_lock);
+ /*
+		 * Skip an ns which is being freed; we don't want to increase
+		 * its refcount again, not even temporarily. bz21519 & LU-499.
+ */
+ if (ns->ns_stopping) {
+ skip = 1;
+ } else {
+ skip = 0;
+ ldlm_namespace_get(ns);
+ }
+ spin_unlock(&ns->ns_lock);
+
+ ldlm_namespace_move_to_active_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+
+ /*
+ * After setup is done - recalc the pool.
+ */
+ if (!skip) {
+ int ttime = ldlm_pool_recalc(&ns->ns_pool);
+
+ if (ttime < time)
+ time = ttime;
+
+ ldlm_namespace_put(ns);
+ }
}
+ return time;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
static int ldlm_pools_thread_main(void *arg)
{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- char *t_name = "ldlm_poold";
- ENTRY;
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+ int s_time, c_time;
+ ENTRY;
- cfs_daemonize(t_name);
- thread->t_flags = SVC_RUNNING;
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
- CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
- t_name, cfs_curproc_pid());
+ CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
+ "ldlm_poold", current_pid());
while (1) {
struct l_wait_info lwi;
- /* Recal all pools on this tick. */
- ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
- ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
-
- /* Wait until the next check time, or until we're
- * stopped. */
- lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
- NULL, NULL);
- l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
- (SVC_STOPPING|SVC_EVENT)),
+ /*
+		 * Recalc all pools on this tick.
+ */
+ s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
+ c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
+
+ /*
+ * Wait until the next check time, or until we're
+ * stopped.
+ */
+ lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
+ NULL, NULL);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
&lwi);
- if (thread->t_flags & SVC_STOPPING) {
- thread->t_flags &= ~SVC_STOPPING;
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
break;
- } else if (thread->t_flags & SVC_EVENT) {
- thread->t_flags &= ~SVC_EVENT;
- }
+ else
+ thread_test_and_clear_flags(thread, SVC_EVENT);
}
- thread->t_flags = SVC_STOPPED;
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
- CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
- t_name, cfs_curproc_pid());
+ CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
+ "ldlm_poold", current_pid());
- complete_and_exit(&ldlm_pools_comp, 0);
+ complete_and_exit(&ldlm_pools_comp, 0);
}
-static int ldlm_pools_thread_start(ldlm_side_t client)
+static int ldlm_pools_thread_start(void)
{
- struct l_wait_info lwi = { 0 };
- int rc;
- ENTRY;
-
- if (ldlm_pools_thread != NULL)
- RETURN(-EALREADY);
-
- OBD_ALLOC_PTR(ldlm_pools_thread);
- if (ldlm_pools_thread == NULL)
- RETURN(-ENOMEM);
-
- ldlm_pools_thread->t_id = client;
- init_completion(&ldlm_pools_comp);
- cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
-
- /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
- * just drop the VM and FILES in ptlrpc_daemonize() right away. */
- rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
- CLONE_VM | CLONE_FILES);
- if (rc < 0) {
- CERROR("Can't start pool thread, error %d\n",
- rc);
- OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
- ldlm_pools_thread = NULL;
- RETURN(rc);
- }
- l_wait_event(ldlm_pools_thread->t_ctl_waitq,
- (ldlm_pools_thread->t_flags & SVC_RUNNING), &lwi);
- RETURN(0);
+ struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
+ ENTRY;
+
+ if (ldlm_pools_thread != NULL)
+ RETURN(-EALREADY);
+
+ OBD_ALLOC_PTR(ldlm_pools_thread);
+ if (ldlm_pools_thread == NULL)
+ RETURN(-ENOMEM);
+
+ init_completion(&ldlm_pools_comp);
+ init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
+
+ task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
+ "ldlm_poold");
+ if (IS_ERR(task)) {
+ CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
+ OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
+ ldlm_pools_thread = NULL;
+ RETURN(PTR_ERR(task));
+ }
+ l_wait_event(ldlm_pools_thread->t_ctl_waitq,
+ thread_is_running(ldlm_pools_thread), &lwi);
+ RETURN(0);
}
static void ldlm_pools_thread_stop(void)
{
- ENTRY;
-
- if (ldlm_pools_thread == NULL) {
- EXIT;
- return;
- }
-
- ldlm_pools_thread->t_flags = SVC_STOPPING;
- cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
-
- /* Make sure that pools thread is finished before freeing @thread.
- * This fixes possible race and oops due to accessing freed memory
- * in pools thread. */
- wait_for_completion(&ldlm_pools_comp);
- OBD_FREE_PTR(ldlm_pools_thread);
- ldlm_pools_thread = NULL;
- EXIT;
+ ENTRY;
+
+ if (ldlm_pools_thread == NULL) {
+ EXIT;
+ return;
+ }
+
+ thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
+ wake_up(&ldlm_pools_thread->t_ctl_waitq);
+
+ /*
+	 * Make sure that the pools thread has finished before freeing
+	 * @thread. This fixes a possible race and oops due to accessing
+	 * freed memory in the pools thread.
+ */
+ wait_for_completion(&ldlm_pools_comp);
+ OBD_FREE_PTR(ldlm_pools_thread);
+ ldlm_pools_thread = NULL;
+ EXIT;
}
-int ldlm_pools_init(ldlm_side_t client)
+int ldlm_pools_init(void)
{
- int rc;
- ENTRY;
-
- rc = ldlm_pools_thread_start(client);
- if (rc == 0) {
- ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
- }
- RETURN(rc);
+ int rc;
+ DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
+ ldlm_pools_srv_count, ldlm_pools_srv_scan);
+ DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
+ ldlm_pools_cli_count, ldlm_pools_cli_scan);
+ ENTRY;
+
+ rc = ldlm_pools_thread_start();
+ if (rc == 0) {
+ ldlm_pools_srv_shrinker =
+ set_shrinker(DEFAULT_SEEKS, &shsvar);
+ ldlm_pools_cli_shrinker =
+ set_shrinker(DEFAULT_SEEKS, &shcvar);
+ }
+ RETURN(rc);
}
EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
- }
- ldlm_pools_thread_stop();
+ if (ldlm_pools_srv_shrinker != NULL) {
+ remove_shrinker(ldlm_pools_srv_shrinker);
+ ldlm_pools_srv_shrinker = NULL;
+ }
+ if (ldlm_pools_cli_shrinker != NULL) {
+ remove_shrinker(ldlm_pools_cli_shrinker);
+ ldlm_pools_cli_shrinker = NULL;
+ }
+ ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
-#endif /* __KERNEL__ */
#else /* !HAVE_LRU_RESIZE_SUPPORT */
-int ldlm_pool_setup(struct ldlm_pool *pl, __u32 limit)
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
return 0;
}
EXPORT_SYMBOL(ldlm_pool_recalc);
int ldlm_pool_shrink(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask)
+ int nr, gfp_t gfp_mask)
{
return 0;
}
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
+__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
+{
+ return 1;
+}
+EXPORT_SYMBOL(ldlm_pool_get_clv);
+
+void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
+{
+ return;
+}
+EXPORT_SYMBOL(ldlm_pool_set_clv);
+
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
return 0;
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
-int ldlm_pools_init(ldlm_side_t client)
+__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
return 0;
}
-EXPORT_SYMBOL(ldlm_pools_init);
+EXPORT_SYMBOL(ldlm_pool_get_lvf);
-void ldlm_pools_fini(void)
+int ldlm_pools_init(void)
{
- return;
+ return 0;
}
-EXPORT_SYMBOL(ldlm_pools_fini);
+EXPORT_SYMBOL(ldlm_pools_init);
-void ldlm_pools_wakeup(void)
+void ldlm_pools_fini(void)
{
return;
}
-EXPORT_SYMBOL(ldlm_pools_wakeup);
+EXPORT_SYMBOL(ldlm_pools_fini);
-void ldlm_pools_recalc(ldlm_side_t client)
+int ldlm_pools_recalc(ldlm_side_t client)
{
- return;
+ return 0;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
#endif /* HAVE_LRU_RESIZE_SUPPORT */