b=21882 lru-resize comments
diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
index 3d5c712..bb3b11c 100644
--- a/lustre/ldlm/ldlm_pool.c
+++ b/lustre/ldlm/ldlm_pool.c
@@ -1,29 +1,44 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- *  Copyright (c) 2007 Cluster File Systems, Inc.
- *   Author: Yury Umanets <umka@clusterfs.com>
+ * GPL HEADER START
  *
- *   This file is part of the Lustre file system, http://www.lustre.org
- *   Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   You may have signed or agreed to another license before downloading
- *   this software.  If so, you are bound by the terms and conditions
- *   of that agreement, and the following does not apply to you.  See the
- *   LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   If you did not agree to a different license, then this copy of Lustre
- *   is open source software; you can redistribute it and/or modify it
- *   under the terms of version 2 of the GNU General Public License as
- *   published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   In either case, Lustre is distributed in the hope that it will be
- *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_pool.c
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
  */
 
-/* 
+/*
  * Idea of this code is rather simple. Each second, for each server namespace
  * we have SLV - server lock volume which is calculated on current number of
  * granted locks, grant speed for past period, etc - that is, locking load.
  * pl_cancel_rate - Number of canceled locks for last T (calculated);
  * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
  * pl_grant_plan - Planned number of granted locks for next T (calculated);
- *
- * pl_grant_step - Grant plan step, that is how ->pl_grant_plan
- * will change in next T (tunable);
- *
  * pl_server_lock_volume - Current server lock volume (calculated);
  *
  * As it may be seen from list above, we have few possible tunables which may
 # include <liblustre.h>
 #endif
 
+#include <cl_object.h>
+
 #include <obd_class.h>
 #include <obd_support.h>
 #include "ldlm_internal.h"
 #ifdef HAVE_LRU_RESIZE_SUPPORT
 
 /*
- * 50 ldlm locks for 1MB of RAM. 
+ * 50 ldlm locks for 1MB of RAM.
+ */
+#define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50)
+
+/*
+ * Maximal possible grant step plan in %.
  */
-#define LDLM_POOL_HOST_L ((num_physpages >> (20 - CFS_PAGE_SHIFT)) * 50)
+#define LDLM_POOL_MAX_GSP (30)
 
 /*
- * Default step in % for grant plan. 
+ * Minimal possible grant step plan in %.
  */
-#define LDLM_POOL_GSP (10)
+#define LDLM_POOL_MIN_GSP (1)
 
-/* 
- * LDLM_POOL_GSP% of all locks is default GP. 
+/*
+ * This controls the speed of reaching LDLM_POOL_MAX_GSP
+ * with increasing thread period.
  */
-#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_GSP) / 100)
+#define LDLM_POOL_GSP_STEP (4)
 
-/* 
- * Max age for locks on clients. 
+/*
+ * LDLM_POOL_MAX_GSP% of all locks is default GP.
+ */
+#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)
+
+/*
+ * Max age for locks on clients.
  */
 #define LDLM_POOL_MAX_AGE (36000)
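
To put a number on LDLM_POOL_HOST_L, here is a minimal stand-alone sketch of the same arithmetic, assuming a hypothetical 4 GB host with 4 KB pages (the real CFS_NUM_CACHEPAGES and CFS_PAGE_SHIFT come from libcfs):

```c
#include <stdio.h>

/* Hypothetical stand-ins for the libcfs values the macro uses. */
#define CFS_PAGE_SHIFT     12              /* 4 KB pages */
#define CFS_NUM_CACHEPAGES (1UL << 20)     /* 4 GB / 4 KB = 2^20 pages */

/* Same arithmetic as LDLM_POOL_HOST_L: 50 ldlm locks per 1 MB of RAM. */
#define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50)

int main(void)
{
        /* 2^20 pages >> 8 gives 4096 MB of RAM, times 50 locks/MB. */
        printf("host lock limit: %lu\n", (unsigned long)LDLM_POOL_HOST_L);
        /* Prints 204800 for this hypothetical host. */
        return 0;
}
```
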
 
@@ -136,7 +160,7 @@ static inline __u64 ldlm_pool_slv_max(__u32 L)
 {
         /*
          * Allow to have all locks for 1 client for 10 hrs.
-         * Formula is the following: limit * 10h / 1 client. 
+         * Formula is the following: limit * 10h / 1 client.
          */
         __u64 lim = L *  LDLM_POOL_MAX_AGE / 1;
         return lim;
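
With that hypothetical 204800-lock limit, the 10-hour formula above works out as follows (a sketch of the same computation; LDLM_POOL_MAX_AGE is 36000 seconds per the define above):

```c
#include <stdio.h>

#define LDLM_POOL_MAX_AGE (36000)       /* 10 hours, in seconds */

/* Mirrors ldlm_pool_slv_max(): allow 1 client to hold all locks for 10h. */
static unsigned long long pool_slv_max(unsigned int L)
{
        return (unsigned long long)L * LDLM_POOL_MAX_AGE / 1;
}

int main(void)
{
        /* 204800 * 36000 = 7372800000: the SLV a new server pool starts at. */
        printf("slv_max = %llu\n", pool_slv_max(204800));
        return 0;
}
```
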
@@ -169,25 +193,57 @@ static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
 }
 
 /**
+ * Calculates suggested grant_step in % of available locks for passed
+ * \a t (thread period). This is later used in grant_plan calculations.
+ */
+static inline int ldlm_pool_t2gsp(int t)
+{
+        /*
+         * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
+         * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
+         *
+         * How this will affect execution is the following:
+         *
+         * - for thread period 1s we will have grant_step 1%, which is good
+         * for taking some load off the server and pushing it out to clients.
+         * A grant_step of 1% means that the server will not allow clients
+         * to take lots of locks in a short period of time while keeping all
+         * their old locks cached. Clients will always have to give some
+         * locks back if they want to take new ones;
+         *
+         * - for thread period 10s (the default) we will have 23%, which
+         * means that clients will have enough room to take some new locks
+         * without giving any back. All locks from this 23% which were not
+         * taken by clients in the current period will contribute to SLV
+         * growth. A growing SLV means more locks cached on clients, until
+         * the limit or the grant plan is reached.
+         */
+        return LDLM_POOL_MAX_GSP -
+                (LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) /
+                (1 << (t / LDLM_POOL_GSP_STEP));
+}
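
A stand-alone sketch of the same formula makes the comment's numbers easy to verify:

```c
#include <stdio.h>

#define LDLM_POOL_MAX_GSP  (30)
#define LDLM_POOL_MIN_GSP  (1)
#define LDLM_POOL_GSP_STEP (4)

/* Same integer arithmetic as ldlm_pool_t2gsp() above. */
static int pool_t2gsp(int t)
{
        return LDLM_POOL_MAX_GSP -
                (LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) /
                (1 << (t / LDLM_POOL_GSP_STEP));
}

int main(void)
{
        int t;

        /* t=1..3 -> 1%, t=4 -> 16%, t=10 (default) -> 23%, t>=20 -> 30%. */
        for (t = 1; t <= 20; t++)
                printf("period %2ds -> grant step %2d%%\n", t, pool_t2gsp(t));
        return 0;
}
```
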
+
+/**
  * Recalculates next grant limit on passed \a pl.
  *
- * \pre ->pl_lock is locked. 
+ * \pre ->pl_lock is locked.
  */
 static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
 {
         int granted, grant_step, limit;
-        
+
         limit = ldlm_pool_get_limit(pl);
-        granted = atomic_read(&pl->pl_granted);
+        granted = cfs_atomic_read(&pl->pl_granted);
 
-        grant_step = ((limit - granted) * pl->pl_grant_step) / 100;
+        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+        grant_step = ((limit - granted) * grant_step) / 100;
         pl->pl_grant_plan = granted + grant_step;
 }
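
Combining the pieces, a worked example of the grant plan arithmetic above, with the hypothetical limit from earlier and the 23% step for the default 10s period:

```c
#include <stdio.h>

int main(void)
{
        /* Hypothetical server: the 204800-lock limit from earlier sketches,
         * 150000 locks currently granted, 10s recalc period. */
        int limit = 204800, granted = 150000;
        int grant_step, grant_plan;

        /* ldlm_pool_t2gsp(10) == 23, as shown above. */
        grant_step = ((limit - granted) * 23) / 100;    /* 12604 */
        grant_plan = granted + grant_step;              /* 162604 */

        printf("grant_plan = %d\n", grant_plan);
        return 0;
}
```
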
 
 /**
  * Recalculates next SLV on passed \a pl.
  *
- * \pre ->pl_lock is locked. 
+ * \pre ->pl_lock is locked.
  */
 static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
 {
@@ -198,19 +254,19 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
         slv = pl->pl_server_lock_volume;
         grant_plan = pl->pl_grant_plan;
         limit = ldlm_pool_get_limit(pl);
-        granted = atomic_read(&pl->pl_granted);
+        granted = cfs_atomic_read(&pl->pl_granted);
 
         grant_usage = limit - (granted - grant_plan);
         if (grant_usage <= 0)
                 grant_usage = 1;
 
-        /* 
-         * Find out SLV change factor which is the ratio of grant usage 
-         * from limit. SLV changes as fast as the ratio of grant plan 
-         * consumtion. The more locks from grant plan are not consumed 
-         * by clients in last interval (idle time), the faster grows 
+        /*
+         * Find out SLV change factor which is the ratio of grant usage
+         * from limit. SLV changes as fast as the ratio of grant plan
+         * consumption. The more locks from grant plan are not consumed
+         * by clients in last interval (idle time), the faster grows
          * SLV. And the opposite, the more grant plan is over-consumed
-         * (load time) the faster drops SLV. 
+         * (load time) the faster drops SLV.
          */
         slv_factor = (grant_usage * 100) / limit;
         if (2 * abs(granted - limit) > limit) {
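
The hunk ends mid-function, but the visible part of the factor computation can be exercised on its own. A sketch continuing the hypothetical numbers from above; the SLV scaling that follows the abs() test is not shown in this hunk and is omitted here:

```c
#include <stdio.h>

int main(void)
{
        int limit = 204800, granted = 150000, grant_plan = 162604;
        int grant_usage, slv_factor;

        /* Visible part of ldlm_pool_recalc_slv(): how much of the grant
         * plan went unconsumed, expressed in % of the limit. */
        grant_usage = limit - (granted - grant_plan);
        if (grant_usage <= 0)
                grant_usage = 1;
        slv_factor = (grant_usage * 100) / limit;

        /* Idle clients (granted < grant_plan) give a factor above 100,
         * which grows SLV; over-consumption gives one below 100, which
         * shrinks it. Here: 106. */
        printf("slv_factor = %d\n", slv_factor);
        return 0;
}
```
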
@@ -232,17 +288,17 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
 /**
  * Recalculates next stats on passed \a pl.
  *
- * \pre ->pl_lock is locked. 
+ * \pre ->pl_lock is locked.
  */
 static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
 {
         int grant_plan = pl->pl_grant_plan;
         __u64 slv = pl->pl_server_lock_volume;
-        int granted = atomic_read(&pl->pl_granted);
-        int grant_rate = atomic_read(&pl->pl_grant_rate);
-        int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+        int granted = cfs_atomic_read(&pl->pl_granted);
+        int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+        int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
 
-        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT, 
+        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             slv);
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             granted);
@@ -261,108 +317,97 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
 {
         struct obd_device *obd;
 
-        /* 
+        /*
          * Set new SLV in obd field for using it later without accessing the
          * pool. This is required to avoid race between sending reply to client
          * with new SLV and cleanup server stack in which we can't guarantee
          * that namespace is still alive. We know only that obd is alive as
-         * long as valid export is alive. 
+         * long as valid export is alive.
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         obd->obd_pool_slv = pl->pl_server_lock_volume;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 }
 
 /**
  * Recalculates all pool fields on passed \a pl.
  *
- * \pre ->pl_lock is not locked. 
+ * \pre ->pl_lock is not locked.
  */
 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 {
         time_t recalc_interval_sec;
         ENTRY;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
-        if (recalc_interval_sec > 0) {
-                /* 
-                 * Update statistics.
-                 */
-                ldlm_pool_recalc_stats(pl);
-
-                /* 
+        if (recalc_interval_sec >= pl->pl_recalc_period) {
+                /*
                  * Recalc SLV after last period. This should be done
-                 * _before_ recalculating new grant plan. 
+                 * _before_ recalculating new grant plan.
                  */
                 ldlm_pool_recalc_slv(pl);
-                
-                /* 
-                 * Make sure that pool informed obd of last SLV changes. 
+
+                /*
+                 * Make sure that pool informed obd of last SLV changes.
                  */
                 ldlm_srv_pool_push_slv(pl);
 
-                /* 
-                 * Update grant_plan for new period. 
+                /*
+                 * Update grant_plan for new period.
                  */
                 ldlm_pool_recalc_grant_plan(pl);
 
-                /* 
-                 * Zero out all rates and speed for the last period. 
-                 */
-                atomic_set(&pl->pl_grant_rate, 0);
-                atomic_set(&pl->pl_cancel_rate, 0);
-                atomic_set(&pl->pl_grant_speed, 0);
                 pl->pl_recalc_time = cfs_time_current_sec();
-                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT, 
+                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                                     recalc_interval_sec);
         }
-        spin_unlock(&pl->pl_lock);
+
+        cfs_spin_unlock(&pl->pl_lock);
         RETURN(0);
 }
 
 /**
  * This function is used on server side as main entry point for memory
- * preasure handling. It decreases SLV on \a pl according to passed
+ * pressure handling. It decreases SLV on \a pl according to passed
  * \a nr and \a gfp_mask.
- * 
+ *
  * Our goal here is to decrease SLV such a way that clients hold \a nr
- * locks smaller in next 10h. 
+ * locks smaller in next 10h.
  */
 static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
                                 int nr, unsigned int gfp_mask)
 {
         __u32 limit;
-        ENTRY;
 
-        /* 
-         * VM is asking how many entries may be potentially freed. 
+        /*
+         * VM is asking how many entries may be potentially freed.
          */
         if (nr == 0)
-                RETURN(atomic_read(&pl->pl_granted));
+                return cfs_atomic_read(&pl->pl_granted);
 
-        /* 
+        /*
          * Client already canceled locks but server is already in shrinker
-         * and can't cancel anything. Let's catch this race. 
+         * and can't cancel anything. Let's catch this race.
          */
-        if (atomic_read(&pl->pl_granted) == 0)
+        if (cfs_atomic_read(&pl->pl_granted) == 0)
                 RETURN(0);
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
 
-        /* 
-         * We want shrinker to possibly cause cancelation of @nr locks from
+        /*
+         * We want shrinker to possibly cause cancellation of @nr locks from
          * clients or grant approximately @nr locks smaller next intervals.
          *
-         * This is why we decresed SLV by @nr. This effect will only be as
+         * This is why we decreased SLV by @nr. This effect will only be as
          * long as one re-calc interval (1s these days) and this should be
          * enough to pass this decreased SLV to all clients. On next recalc
          * interval pool will either increase SLV if locks load is not high
          * or will keep on same level or even decrease again, thus, shrinker
          * decreased SLV will affect next recalc intervals and this way will
-         * make locking load lower. 
+         * make locking load lower.
          */
         if (nr < pl->pl_server_lock_volume) {
                 pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
@@ -371,17 +416,17 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
                 pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
         }
 
-        /* 
-         * Make sure that pool informed obd of last SLV changes. 
+        /*
+         * Make sure that pool informed obd of last SLV changes.
          */
         ldlm_srv_pool_push_slv(pl);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
-        /* 
+        /*
          * We did not really free any memory here so far, it only will be
-         * freed later may be, so that we return 0 to not confuse VM. 
+         * freed later, perhaps, so we return 0 to not confuse the VM.
          */
-        RETURN(0);
+        return 0;
 }
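
A minimal sketch of the SLV clamp above, with hypothetical numbers; ldlm_pool_slv_min() is not shown in this diff, so a stand-in floor is assumed:

```c
#include <stdio.h>

/* Stand-in for ldlm_pool_slv_min(), which this diff does not show; some
 * small floor derived from the limit is assumed. */
static unsigned long long fake_slv_min(unsigned int limit)
{
        return limit;
}

int main(void)
{
        unsigned long long slv = 7372800000ULL;   /* slv_max from earlier */
        unsigned int limit = 204800;
        int nr = 100000;                          /* locks the VM asked for */

        /* Same shape as ldlm_srv_pool_shrink(): drop SLV by nr so clients
         * end up holding roughly nr fewer locks over the next intervals,
         * but never go below the minimum. */
        if ((unsigned long long)nr < slv)
                slv -= nr;
        else
                slv = fake_slv_min(limit);

        printf("new slv = %llu\n", slv);          /* 7372700000 */
        return 0;
}
```
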
 
 /**
@@ -391,13 +436,13 @@ static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
 {
         struct obd_device *obd;
         ENTRY;
-        
+
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL && obd != LP_POISON);
         LASSERT(obd->obd_type != LP_POISON);
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         obd->obd_pool_limit = limit;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 
         ldlm_pool_set_limit(pl, limit);
         RETURN(0);
@@ -410,102 +455,102 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
 {
         struct obd_device *obd;
 
-        /* 
-         * Get new SLV and Limit from obd which is updated with comming 
-         * RPCs. 
+        /*
+         * Get new SLV and Limit from obd which is updated with coming
+         * RPCs.
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        read_lock(&obd->obd_pool_lock);
+        cfs_read_lock(&obd->obd_pool_lock);
         pl->pl_server_lock_volume = obd->obd_pool_slv;
         ldlm_pool_set_limit(pl, obd->obd_pool_limit);
-        read_unlock(&obd->obd_pool_lock);
+        cfs_read_unlock(&obd->obd_pool_lock);
 }
 
 /**
- * Recalculates client sise pool \a pl according to current SLV and Limit.
+ * Recalculates client side pool \a pl according to current SLV and Limit.
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
         time_t recalc_interval_sec;
         ENTRY;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
+        /*
+         * Check if we need to recalc lists now.
+         */
+        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+        if (recalc_interval_sec < pl->pl_recalc_period) {
+                cfs_spin_unlock(&pl->pl_lock);
+                RETURN(0);
+        }
 
-        /* 
-         * Make sure that pool knows last SLV and Limit from obd. 
+        /*
+         * Make sure that pool knows last SLV and Limit from obd.
          */
         ldlm_cli_pool_pop_slv(pl);
 
-        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
-        if (recalc_interval_sec > 0) {
-                /* 
-                 * Update statistics only every T. 
-                 */
-                ldlm_pool_recalc_stats(pl);
-
-                /* 
-                 * Zero out grant/cancel rates and speed for last period. 
-                 */
-                atomic_set(&pl->pl_grant_rate, 0);
-                atomic_set(&pl->pl_cancel_rate, 0);
-                atomic_set(&pl->pl_grant_speed, 0);
-                pl->pl_recalc_time = cfs_time_current_sec();
-                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT, 
-                                    recalc_interval_sec);
-        }
-        spin_unlock(&pl->pl_lock);
+        pl->pl_recalc_time = cfs_time_current_sec();
+        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+                            recalc_interval_sec);
+        cfs_spin_unlock(&pl->pl_lock);
 
-        /* 
-         * Do not cancel locks in case lru resize is disabled for this ns. 
+        /*
+         * Do not cancel locks in case lru resize is disabled for this ns.
          */
         if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
                 RETURN(0);
 
-        /* 
+        /*
          * In the time of canceling locks on client we do not need to maintain
          * sharp timing, we only want to cancel locks asap according to new SLV.
          * It may be called when SLV has changed much, this is why we do not
-         * take into account pl->pl_recalc_time here. 
+         * take into account pl->pl_recalc_time here.
          */
-        RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_ASYNC, 
+        RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC, 
                                LDLM_CANCEL_LRUR));
 }
 
 /**
- * This function is main entry point for memory preasure handling on client side.
- * Main goal of this function is to cancel some number of locks on passed \a pl
- * according to \a nr and \a gfp_mask.
+ * This function is the main entry point for memory pressure handling on the
+ * client side.  Its main goal is to cancel some number of locks on the
+ * passed \a pl according to \a nr and \a gfp_mask.
  */
 static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                 int nr, unsigned int gfp_mask)
 {
-        ENTRY;
-        
-        /* 
-         * Do not cancel locks in case lru resize is disabled for this ns. 
+        struct ldlm_namespace *ns;
+        int canceled = 0, unused;
+
+        ns = ldlm_pl2ns(pl);
+
+        /*
+         * Do not cancel locks in case lru resize is disabled for this ns.
          */
-        if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
+        if (!ns_connect_lru_resize(ns))
                 RETURN(0);
 
-        /* 
-         * Make sure that pool knows last SLV and Limit from obd. 
+        /*
+         * Make sure that pool knows last SLV and Limit from obd.
          */
         ldlm_cli_pool_pop_slv(pl);
 
-        /* 
-         * Find out how many locks may be released according to shrink 
-         * policy. 
-         */
-        if (nr == 0)
-                RETURN(ldlm_cancel_lru_estimate(ldlm_pl2ns(pl), 0, 0, 
-                                                LDLM_CANCEL_SHRINK));
-
-        /* 
-         * Cancel @nr locks accoding to shrink policy. 
+        cfs_spin_lock(&ns->ns_unused_lock);
+        unused = ns->ns_nr_unused;
+        cfs_spin_unlock(&ns->ns_unused_lock);
+        
+        if (nr) {
+                canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC, 
+                                           LDLM_CANCEL_SHRINK);
+        }
+#ifdef __KERNEL__
+        /*
+         * Return the number of potentially reclaimable locks.
          */
-        RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), nr, LDLM_SYNC, 
-                               LDLM_CANCEL_SHRINK));
+        return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
+#else
+        return unused - canceled;
+#endif
 }
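
On the kernel side the return value follows the VM shrinker convention: potentially reclaimable objects scaled by vfs_cache_pressure. A worked example with hypothetical counts:

```c
#include <stdio.h>

int main(void)
{
        int unused = 10000;     /* unused locks in the namespace LRU */
        int canceled = 1000;    /* locks just canceled by this shrink call */
        int vfs_cache_pressure = 100;   /* the sysctl's kernel default */

        /* Same arithmetic as the __KERNEL__ branch above; at the default
         * pressure of 100 this is unused - canceled, rounded down to a
         * multiple of 100. */
        printf("reclaimable = %d\n",
               ((unused - canceled) / 100) * vfs_cache_pressure);  /* 9000 */
        return 0;
}
```
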
 
 struct ldlm_pool_ops ldlm_srv_pool_ops = {
@@ -525,14 +570,33 @@ struct ldlm_pool_ops ldlm_cli_pool_ops = {
  */
 int ldlm_pool_recalc(struct ldlm_pool *pl)
 {
+        time_t recalc_interval_sec;
         int count;
 
+        cfs_spin_lock(&pl->pl_lock);
+        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+        if (recalc_interval_sec > 0) {
+                /*
+                 * Update pool statistics every 1s.
+                 */
+                ldlm_pool_recalc_stats(pl);
+
+                /*
+                 * Zero out all rates and speed for the last period.
+                 */
+                cfs_atomic_set(&pl->pl_grant_rate, 0);
+                cfs_atomic_set(&pl->pl_cancel_rate, 0);
+                cfs_atomic_set(&pl->pl_grant_speed, 0);
+        }
+        cfs_spin_unlock(&pl->pl_lock);
+
         if (pl->pl_ops->po_recalc != NULL) {
                 count = pl->pl_ops->po_recalc(pl);
-                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT, 
+                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                     count);
                 return count;
         }
+
         return 0;
 }
 EXPORT_SYMBOL(ldlm_pool_recalc);
@@ -545,14 +609,14 @@ int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                      unsigned int gfp_mask)
 {
         int cancel = 0;
-        
+
         if (pl->pl_ops->po_shrink != NULL) {
                 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                 if (nr > 0) {
-                        lprocfs_counter_add(pl->pl_stats, 
+                        lprocfs_counter_add(pl->pl_stats,
                                             LDLM_POOL_SHRINK_REQTD_STAT,
                                             nr);
-                        lprocfs_counter_add(pl->pl_stats, 
+                        lprocfs_counter_add(pl->pl_stats,
                                             LDLM_POOL_SHRINK_FREED_STAT,
                                             cancel);
                         CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
@@ -583,35 +647,36 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
 {
         int granted, grant_rate, cancel_rate, grant_step;
-        int nr = 0, grant_speed, grant_plan;
+        int nr = 0, grant_speed, grant_plan, lvf;
         struct ldlm_pool *pl = data;
         __u64 slv, clv;
         __u32 limit;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
         clv = pl->pl_client_lock_volume;
         limit = ldlm_pool_get_limit(pl);
         grant_plan = pl->pl_grant_plan;
-        grant_step = pl->pl_grant_step;
-        granted = atomic_read(&pl->pl_granted);
-        grant_rate = atomic_read(&pl->pl_grant_rate);
-        grant_speed = atomic_read(&pl->pl_grant_speed);
-        cancel_rate = atomic_read(&pl->pl_cancel_rate);
-        spin_unlock(&pl->pl_lock);
+        granted = cfs_atomic_read(&pl->pl_granted);
+        grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+        lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
+        grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
+        cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
+        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+        cfs_spin_unlock(&pl->pl_lock);
 
         nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
                        pl->pl_name);
         nr += snprintf(page + nr, count - nr, "  SLV: "LPU64"\n", slv);
         nr += snprintf(page + nr, count - nr, "  CLV: "LPU64"\n", clv);
+        nr += snprintf(page + nr, count - nr, "  LVF: %d\n", lvf);
 
-        nr += snprintf(page + nr, count - nr, "  LVF: %d\n",
-                       atomic_read(&pl->pl_lock_volume_factor));
-
-        nr += snprintf(page + nr, count - nr, "  GSP: %d%%\n",
-                       grant_step);
-        nr += snprintf(page + nr, count - nr, "  GP:  %d\n",
-                       grant_plan);
+        if (ns_is_server(ldlm_pl2ns(pl))) {
+                nr += snprintf(page + nr, count - nr, "  GSP: %d%%\n",
+                               grant_step);
+                nr += snprintf(page + nr, count - nr, "  GP:  %d\n",
+                               grant_plan);
+        }
         nr += snprintf(page + nr, count - nr, "  GR:  %d\n",
                        grant_rate);
         nr += snprintf(page + nr, count - nr, "  CR:  %d\n",
@@ -626,8 +691,8 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
 }
 
 LDLM_POOL_PROC_READER(grant_plan, int);
-LDLM_POOL_PROC_READER(grant_step, int);
-LDLM_POOL_PROC_WRITER(grant_step, int);
+LDLM_POOL_PROC_READER(recalc_period, int);
+LDLM_POOL_PROC_WRITER(recalc_period, int);
 
 static int ldlm_pool_proc_init(struct ldlm_pool *pl)
 {
@@ -696,11 +761,10 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
         pool_vars[0].read_fptr = lprocfs_rd_grant_plan;
         lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
 
-        snprintf(var_name, MAX_STRING_SIZE, "grant_step");
+        snprintf(var_name, MAX_STRING_SIZE, "recalc_period");
         pool_vars[0].data = pl;
-        pool_vars[0].read_fptr = lprocfs_rd_grant_step;
-        if (ns_is_server(ns))
-                pool_vars[0].write_fptr = lprocfs_wr_grant_step;
+        pool_vars[0].read_fptr = lprocfs_rd_recalc_period;
+        pool_vars[0].write_fptr = lprocfs_wr_recalc_period;
         lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
 
         snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
@@ -722,10 +786,10 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
         lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                              LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                              "granted", "locks");
-        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT, 
+        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                              LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                              "grant", "locks");
-        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT, 
+        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                              LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                              "cancel", "locks");
         lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
@@ -782,15 +846,14 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
         int rc;
         ENTRY;
 
-        spin_lock_init(&pl->pl_lock);
-        atomic_set(&pl->pl_granted, 0);
+        cfs_spin_lock_init(&pl->pl_lock);
+        cfs_atomic_set(&pl->pl_granted, 0);
         pl->pl_recalc_time = cfs_time_current_sec();
-        atomic_set(&pl->pl_lock_volume_factor, 1);
+        cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
 
-        atomic_set(&pl->pl_grant_rate, 0);
-        atomic_set(&pl->pl_cancel_rate, 0);
-        atomic_set(&pl->pl_grant_speed, 0);
-        pl->pl_grant_step = LDLM_POOL_GSP;
+        cfs_atomic_set(&pl->pl_grant_rate, 0);
+        cfs_atomic_set(&pl->pl_cancel_rate, 0);
+        cfs_atomic_set(&pl->pl_grant_speed, 0);
         pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
 
         snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
@@ -799,11 +862,13 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
         if (client == LDLM_NAMESPACE_SERVER) {
                 pl->pl_ops = &ldlm_srv_pool_ops;
                 ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
+                pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
                 pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
         } else {
-                pl->pl_server_lock_volume = 1;
                 ldlm_pool_set_limit(pl, 1);
+                pl->pl_server_lock_volume = 1;
                 pl->pl_ops = &ldlm_cli_pool_ops;
+                pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
         }
         pl->pl_client_lock_volume = 0;
         rc = ldlm_pool_proc_init(pl);
@@ -820,8 +885,8 @@ void ldlm_pool_fini(struct ldlm_pool *pl)
 {
         ENTRY;
         ldlm_pool_proc_fini(pl);
-        
-        /* 
+
+        /*
          * Pool should not be used after this point. We can't free it here as
          * it lives in struct ldlm_namespace, but still interested in catching
          * any abnormal using cases.
@@ -836,28 +901,27 @@ EXPORT_SYMBOL(ldlm_pool_fini);
  */
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
-        /* 
+        /*
          * FLOCK locks are special in a sense that they are almost never
          * cancelled, instead special kind of lock is used to drop them.
          * also there is no LRU for flock locks, so no point in tracking
-         * them anyway. 
+         * them anyway.
          */
         if (lock->l_resource->lr_type == LDLM_FLOCK)
                 return;
-
         ENTRY;
-                
-        atomic_inc(&pl->pl_granted);
-        atomic_inc(&pl->pl_grant_rate);
-        atomic_inc(&pl->pl_grant_speed);
+
+        LDLM_DEBUG(lock, "add lock to pool");
+        cfs_atomic_inc(&pl->pl_granted);
+        cfs_atomic_inc(&pl->pl_grant_rate);
+        cfs_atomic_inc(&pl->pl_grant_speed);
 
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
-        /* 
+        /*
          * Do not do pool recalc for client side as all locks which
-         * potentially may be canceled has already been packed into 
+         * potentially may be canceled has already been packed into
          * enqueue/cancel rpc. Also we do not want to run out of stack
-         * with too long call paths. 
+         * with too long call paths.
          */
         if (ns_is_server(ldlm_pl2ns(pl)))
                 ldlm_pool_recalc(pl);
@@ -876,11 +940,13 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
         if (lock->l_resource->lr_type == LDLM_FLOCK)
                 return;
         ENTRY;
-        LASSERT(atomic_read(&pl->pl_granted) > 0);
-        atomic_dec(&pl->pl_granted);
-        atomic_inc(&pl->pl_cancel_rate);
-        atomic_dec(&pl->pl_grant_speed);
-        
+
+        LDLM_DEBUG(lock, "del lock from pool");
+        LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
+        cfs_atomic_dec(&pl->pl_granted);
+        cfs_atomic_inc(&pl->pl_cancel_rate);
+        cfs_atomic_dec(&pl->pl_grant_speed);
+
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
 
         if (ns_is_server(ldlm_pl2ns(pl)))
@@ -892,14 +958,14 @@ EXPORT_SYMBOL(ldlm_pool_del);
 /**
  * Returns current \a pl SLV.
  *
- * \pre ->pl_lock is not locked. 
+ * \pre ->pl_lock is not locked.
  */
 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
 {
         __u64 slv;
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_slv);
@@ -907,27 +973,27 @@ EXPORT_SYMBOL(ldlm_pool_get_slv);
 /**
  * Sets passed \a slv to \a pl.
  *
- * \pre ->pl_lock is not locked. 
+ * \pre ->pl_lock is not locked.
  */
 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
 {
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         pl->pl_server_lock_volume = slv;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_slv);
 
 /**
  * Returns current \a pl CLV.
  *
- * \pre ->pl_lock is not locked. 
+ * \pre ->pl_lock is not locked.
  */
 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
 {
         __u64 slv;
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_client_lock_volume;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_clv);
@@ -935,13 +1001,13 @@ EXPORT_SYMBOL(ldlm_pool_get_clv);
 /**
  * Sets passed \a clv to \a pl.
  *
- * \pre ->pl_lock is not locked. 
+ * \pre ->pl_lock is not locked.
  */
 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
 {
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         pl->pl_client_lock_volume = clv;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_clv);
 
@@ -950,7 +1016,7 @@ EXPORT_SYMBOL(ldlm_pool_set_clv);
  */
 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_limit);
+        return cfs_atomic_read(&pl->pl_limit);
 }
 EXPORT_SYMBOL(ldlm_pool_get_limit);
 
@@ -959,7 +1025,7 @@ EXPORT_SYMBOL(ldlm_pool_get_limit);
  */
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
 {
-        atomic_set(&pl->pl_limit, limit);
+        cfs_atomic_set(&pl->pl_limit, limit);
 }
 EXPORT_SYMBOL(ldlm_pool_set_limit);
 
@@ -968,42 +1034,32 @@ EXPORT_SYMBOL(ldlm_pool_set_limit);
  */
 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_lock_volume_factor);
+        return cfs_atomic_read(&pl->pl_lock_volume_factor);
 }
 EXPORT_SYMBOL(ldlm_pool_get_lvf);
 
 #ifdef __KERNEL__
 static int ldlm_pool_granted(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_granted);
+        return cfs_atomic_read(&pl->pl_granted);
 }
 
 static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-static struct completion ldlm_pools_comp;
-
-void ldlm_pools_wakeup(void)
-{
-        ENTRY;
-        if (ldlm_pools_thread == NULL)
-                return;
-        ldlm_pools_thread->t_flags |= SVC_EVENT;
-        cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
-        EXIT;
-}
-EXPORT_SYMBOL(ldlm_pools_wakeup);
+static struct cfs_shrinker *ldlm_pools_srv_shrinker;
+static struct cfs_shrinker *ldlm_pools_cli_shrinker;
+static cfs_completion_t ldlm_pools_comp;
 
-/* 
+/*
  * Cancel \a nr locks from all namespaces (if possible). Returns number of
  * cached locks after shrink is finished. All namespaces are asked to
  * cancel approximately equal amount of locks to keep balancing.
  */
-static int ldlm_pools_shrink(ldlm_side_t client, int nr, 
+static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                              unsigned int gfp_mask)
 {
         int total = 0, cached = 0, nr_ns;
         struct ldlm_namespace *ns;
+        void *cookie;
 
         if (nr != 0 && !(gfp_mask & __GFP_FS))
                 return -1;
@@ -1011,46 +1067,51 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
                nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
 
-        /* 
-         * Find out how many resources we may release. 
+        cookie = cl_env_reenter();
+
+        /*
+         * Find out how many resources we may release.
          */
-        for (nr_ns = atomic_read(ldlm_namespace_nr(client)); 
-             nr_ns > 0; nr_ns--) 
+        for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
+             nr_ns > 0; nr_ns--)
         {
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
+                        cl_env_reexit(cookie);
                         return 0;
                 }
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
                 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                 ldlm_namespace_put(ns, 1);
         }
-        if (nr == 0 || total == 0)
+
+        if (nr == 0 || total == 0) {
+                cl_env_reexit(cookie);
                 return total;
+        }
 
-        /* 
-         * Shrink at least ldlm_namespace_nr(client) namespaces. 
+        /*
+         * Shrink at least ldlm_namespace_nr(client) namespaces.
          */
-        for (nr_ns = atomic_read(ldlm_namespace_nr(client)); 
-             nr_ns > 0; nr_ns--) 
+        for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
+             nr_ns > 0; nr_ns--)
         {
                 int cancel, nr_locks;
 
-                /* 
-                 * Do not call shrink under ldlm_namespace_lock(client) 
+                /*
+                 * Do not call shrink under ldlm_namespace_lock(client)
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
-                        /* 
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
+                        /*
                          * If list is empty, we can't return any @cached > 0,
                          * that probably would cause needless shrinker
-                         * call. 
+                         * call.
                          */
                         cached = 0;
                         break;
@@ -1058,14 +1119,15 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
-                
+                cfs_mutex_up(ldlm_namespace_lock(client));
+
                 nr_locks = ldlm_pool_granted(&ns->ns_pool);
                 cancel = 1 + nr_locks * nr / total;
                 ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                 cached += ldlm_pool_granted(&ns->ns_pool);
                 ldlm_namespace_put(ns, 1);
         }
+        cl_env_reexit(cookie);
         return cached;
 }
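
The second pass distributes the requested cancellation in proportion to each namespace's granted count (cancel = 1 + nr_locks * nr / total). A sketch with two hypothetical namespaces:

```c
#include <stdio.h>

int main(void)
{
        /* Hypothetical granted counts per namespace, as summed up by the
         * first pass of ldlm_pools_shrink(). */
        int granted[] = { 3000, 1000 };
        int total = 4000, nr = 400;
        int i;

        for (i = 0; i < 2; i++) {
                /* Same formula as above: each namespace cancels its
                 * proportional share of nr, plus 1 so progress is always
                 * made. */
                int cancel = 1 + granted[i] * nr / total;
                printf("ns%d: cancel %d of %d\n", i, cancel, granted[i]);
        }
        return 0;       /* ns0: 301, ns1: 101 */
}
```
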
 
@@ -1085,16 +1147,16 @@ void ldlm_pools_recalc(ldlm_side_t client)
         struct ldlm_namespace *ns;
         int nr, equal = 0;
 
-        /* 
+        /*
          * No need to setup pool limit for client pools.
          */
         if (client == LDLM_NAMESPACE_SERVER) {
-                /* 
-                 * Check all modest namespaces first. 
+                /*
+                 * Check all modest namespaces first.
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                list_for_each_entry(ns, ldlm_namespace_list(client), 
-                                    ns_list_chain) 
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+                                        ns_list_chain)
                 {
                         if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                 continue;
@@ -1103,9 +1165,9 @@ void ldlm_pools_recalc(ldlm_side_t client)
                         if (l == 0)
                                 l = 1;
 
-                        /* 
+                        /*
                          * Set the modest pools limit equal to their avg granted
-                         * locks + 5%. 
+                         * locks + 5%.
                          */
                         l += dru(l * LDLM_POOLS_MODEST_MARGIN, 100);
                         ldlm_pool_setup(&ns->ns_pool, l);
@@ -1113,9 +1175,9 @@ void ldlm_pools_recalc(ldlm_side_t client)
                         nr_p++;
                 }
 
-                /* 
-                 * Make sure that modest namespaces did not eat more that 2/3 
-                 * of limit. 
+                /*
+                 * Make sure that modest namespaces did not eat more than 2/3
+                 * of limit.
                  */
                 if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
                         CWARN("\"Modest\" pools eat out 2/3 of server locks "
@@ -1125,63 +1187,81 @@ void ldlm_pools_recalc(ldlm_side_t client)
                         equal = 1;
                 }
 
-                /* 
-                 * The rest is given to greedy namespaces. 
+                /*
+                 * The rest is given to greedy namespaces.
                  */
-                list_for_each_entry(ns, ldlm_namespace_list(client), 
-                                    ns_list_chain) 
+                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+                                        ns_list_chain)
                 {
                         if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                 continue;
 
                         if (equal) {
-                                /* 
+                                /*
                                  * In the case 2/3 locks are eaten out by
                                  * modest pools, we re-setup equal limit
-                                 * for _all_ pools. 
+                                 * for _all_ pools.
                                  */
                                 l = LDLM_POOL_HOST_L /
-                                        atomic_read(ldlm_namespace_nr(client));
+                                        cfs_atomic_read(
+                                                ldlm_namespace_nr(client));
                         } else {
-                                /* 
+                                /*
                                  * All the rest of greedy pools will have
                                  * all locks in equal parts.
                                  */
                                 l = (LDLM_POOL_HOST_L - nr_l) /
-                                        (atomic_read(ldlm_namespace_nr(client)) -
+                                        (cfs_atomic_read(
+                                                ldlm_namespace_nr(client)) -
                                          nr_p);
                         }
                         ldlm_pool_setup(&ns->ns_pool, l);
                 }
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
         }
 
-        /* 
-         * Recalc at least ldlm_namespace_nr(client) namespaces. 
+        /*
+         * Recalc at least ldlm_namespace_nr(client) namespaces.
          */
-        for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
-                /* 
+        for (nr = cfs_atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+                int     skip;
+                /*
                  * Lock the list, get first @ns in the list, getref, move it
                  * to the tail, unlock and call pool recalc. This way we avoid
                  * calling recalc under @ns lock what is really good as we get
                  * rid of potential deadlock on client nodes when canceling
-                 * locks synchronously. 
+                 * locks synchronously.
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         break;
                 }
                 ns = ldlm_namespace_first_locked(client);
-                ldlm_namespace_get(ns);
+
+                cfs_spin_lock(&ns->ns_hash_lock);
+                /*
+                 * Skip an ns which is being freed; we don't want to increase
+                 * its refcount again, not even temporarily. bz21519.
+                 */
+                if (ns->ns_refcount == 0) {
+                        skip = 1;
+                } else {
+                        skip = 0;
+                        ldlm_namespace_get_locked(ns);
+                }
+                cfs_spin_unlock(&ns->ns_hash_lock);
+
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
 
-                /* 
-                 * After setup is done - recalc the pool. 
+                /*
+                 * After setup is done - recalc the pool.
                  */
-                ldlm_pool_recalc(&ns->ns_pool);
-                ldlm_namespace_put(ns, 1);
+                if (!skip) {
+                        ldlm_pool_recalc(&ns->ns_pool);
+                        ldlm_namespace_put(ns, 1);
+                }
         }
 }
 EXPORT_SYMBOL(ldlm_pools_recalc);
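
The limit distribution above can be followed with concrete numbers. A sketch assuming two modest and two greedy namespaces under the hypothetical 204800-lock host limit; LDLM_POOLS_MODEST_MARGIN stands for the 5% from the comment, and the round-up dru() helper is approximated with plain division:

```c
#include <stdio.h>

#define LDLM_POOL_HOST_L         204800 /* hypothetical host limit */
#define LDLM_POOLS_MODEST_MARGIN 5      /* the "+ 5%" from the comment */

int main(void)
{
        /* Hypothetical average granted locks of two modest namespaces. */
        int modest_avg[2] = { 20000, 40000 };
        int nr_l = 0, greedy = 2, i, l;

        /* Pass 1: each modest pool gets its average granted locks + 5%. */
        for (i = 0; i < 2; i++) {
                l = modest_avg[i] +
                    modest_avg[i] * LDLM_POOLS_MODEST_MARGIN / 100;
                nr_l += l;
                printf("modest ns%d limit: %d\n", i, l);  /* 21000, 42000 */
        }

        /* 63000 < 2/3 of 204800, so no equalization kicks in; the rest
         * is split evenly among the greedy namespaces. */
        if (nr_l < 2 * (LDLM_POOL_HOST_L / 3)) {
                l = (LDLM_POOL_HOST_L - nr_l) / greedy;
                printf("each greedy ns limit: %d\n", l);  /* 70900 */
        }
        return 0;
}
```
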
@@ -1203,14 +1283,14 @@ static int ldlm_pools_thread_main(void *arg)
                 struct l_wait_info lwi;
 
                 /*
-                 * Recal all pools on this tick. 
+                 * Recalc all pools on this tick.
                  */
                 ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
                 ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
-                
+
                 /*
                  * Wait until the next check time, or until we're
-                 * stopped. 
+                 * stopped.
                  */
                 lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
                                   NULL, NULL);
@@ -1232,7 +1312,7 @@ static int ldlm_pools_thread_main(void *arg)
         CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
                t_name, cfs_curproc_pid());
 
-        complete_and_exit(&ldlm_pools_comp, 0);
+        cfs_complete_and_exit(&ldlm_pools_comp, 0);
 }
 
 static int ldlm_pools_thread_start(void)
@@ -1248,12 +1328,12 @@ static int ldlm_pools_thread_start(void)
         if (ldlm_pools_thread == NULL)
                 RETURN(-ENOMEM);
 
-        init_completion(&ldlm_pools_comp);
+        cfs_init_completion(&ldlm_pools_comp);
         cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
 
-        /* 
+        /*
          * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
-         * just drop the VM and FILES in ptlrpc_daemonize() right away. 
+         * just drop the VM and FILES in cfs_daemonize() right away.
          */
         rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
                                CLONE_VM | CLONE_FILES);
@@ -1281,12 +1361,12 @@ static void ldlm_pools_thread_stop(void)
         ldlm_pools_thread->t_flags = SVC_STOPPING;
         cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
 
-        /* 
+        /*
          * Make sure that pools thread is finished before freeing @thread.
          * This fixes possible race and oops due to accessing freed memory
-         * in pools thread. 
+         * in pools thread.
          */
-        wait_for_completion(&ldlm_pools_comp);
+        cfs_wait_for_completion(&ldlm_pools_comp);
         OBD_FREE_PTR(ldlm_pools_thread);
         ldlm_pools_thread = NULL;
         EXIT;
@@ -1299,10 +1379,12 @@ int ldlm_pools_init(void)
 
         rc = ldlm_pools_thread_start();
         if (rc == 0) {
-                ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
-                                                       ldlm_pools_srv_shrink);
-                ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
-                                                       ldlm_pools_cli_shrink);
+                ldlm_pools_srv_shrinker =
+                        cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+                                         ldlm_pools_srv_shrink);
+                ldlm_pools_cli_shrinker =
+                        cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+                                         ldlm_pools_cli_shrink);
         }
         RETURN(rc);
 }
@@ -1311,11 +1393,11 @@ EXPORT_SYMBOL(ldlm_pools_init);
 void ldlm_pools_fini(void)
 {
         if (ldlm_pools_srv_shrinker != NULL) {
-                remove_shrinker(ldlm_pools_srv_shrinker);
+                cfs_remove_shrinker(ldlm_pools_srv_shrinker);
                 ldlm_pools_srv_shrinker = NULL;
         }
         if (ldlm_pools_cli_shrinker != NULL) {
-                remove_shrinker(ldlm_pools_cli_shrinker);
+                cfs_remove_shrinker(ldlm_pools_cli_shrinker);
                 ldlm_pools_cli_shrinker = NULL;
         }
         ldlm_pools_thread_stop();
@@ -1422,12 +1504,6 @@ void ldlm_pools_fini(void)
 }
 EXPORT_SYMBOL(ldlm_pools_fini);
 
-void ldlm_pools_wakeup(void)
-{
-        return;
-}
-EXPORT_SYMBOL(ldlm_pools_wakeup);
-
 void ldlm_pools_recalc(ldlm_side_t client)
 {
         return;