b=21882 lru-resize comments
[fs/lustre-release.git] lustre/ldlm/ldlm_pool.c
index 54c0cf5..bb3b11c 100644
 /*
  * 50 ldlm locks for 1MB of RAM.
  */
-#define LDLM_POOL_HOST_L ((num_physpages >> (20 - CFS_PAGE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50)
 
 /*
  * Maximal possible grant step plan in %.
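As a back-of-the-envelope check of the limit above: shifting the cache-page count right by (20 - CFS_PAGE_SHIFT) converts pages to megabytes, and the factor of 50 is the "50 ldlm locks for 1MB of RAM" from the comment. A minimal standalone sketch, assuming 4KB pages and a hypothetical page count standing in for CFS_NUM_CACHEPAGES:

    #include <stdio.h>

    #define PAGE_SHIFT_ASSUMED 12           /* 4KB pages; CFS_PAGE_SHIFT stand-in */
    #define LOCKS_PER_MB 50                 /* "50 ldlm locks for 1MB of RAM" */

    int main(void)
    {
            unsigned long num_cachepages = 262144;  /* hypothetical: 1GB of pages */

            /* 2^20 bytes per MB / 2^PAGE_SHIFT bytes per page =
             * 2^(20 - PAGE_SHIFT) pages per MB, hence the shift. */
            unsigned long mb = num_cachepages >> (20 - PAGE_SHIFT_ASSUMED);
            unsigned long host_l = mb * LOCKS_PER_MB;

            printf("%lu MB of cache pages -> pool limit %lu locks\n", mb, host_l);
            return 0;                       /* prints: 1024 MB ... 51200 locks */
    }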
@@ -199,15 +199,15 @@ static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
 static inline int ldlm_pool_t2gsp(int t)
 {
         /*
-         * This yeilds 1% grant step for anything below LDLM_POOL_GSP_STEP
+         * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
          * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
          *
          * How this will affect execution is the following:
          *
-         * - for thread peroid 1s we will have grant_step 1% which good from
+         * - for thread period 1s we will have grant_step 1% which is good from
          * pov of taking some load off from server and push it out to clients.
          * This is like that because 1% for grant_step means that server will
-         * not allow clients to get lots of locks inshort period of time and
+         * not allow clients to get lots of locks in a short period of time and
          * keep all old locks in their caches. Clients will always have to
          * get some locks back if they want to take some new;
          *
@@ -233,7 +233,7 @@ static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
         int granted, grant_step, limit;
 
         limit = ldlm_pool_get_limit(pl);
-        granted = atomic_read(&pl->pl_granted);
+        granted = cfs_atomic_read(&pl->pl_granted);
 
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
         grant_step = ((limit - granted) * grant_step) / 100;
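For a sense of scale: the plan grows only by a percentage of the unused limit, so a nearly full pool plans almost no new grants. A worked sketch with made-up numbers (the assignment of the result to pl_grant_plan is elided by the hunk above):

    #include <stdio.h>

    int main(void)
    {
            int limit = 10000, granted = 6000;
            int grant_step = 1;     /* ldlm_pool_t2gsp() gives 1% for a 1s period */

            /* Only the unused part of the limit feeds the plan, so a nearly
             * full pool plans almost no new grants. */
            grant_step = ((limit - granted) * grant_step) / 100;
            printf("plan grows by %d locks this interval\n", grant_step);  /* 40 */
            return 0;
    }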
@@ -254,7 +254,7 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
         slv = pl->pl_server_lock_volume;
         grant_plan = pl->pl_grant_plan;
         limit = ldlm_pool_get_limit(pl);
-        granted = atomic_read(&pl->pl_granted);
+        granted = cfs_atomic_read(&pl->pl_granted);
 
         grant_usage = limit - (granted - grant_plan);
         if (grant_usage <= 0)
@@ -263,7 +263,7 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
         /*
          * Find out SLV change factor which is the ratio of grant usage
          * from limit. SLV changes as fast as the ratio of grant plan
-         * consumtion. The more locks from grant plan are not consumed
+         * consumption. The more locks from grant plan are not consumed
          * by clients in last interval (idle time), the faster grows
          * SLV. And the opposite, the more grant plan is over-consumed
          * (load time) the faster drops SLV.
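The factor described above can be sketched as plain 64-bit math. The in-tree code uses fixed-point shifts that this hunk elides, so the helper below is an assumption about the shape, not the exact formula:

    #include <stdio.h>
    #include <stdint.h>

    /* slv scales by the ratio of grant usage to limit; usage above the
     * limit (idle clients) grows SLV, usage below it (load) shrinks it. */
    static uint64_t recalc_slv(uint64_t slv, int limit, int granted,
                               int grant_plan)
    {
            int grant_usage = limit - (granted - grant_plan);

            if (grant_usage <= 0)
                    grant_usage = 1;
            return slv * (uint64_t)grant_usage / (uint64_t)limit;
    }

    int main(void)
    {
            /* idle: less consumed than planned -> usage 10100 -> SLV grows 1% */
            printf("idle: %llu\n", (unsigned long long)
                   recalc_slv(1000000, 10000, 400, 500));
            /* load: plan over-consumed -> usage 1500 -> SLV drops to 15% */
            printf("load: %llu\n", (unsigned long long)
                   recalc_slv(1000000, 10000, 9000, 500));
            return 0;
    }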
@@ -294,9 +294,9 @@ static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
 {
         int grant_plan = pl->pl_grant_plan;
         __u64 slv = pl->pl_server_lock_volume;
-        int granted = atomic_read(&pl->pl_granted);
-        int grant_rate = atomic_read(&pl->pl_grant_rate);
-        int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+        int granted = cfs_atomic_read(&pl->pl_granted);
+        int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+        int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
 
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             slv);
@@ -326,9 +326,9 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         obd->obd_pool_slv = pl->pl_server_lock_volume;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 }
 
 /**
@@ -341,7 +341,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         ENTRY;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec >= pl->pl_recalc_period) {
                 /*
@@ -365,13 +365,13 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
                                     recalc_interval_sec);
         }
 
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         RETURN(0);
 }
 
 /**
  * This function is used on server side as main entry point for memory
- * preasure handling. It decreases SLV on \a pl according to passed
+ * pressure handling. It decreases SLV on \a pl according to passed
  * \a nr and \a gfp_mask.
  *
  * Our goal here is to decrease SLV such a way that clients hold \a nr
@@ -386,22 +386,22 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
          * VM is asking how many entries may be potentially freed.
          */
         if (nr == 0)
-                return atomic_read(&pl->pl_granted);
+                return cfs_atomic_read(&pl->pl_granted);
 
         /*
          * Client already canceled locks but server is already in shrinker
          * and can't cancel anything. Let's catch this race.
          */
-        if (atomic_read(&pl->pl_granted) == 0)
+        if (cfs_atomic_read(&pl->pl_granted) == 0)
                 RETURN(0);
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
 
         /*
-         * We want shrinker to possibly cause cancelation of @nr locks from
+         * We want shrinker to possibly cause cancellation of @nr locks from
          * clients or grant approximately @nr locks smaller next intervals.
          *
-         * This is why we decresed SLV by @nr. This effect will only be as
+         * This is why we decreased SLV by @nr. This effect will only be as
          * long as one re-calc interval (1s these days) and this should be
          * enough to pass this decreased SLV to all clients. On next recalc
          * interval pool will either increase SLV if locks load is not high
@@ -420,7 +420,7 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
          * Make sure that pool informed obd of last SLV changes.
          */
         ldlm_srv_pool_push_slv(pl);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         /*
          * We did not really free any memory here so far, it only will be
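The flow of this shrinker follows the usual kernel convention: a zero request is a size query, a nonzero request asks the pool to shed roughly that many entries, and nothing is freed synchronously, only SLV is lowered so clients cancel on their own. A condensed sketch of that contract (the exact SLV clamping is elided by the hunk, so the subtraction below is an assumption):

    #include <stdio.h>
    #include <stdint.h>

    /* nr == 0 is a size query; nr > 0 lowers SLV so that clients cancel
     * roughly nr locks themselves over the next recalc interval. */
    static int srv_pool_shrink_sketch(int nr, int granted, uint64_t *slv)
    {
            if (nr == 0)
                    return granted;         /* VM only asks what could be freed */
            if (granted == 0)
                    return 0;               /* clients already canceled it all */
            *slv = *slv > (uint64_t)nr ? *slv - nr : 1;
            return granted;                 /* nothing is freed synchronously */
    }

    int main(void)
    {
            uint64_t slv = 100000;

            printf("query: %d\n", srv_pool_shrink_sketch(0, 5000, &slv));
            printf("shrink: %d, slv now %llu\n",
                   srv_pool_shrink_sketch(128, 5000, &slv),
                   (unsigned long long)slv);
            return 0;
    }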
@@ -440,9 +440,9 @@ static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL && obd != LP_POISON);
         LASSERT(obd->obd_type != LP_POISON);
-        write_lock(&obd->obd_pool_lock);
+        cfs_write_lock(&obd->obd_pool_lock);
         obd->obd_pool_limit = limit;
-        write_unlock(&obd->obd_pool_lock);
+        cfs_write_unlock(&obd->obd_pool_lock);
 
         ldlm_pool_set_limit(pl, limit);
         RETURN(0);
@@ -456,32 +456,32 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
         struct obd_device *obd;
 
         /*
-         * Get new SLV and Limit from obd which is updated with comming
+         * Get new SLV and Limit from obd which is updated with incoming
          * RPCs.
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        read_lock(&obd->obd_pool_lock);
+        cfs_read_lock(&obd->obd_pool_lock);
         pl->pl_server_lock_volume = obd->obd_pool_slv;
         ldlm_pool_set_limit(pl, obd->obd_pool_limit);
-        read_unlock(&obd->obd_pool_lock);
+        cfs_read_unlock(&obd->obd_pool_lock);
 }
 
 /**
- * Recalculates client sise pool \a pl according to current SLV and Limit.
+ * Recalculates client side pool \a pl according to current SLV and Limit.
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
         time_t recalc_interval_sec;
         ENTRY;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         /*
          * Check if we need to recalc lists now.
          */
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec < pl->pl_recalc_period) {
-                spin_unlock(&pl->pl_lock);
+                cfs_spin_unlock(&pl->pl_lock);
                 RETURN(0);
         }
 
@@ -493,7 +493,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
         pl->pl_recalc_time = cfs_time_current_sec();
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             recalc_interval_sec);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         /*
          * Do not cancel locks in case lru resize is disabled for this ns.
@@ -512,9 +512,9 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 }
 
 /**
- * This function is main entry point for memory preasure handling on client side.
- * Main goal of this function is to cancel some number of locks on passed \a pl
- * according to \a nr and \a gfp_mask.
+ * This function is the main entry point for memory pressure handling on
+ * client side.  Its main goal is to cancel some number of locks on passed
+ * \a pl according to \a nr and \a gfp_mask.
  */
 static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                 int nr, unsigned int gfp_mask)
@@ -535,9 +535,9 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
          */
         ldlm_cli_pool_pop_slv(pl);
 
-        spin_lock(&ns->ns_unused_lock);
+        cfs_spin_lock(&ns->ns_unused_lock);
         unused = ns->ns_nr_unused;
-        spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_unused_lock);
         
         if (nr) {
                 canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC, 
@@ -545,7 +545,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
         }
 #ifdef __KERNEL__
         /*
-         * Retrun the number of potentially reclaimable locks.
+         * Return the number of potentially reclaimable locks.
          */
         return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
 #else
@@ -573,7 +573,7 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         int count;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec > 0) {
                 /*
@@ -584,11 +584,11 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                 /*
                  * Zero out all rates and speed for the last period.
                  */
-                atomic_set(&pl->pl_grant_rate, 0);
-                atomic_set(&pl->pl_cancel_rate, 0);
-                atomic_set(&pl->pl_grant_speed, 0);
+                cfs_atomic_set(&pl->pl_grant_rate, 0);
+                cfs_atomic_set(&pl->pl_cancel_rate, 0);
+                cfs_atomic_set(&pl->pl_grant_speed, 0);
         }
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         if (pl->pl_ops->po_recalc != NULL) {
                 count = pl->pl_ops->po_recalc(pl);
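The gating above is the common time-stamped recalc pattern: nothing happens more than once per period, and the per-interval rates restart from zero each time. A minimal userspace sketch of just that gating, with plain ints standing in for the cfs_atomic_t counters and locking elided:

    #include <stdio.h>
    #include <time.h>

    struct pool_sketch {
            time_t recalc_time;             /* last recalc stamp */
            int grant_rate, cancel_rate, grant_speed;
    };

    /* Once at least a second has passed, the per-period rates start
     * from zero again, mirroring the hunk above. */
    static void pool_recalc_sketch(struct pool_sketch *pl)
    {
            time_t now = time(NULL);

            if (now - pl->recalc_time > 0) {
                    pl->grant_rate = 0;
                    pl->cancel_rate = 0;
                    pl->grant_speed = 0;
            }
    }

    int main(void)
    {
            struct pool_sketch pl = { time(NULL) - 2, 7, 3, 4 };

            pool_recalc_sketch(&pl);
            printf("rates: %d %d %d\n", pl.grant_rate,
                   pl.cancel_rate, pl.grant_speed);     /* 0 0 0 */
            return 0;
    }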
@@ -652,18 +652,18 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
         __u64 slv, clv;
         __u32 limit;
 
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
         clv = pl->pl_client_lock_volume;
         limit = ldlm_pool_get_limit(pl);
         grant_plan = pl->pl_grant_plan;
-        granted = atomic_read(&pl->pl_granted);
-        grant_rate = atomic_read(&pl->pl_grant_rate);
-        lvf = atomic_read(&pl->pl_lock_volume_factor);
-        grant_speed = atomic_read(&pl->pl_grant_speed);
-        cancel_rate = atomic_read(&pl->pl_cancel_rate);
+        granted = cfs_atomic_read(&pl->pl_granted);
+        grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
+        lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
+        grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
+        cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 
         nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
                        pl->pl_name);
@@ -846,14 +846,14 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
         int rc;
         ENTRY;
 
-        spin_lock_init(&pl->pl_lock);
-        atomic_set(&pl->pl_granted, 0);
+        cfs_spin_lock_init(&pl->pl_lock);
+        cfs_atomic_set(&pl->pl_granted, 0);
         pl->pl_recalc_time = cfs_time_current_sec();
-        atomic_set(&pl->pl_lock_volume_factor, 1);
+        cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
 
-        atomic_set(&pl->pl_grant_rate, 0);
-        atomic_set(&pl->pl_cancel_rate, 0);
-        atomic_set(&pl->pl_grant_speed, 0);
+        cfs_atomic_set(&pl->pl_grant_rate, 0);
+        cfs_atomic_set(&pl->pl_cancel_rate, 0);
+        cfs_atomic_set(&pl->pl_grant_speed, 0);
         pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
 
         snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
@@ -912,9 +912,9 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
         ENTRY;
 
         LDLM_DEBUG(lock, "add lock to pool");
-        atomic_inc(&pl->pl_granted);
-        atomic_inc(&pl->pl_grant_rate);
-        atomic_inc(&pl->pl_grant_speed);
+        cfs_atomic_inc(&pl->pl_granted);
+        cfs_atomic_inc(&pl->pl_grant_rate);
+        cfs_atomic_inc(&pl->pl_grant_speed);
 
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
         /*
@@ -942,10 +942,10 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
         ENTRY;
 
         LDLM_DEBUG(lock, "del lock from pool");
-        LASSERT(atomic_read(&pl->pl_granted) > 0);
-        atomic_dec(&pl->pl_granted);
-        atomic_inc(&pl->pl_cancel_rate);
-        atomic_dec(&pl->pl_grant_speed);
+        LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
+        cfs_atomic_dec(&pl->pl_granted);
+        cfs_atomic_inc(&pl->pl_cancel_rate);
+        cfs_atomic_dec(&pl->pl_grant_speed);
 
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
 
@@ -963,9 +963,9 @@ EXPORT_SYMBOL(ldlm_pool_del);
 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
 {
         __u64 slv;
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_slv);
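Worth noting why a plain read takes pl_lock at all: pl_server_lock_volume is a __u64, and on 32-bit targets an unlocked load could tear and observe half of a concurrent update. A sketch of the same pattern with pthreads standing in for cfs_spin_lock (build with -lpthread):

    #include <stdint.h>
    #include <stdio.h>
    #include <pthread.h>

    struct slv_box {
            pthread_spinlock_t lock;
            uint64_t           slv;
    };

    /* On 32-bit machines a 64-bit load is two instructions; the lock keeps
     * a reader from seeing half of a concurrent ldlm_pool_set_slv(). */
    static uint64_t slv_get(struct slv_box *b)
    {
            uint64_t v;

            pthread_spin_lock(&b->lock);
            v = b->slv;
            pthread_spin_unlock(&b->lock);
            return v;
    }

    int main(void)
    {
            struct slv_box b;

            pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);
            b.slv = 0x100000001ULL;
            printf("slv = %llu\n", (unsigned long long)slv_get(&b));
            return 0;
    }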
@@ -977,9 +977,9 @@ EXPORT_SYMBOL(ldlm_pool_get_slv);
  */
 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
 {
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         pl->pl_server_lock_volume = slv;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_slv);
 
@@ -991,9 +991,9 @@ EXPORT_SYMBOL(ldlm_pool_set_slv);
 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
 {
         __u64 slv;
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         slv = pl->pl_client_lock_volume;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
         return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_clv);
@@ -1005,9 +1005,9 @@ EXPORT_SYMBOL(ldlm_pool_get_clv);
  */
 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
 {
-        spin_lock(&pl->pl_lock);
+        cfs_spin_lock(&pl->pl_lock);
         pl->pl_client_lock_volume = clv;
-        spin_unlock(&pl->pl_lock);
+        cfs_spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_clv);
 
@@ -1016,7 +1016,7 @@ EXPORT_SYMBOL(ldlm_pool_set_clv);
  */
 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_limit);
+        return cfs_atomic_read(&pl->pl_limit);
 }
 EXPORT_SYMBOL(ldlm_pool_get_limit);
 
@@ -1025,7 +1025,7 @@ EXPORT_SYMBOL(ldlm_pool_get_limit);
  */
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
 {
-        atomic_set(&pl->pl_limit, limit);
+        cfs_atomic_set(&pl->pl_limit, limit);
 }
 EXPORT_SYMBOL(ldlm_pool_set_limit);
 
@@ -1034,20 +1034,20 @@ EXPORT_SYMBOL(ldlm_pool_set_limit);
  */
 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_lock_volume_factor);
+        return cfs_atomic_read(&pl->pl_lock_volume_factor);
 }
 EXPORT_SYMBOL(ldlm_pool_get_lvf);
 
 #ifdef __KERNEL__
 static int ldlm_pool_granted(struct ldlm_pool *pl)
 {
-        return atomic_read(&pl->pl_granted);
+        return cfs_atomic_read(&pl->pl_granted);
 }
 
 static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
-static struct completion ldlm_pools_comp;
+static struct cfs_shrinker *ldlm_pools_srv_shrinker;
+static struct cfs_shrinker *ldlm_pools_cli_shrinker;
+static cfs_completion_t ldlm_pools_comp;
 
 /*
  * Cancel \a nr locks from all namespaces (if possible). Returns number of
@@ -1072,19 +1072,19 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         /*
          * Find out how many resources we may release.
          */
-        for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+        for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
              nr_ns > 0; nr_ns--)
         {
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         cl_env_reexit(cookie);
                         return 0;
                 }
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
                 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                 ldlm_namespace_put(ns, 1);
         }
@@ -1097,7 +1097,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         /*
          * Shrink at least ldlm_namespace_nr(client) namespaces.
          */
-        for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+        for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
              nr_ns > 0; nr_ns--)
         {
                 int cancel, nr_locks;
@@ -1105,9 +1105,9 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 /*
                  * Do not call shrink under ldlm_namespace_lock(client)
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         /*
                          * If list is empty, we can't return any @cached > 0,
                          * that probably would cause needless shrinker
@@ -1119,7 +1119,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
 
                 nr_locks = ldlm_pool_granted(&ns->ns_pool);
                 cancel = 1 + nr_locks * nr / total;
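The "cancel = 1 + nr_locks * nr / total" line asks each namespace for a share of the shrink proportional to its size, and the "+ 1" guarantees forward progress even for namespaces holding almost nothing. A worked example with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
            int total = 10000;      /* granted locks across all namespaces */
            int nr = 100;           /* locks the VM asked us to release */
            int sizes[] = { 2500, 7000, 500 };
            int i;

            for (i = 0; i < 3; i++) {
                    /* proportional share; +1 so tiny namespaces still progress */
                    int cancel = 1 + sizes[i] * nr / total;

                    printf("ns %d: cancel %d of %d locks\n", i, cancel, sizes[i]);
            }
            return 0;               /* 26, 71 and 6 locks respectively */
    }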
@@ -1154,9 +1154,9 @@ void ldlm_pools_recalc(ldlm_side_t client)
                 /*
                  * Check all modest namespaces first.
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                list_for_each_entry(ns, ldlm_namespace_list(client),
-                                    ns_list_chain)
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+                                        ns_list_chain)
                 {
                         if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                 continue;
@@ -1190,8 +1190,8 @@ void ldlm_pools_recalc(ldlm_side_t client)
                 /*
                  * The rest is given to greedy namespaces.
                  */
-                list_for_each_entry(ns, ldlm_namespace_list(client),
-                                    ns_list_chain)
+                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
+                                        ns_list_chain)
                 {
                         if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                 continue;
@@ -1203,25 +1203,28 @@ void ldlm_pools_recalc(ldlm_side_t client)
                                  * for _all_ pools.
                                  */
                                 l = LDLM_POOL_HOST_L /
-                                        atomic_read(ldlm_namespace_nr(client));
+                                        cfs_atomic_read(
+                                                ldlm_namespace_nr(client));
                         } else {
                                 /*
                                  * All the rest of greedy pools will have
                                  * all locks in equal parts.
                                  */
                                 l = (LDLM_POOL_HOST_L - nr_l) /
-                                        (atomic_read(ldlm_namespace_nr(client)) -
+                                        (cfs_atomic_read(
+                                                ldlm_namespace_nr(client)) -
                                          nr_p);
                         }
                         ldlm_pool_setup(&ns->ns_pool, l);
                 }
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
         }
 
         /*
          * Recalc at least ldlm_namespace_nr(client) namespaces.
          */
-        for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+        for (nr = cfs_atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+                int     skip;
                 /*
                  * Lock the list, get first @ns in the list, getref, move it
                  * to the tail, unlock and call pool recalc. This way we avoid
@@ -1229,21 +1232,36 @@ void ldlm_pools_recalc(ldlm_side_t client)
                  * rid of potential deadlock on client nodes when canceling
                  * locks synchronously.
                  */
-                mutex_down(ldlm_namespace_lock(client));
-                if (list_empty(ldlm_namespace_list(client))) {
-                        mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_down(ldlm_namespace_lock(client));
+                if (cfs_list_empty(ldlm_namespace_list(client))) {
+                        cfs_mutex_up(ldlm_namespace_lock(client));
                         break;
                 }
                 ns = ldlm_namespace_first_locked(client);
-                ldlm_namespace_get(ns);
+
+                cfs_spin_lock(&ns->ns_hash_lock);
+                /*
+                 * skip ns which is being freed, and we don't want to increase
+                 * its refcount again, not even temporarily. bz21519.
+                 */
+                if (ns->ns_refcount == 0) {
+                        skip = 1;
+                } else {
+                        skip = 0;
+                        ldlm_namespace_get_locked(ns);
+                }
+                cfs_spin_unlock(&ns->ns_hash_lock);
+
                 ldlm_namespace_move_locked(ns, client);
-                mutex_up(ldlm_namespace_lock(client));
+                cfs_mutex_up(ldlm_namespace_lock(client));
 
                 /*
                  * After setup is done - recalc the pool.
                  */
-                ldlm_pool_recalc(&ns->ns_pool);
-                ldlm_namespace_put(ns, 1);
+                if (!skip) {
+                        ldlm_pool_recalc(&ns->ns_pool);
+                        ldlm_namespace_put(ns, 1);
+                }
         }
 }
 EXPORT_SYMBOL(ldlm_pools_recalc);
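The bz21519 hunk above replaces an unconditional ldlm_namespace_get() with a check-then-get under ns_hash_lock, so a namespace whose refcount has already hit zero (teardown in flight) is never resurrected, not even briefly. The same pattern in miniature, with pthreads standing in for the cfs primitives:

    #include <stdio.h>
    #include <pthread.h>

    struct ns_sketch {
            pthread_spinlock_t lock;
            int refcount;
    };

    /* Take a reference only if the namespace is still alive, deciding
     * under the same lock that teardown holds; 0 means "being freed". */
    static int ns_tryget(struct ns_sketch *ns)
    {
            int got;

            pthread_spin_lock(&ns->lock);
            got = ns->refcount > 0;
            if (got)
                    ns->refcount++;
            pthread_spin_unlock(&ns->lock);
            return got;
    }

    int main(void)
    {
            struct ns_sketch ns;

            pthread_spin_init(&ns.lock, PTHREAD_PROCESS_PRIVATE);
            ns.refcount = 0;                /* teardown already started */
            printf("dying namespace skipped: %s\n",
                   ns_tryget(&ns) ? "no" : "yes");
            return 0;
    }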
@@ -1294,7 +1312,7 @@ static int ldlm_pools_thread_main(void *arg)
         CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
                t_name, cfs_curproc_pid());
 
-        complete_and_exit(&ldlm_pools_comp, 0);
+        cfs_complete_and_exit(&ldlm_pools_comp, 0);
 }
 
 static int ldlm_pools_thread_start(void)
@@ -1310,12 +1328,12 @@ static int ldlm_pools_thread_start(void)
         if (ldlm_pools_thread == NULL)
                 RETURN(-ENOMEM);
 
-        init_completion(&ldlm_pools_comp);
+        cfs_init_completion(&ldlm_pools_comp);
         cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
 
         /*
          * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
-         * just drop the VM and FILES in ptlrpc_daemonize() right away.
+         * just drop the VM and FILES in cfs_daemonize() right away.
          */
         rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
                                CLONE_VM | CLONE_FILES);
@@ -1348,7 +1366,7 @@ static void ldlm_pools_thread_stop(void)
          * This fixes possible race and oops due to accessing freed memory
          * in pools thread.
          */
-        wait_for_completion(&ldlm_pools_comp);
+        cfs_wait_for_completion(&ldlm_pools_comp);
         OBD_FREE_PTR(ldlm_pools_thread);
         ldlm_pools_thread = NULL;
         EXIT;
@@ -1361,10 +1379,12 @@ int ldlm_pools_init(void)
 
         rc = ldlm_pools_thread_start();
         if (rc == 0) {
-                ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
-                                                       ldlm_pools_srv_shrink);
-                ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
-                                                       ldlm_pools_cli_shrink);
+                ldlm_pools_srv_shrinker =
+                        cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+                                         ldlm_pools_srv_shrink);
+                ldlm_pools_cli_shrinker =
+                        cfs_set_shrinker(CFS_DEFAULT_SEEKS,
+                                         ldlm_pools_cli_shrink);
         }
         RETURN(rc);
 }
@@ -1373,11 +1393,11 @@ EXPORT_SYMBOL(ldlm_pools_init);
 void ldlm_pools_fini(void)
 {
         if (ldlm_pools_srv_shrinker != NULL) {
-                remove_shrinker(ldlm_pools_srv_shrinker);
+                cfs_remove_shrinker(ldlm_pools_srv_shrinker);
                 ldlm_pools_srv_shrinker = NULL;
         }
         if (ldlm_pools_cli_shrinker != NULL) {
-                remove_shrinker(ldlm_pools_cli_shrinker);
+                cfs_remove_shrinker(ldlm_pools_cli_shrinker);
                 ldlm_pools_cli_shrinker = NULL;
         }
         ldlm_pools_thread_stop();