LU-1128 ldlm: return -1 for server pool shrinker
fs/lustre-release.git: lustre/ldlm/ldlm_pool.c
index 4b92fcb..1f8f829 100644
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -228,7 +230,7 @@ static inline int ldlm_pool_t2gsp(unsigned int t)
  *
  * \pre ->pl_lock is locked.
  */
-static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
+static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
 {
         int granted, grant_step, limit;
 
@@ -238,6 +240,9 @@ static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
         grant_step = ((limit - granted) * grant_step) / 100;
         pl->pl_grant_plan = granted + grant_step;
+        limit = (limit * 5) >> 2;
+        if (pl->pl_grant_plan > limit)
+                pl->pl_grant_plan = limit;
 }
 
 /**
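The three added lines cap the freshly computed grant plan at 5/4 of the pool limit; (limit * 5) >> 2 is just integer arithmetic for limit * 1.25, so a large (limit - granted) gap can no longer push pl_grant_plan arbitrarily far past the limit. A standalone sketch of the same arithmetic (illustrative names, not the Lustre structures; grant_step_pct stands in for whatever ldlm_pool_t2gsp() returns):

#include <stdio.h>

/* Minimal model of the clamp added above: plan = granted + step,
 * then capped at limit * 5/4. */
static int recalc_grant_plan(int granted, int limit, int grant_step_pct)
{
        int step = ((limit - granted) * grant_step_pct) / 100;
        int plan = granted + step;
        int cap  = (limit * 5) >> 2;    /* limit * 1.25 in integer math */

        return plan > cap ? cap : plan;
}

int main(void)
{
        /* far below the limit: the uncapped plan would be 1900, clamp gives 1250 */
        printf("%d\n", recalc_grant_plan(100, 1000, 200));
        /* close to the limit: the cap never triggers, plan stays 950 */
        printf("%d\n", recalc_grant_plan(900, 1000, 50));
        return 0;
}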
@@ -245,7 +250,7 @@ static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
  *
  * \pre ->pl_lock is locked.
  */
-static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
+static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
 {
         int granted;
         int grant_plan;
@@ -273,10 +278,6 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
          */
         slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
         do_div(slv_factor, limit);
-        if (2 * abs(granted - limit) > limit) {
-                slv_factor *= slv_factor;
-                slv_factor = dru(slv_factor, LDLM_POOL_SLV_SHIFT, round_up);
-        }
         slv = slv * slv_factor;
         slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
 
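The removed branch squared slv_factor whenever granted strayed more than 50% from the limit, making SLV react quadratically to large imbalances. With it gone, each recalc scales SLV linearly: the remaining lines compute, in fixed point via the LDLM_POOL_SLV_SHIFT shift and dru() rounding,

        slv_new = slv_old * grant_usage / limit

As a worked example, if grant_usage / limit works out to 0.1 for a period, the new code shrinks SLV to 10% of its previous value, where the old code would have shrunk it to roughly 1% (0.1 squared) once the |granted - limit| > limit / 2 condition kicked in.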
@@ -294,7 +295,7 @@ static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
  *
  * \pre ->pl_lock is locked.
  */
-static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
+static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
 {
         int grant_plan = pl->pl_grant_plan;
         __u64 slv = pl->pl_server_lock_volume;
@@ -345,30 +346,35 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         ENTRY;
 
-        cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
-        if (recalc_interval_sec >= pl->pl_recalc_period) {
-                /*
-                 * Recalc SLV after last period. This should be done
-                 * _before_ recalculating new grant plan.
-                 */
-                ldlm_pool_recalc_slv(pl);
+        if (recalc_interval_sec < pl->pl_recalc_period)
+                RETURN(0);
 
-                /*
-                 * Make sure that pool informed obd of last SLV changes.
-                 */
-                ldlm_srv_pool_push_slv(pl);
+        cfs_spin_lock(&pl->pl_lock);
+        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+        if (recalc_interval_sec < pl->pl_recalc_period) {
+                cfs_spin_unlock(&pl->pl_lock);
+                RETURN(0);
+        }
+        /*
+         * Recalc SLV after last period. This should be done
+         * _before_ recalculating new grant plan.
+         */
+        ldlm_pool_recalc_slv(pl);
 
-                /*
-                 * Update grant_plan for new period.
-                 */
-                ldlm_pool_recalc_grant_plan(pl);
+        /*
+         * Make sure that pool informed obd of last SLV changes.
+         */
+        ldlm_srv_pool_push_slv(pl);
 
-                pl->pl_recalc_time = cfs_time_current_sec();
-                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
-                                    recalc_interval_sec);
-        }
+        /*
+         * Update grant_plan for new period.
+         */
+        ldlm_pool_recalc_grant_plan(pl);
 
+        pl->pl_recalc_time = cfs_time_current_sec();
+        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+                            recalc_interval_sec);
         cfs_spin_unlock(&pl->pl_lock);
         RETURN(0);
 }
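ldlm_srv_pool_recalc() is restructured so the recalc interval is checked once without the lock (a cheap early exit for the common "not due yet" case) and then re-checked under pl_lock, since another thread may have completed the recalc between the two checks. A minimal userspace sketch of that check / lock / re-check pattern (pthreads, illustrative names, not the Lustre code):

#include <pthread.h>
#include <time.h>

struct pool {
        pthread_mutex_t lock;
        time_t          recalc_time;
        time_t          recalc_period;
};

/* Returns 1 if this caller performed the recalc, 0 if it was skipped. */
static int pool_recalc(struct pool *pl)
{
        time_t interval = time(NULL) - pl->recalc_time;

        if (interval < pl->recalc_period)       /* unlocked fast path */
                return 0;

        pthread_mutex_lock(&pl->lock);
        interval = time(NULL) - pl->recalc_time;
        if (interval < pl->recalc_period) {     /* lost the race: already done */
                pthread_mutex_unlock(&pl->lock);
                return 0;
        }

        /* ... recalc SLV and the grant plan here ... */

        pl->recalc_time = time(NULL);
        pthread_mutex_unlock(&pl->lock);
        return 1;
}

The same unlocked pre-check is added to ldlm_cli_pool_recalc() and ldlm_pool_recalc() in the hunks below.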
@@ -439,7 +445,6 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
 static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
 {
         struct obd_device *obd;
-        ENTRY;
 
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL && obd != LP_POISON);
@@ -449,7 +454,7 @@ static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
         cfs_write_unlock(&obd->obd_pool_lock);
 
         ldlm_pool_set_limit(pl, limit);
-        RETURN(0);
+        return 0;
 }
 
 /**
@@ -479,6 +484,10 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         ENTRY;
 
+        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+        if (recalc_interval_sec < pl->pl_recalc_period)
+                RETURN(0);
+
         cfs_spin_lock(&pl->pl_lock);
         /*
          * Check if we need to recalc lists now.
@@ -539,12 +548,12 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
          */
         ldlm_cli_pool_pop_slv(pl);
 
-        cfs_spin_lock(&ns->ns_unused_lock);
+        cfs_spin_lock(&ns->ns_lock);
         unused = ns->ns_nr_unused;
-        cfs_spin_unlock(&ns->ns_unused_lock);
+        cfs_spin_unlock(&ns->ns_lock);
         
         if (nr) {
-                canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC, 
+                canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
                                            LDLM_CANCEL_SHRINK);
         }
 #ifdef __KERNEL__
@@ -577,6 +586,10 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
         time_t recalc_interval_sec;
         int count;
 
+        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+        if (recalc_interval_sec <= 0)
+                goto recalc;
+
         cfs_spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec > 0) {
@@ -590,10 +603,10 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                  */
                 cfs_atomic_set(&pl->pl_grant_rate, 0);
                 cfs_atomic_set(&pl->pl_cancel_rate, 0);
-                cfs_atomic_set(&pl->pl_grant_speed, 0);
         }
         cfs_spin_unlock(&pl->pl_lock);
 
+ recalc:
         if (pl->pl_ops->po_recalc != NULL) {
                 count = pl->pl_ops->po_recalc(pl);
                 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
@@ -639,10 +652,9 @@ EXPORT_SYMBOL(ldlm_pool_shrink);
  */
 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
 {
-        ENTRY;
         if (pl->pl_ops->po_setup != NULL)
-                RETURN(pl->pl_ops->po_setup(pl, limit));
-        RETURN(0);
+                return(pl->pl_ops->po_setup(pl, limit));
+        return 0;
 }
 EXPORT_SYMBOL(ldlm_pool_setup);
 
@@ -663,9 +675,9 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
         grant_plan = pl->pl_grant_plan;
         granted = cfs_atomic_read(&pl->pl_granted);
         grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
-        lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
-        grant_speed = cfs_atomic_read(&pl->pl_grant_speed);
         cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
+        grant_speed = grant_rate - cancel_rate;
+        lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
         cfs_spin_unlock(&pl->pl_lock);
 
@@ -694,6 +706,20 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
         return nr;
 }
 
+static int lprocfs_rd_grant_speed(char *page, char **start, off_t off,
+                                  int count, int *eof, void *data)
+{
+        struct ldlm_pool *pl = data;
+        int               grant_speed;
+
+        cfs_spin_lock(&pl->pl_lock);
+        /* serialize with ldlm_pool_recalc */
+        grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
+                      cfs_atomic_read(&pl->pl_cancel_rate);
+        cfs_spin_unlock(&pl->pl_lock);
+        return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
+}
+
 LDLM_POOL_PROC_READER(grant_plan, int);
 LDLM_POOL_PROC_READER(recalc_period, int);
 LDLM_POOL_PROC_WRITER(recalc_period, int);
@@ -711,10 +737,11 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
         if (!var_name)
                 RETURN(-ENOMEM);
 
-        parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
+        parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir,
+                                      ldlm_ns_name(ns));
         if (parent_ns_proc == NULL) {
                 CERROR("%s: proc entry is not initialized\n",
-                       ns->ns_name);
+                       ldlm_ns_name(ns));
                 GOTO(out_free_name, rc = -EINVAL);
         }
         pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
@@ -746,8 +773,8 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
         lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
 
         snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
-        pool_vars[0].data = &pl->pl_grant_speed;
-        pool_vars[0].read_fptr = lprocfs_rd_atomic;
+        pool_vars[0].data = pl;
+        pool_vars[0].read_fptr = lprocfs_rd_grant_speed;
         lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
 
         snprintf(var_name, MAX_STRING_SIZE, "cancel_rate");
@@ -857,11 +884,10 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
         cfs_atomic_set(&pl->pl_grant_rate, 0);
         cfs_atomic_set(&pl->pl_cancel_rate, 0);
-        cfs_atomic_set(&pl->pl_grant_speed, 0);
         pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
 
         snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
-                 ns->ns_name, idx);
+                 ldlm_ns_name(ns), idx);
 
         if (client == LDLM_NAMESPACE_SERVER) {
                 pl->pl_ops = &ldlm_srv_pool_ops;
@@ -870,7 +896,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                 pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
         } else {
                 ldlm_pool_set_limit(pl, 1);
-                pl->pl_server_lock_volume = 1;
+                pl->pl_server_lock_volume = 0;
                 pl->pl_ops = &ldlm_cli_pool_ops;
                 pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
         }
@@ -913,13 +939,9 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
          */
         if (lock->l_resource->lr_type == LDLM_FLOCK)
                 return;
-        ENTRY;
 
-        LDLM_DEBUG(lock, "add lock to pool");
         cfs_atomic_inc(&pl->pl_granted);
         cfs_atomic_inc(&pl->pl_grant_rate);
-        cfs_atomic_inc(&pl->pl_grant_speed);
-
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
         /*
          * Do not do pool recalc for client side as all locks which
@@ -929,7 +951,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
          */
         if (ns_is_server(ldlm_pl2ns(pl)))
                 ldlm_pool_recalc(pl);
-        EXIT;
 }
 EXPORT_SYMBOL(ldlm_pool_add);
 
@@ -943,19 +964,15 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
          */
         if (lock->l_resource->lr_type == LDLM_FLOCK)
                 return;
-        ENTRY;
 
-        LDLM_DEBUG(lock, "del lock from pool");
         LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
         cfs_atomic_dec(&pl->pl_granted);
         cfs_atomic_inc(&pl->pl_cancel_rate);
-        cfs_atomic_dec(&pl->pl_grant_speed);
 
         lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
 
         if (ns_is_server(ldlm_pl2ns(pl)))
                 ldlm_pool_recalc(pl);
-        EXIT;
 }
 EXPORT_SYMBOL(ldlm_pool_del);
 
@@ -1065,7 +1082,8 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         struct ldlm_namespace *ns;
         void *cookie;
 
-        if (nr != 0 && !(gfp_mask & __GFP_FS))
+        if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
+            !(gfp_mask & __GFP_FS))
                 return -1;
 
         CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
@@ -1090,7 +1108,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 ldlm_namespace_move_locked(ns, client);
                 cfs_mutex_up(ldlm_namespace_lock(client));
                 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
-                ldlm_namespace_put(ns, 1);
+                ldlm_namespace_put(ns);
         }
 
         if (nr == 0 || total == 0) {
@@ -1129,20 +1147,26 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 cancel = 1 + nr_locks * nr / total;
                 ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                 cached += ldlm_pool_granted(&ns->ns_pool);
-                ldlm_namespace_put(ns, 1);
+                ldlm_namespace_put(ns);
         }
         cl_env_reexit(cookie);
-        return cached;
+        /* we only decrease the SLV in server pools shrinker, return -1 to
+         * kernel to avoid needless loop. LU-1128 */
+        return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
 }
 
-static int ldlm_pools_srv_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr, gfp_mask);
+        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
+                                 shrink_param(sc, nr_to_scan),
+                                 shrink_param(sc, gfp_mask));
 }
 
-static int ldlm_pools_cli_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr, gfp_mask);
+        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
+                                 shrink_param(sc, nr_to_scan),
+                                 shrink_param(sc, gfp_mask));
 }
 
 void ldlm_pools_recalc(ldlm_side_t client)
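This hunk carries the change named in the commit subject. The server pool shrinker frees nothing itself: it only lowers SLV so that clients start cancelling locks, so reporting a cached-lock count back to the kernel would keep shrink_slab() calling in the expectation of progress. Returning -1 tells the old-style shrinker interface to skip this shrinker for the current pass (and the earlier hunk restricts the !__GFP_FS bail-out to client namespaces, since the server path takes no filesystem locks). The callbacks are also rewrapped with the SHRINKER_ARGS()/shrink_param() compatibility macros so the same source builds against both the old (nr, gfp_mask) prototype and newer kernels that pass a struct shrink_control. A simplified sketch of the old-style contract being relied on (stand-in names, not Lustre functions):

/* Old-style shrinker callback contract, as used above:
 *  - nr_to_scan == 0 is a query pass: report the cache size;
 *  - a negative return tells shrink_slab() to skip this shrinker now.
 * example_cached_count() and example_lower_slv() are stand-ins. */
static int  example_cached_count(void);
static void example_lower_slv(int nr);

static int example_srv_pool_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        (void)gfp_mask;         /* the real code checks __GFP_FS on the client side only */

        if (nr_to_scan == 0)
                return example_cached_count();  /* query pass */

        example_lower_slv(nr_to_scan);          /* nothing is freed locally */
        return -1;                              /* avoid a needless retry loop */
}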
@@ -1243,18 +1267,18 @@ void ldlm_pools_recalc(ldlm_side_t client)
                 }
                 ns = ldlm_namespace_first_locked(client);
 
-                cfs_spin_lock(&ns->ns_hash_lock);
+                cfs_spin_lock(&ns->ns_lock);
                 /*
                  * skip ns which is being freed, and we don't want to increase
-                 * its refcount again, not even temporarily. bz21519.
+                 * its refcount again, not even temporarily. bz21519 & LU-499.
                  */
-                if (ns->ns_refcount == 0) {
+                if (ns->ns_stopping) {
                         skip = 1;
                 } else {
                         skip = 0;
-                        ldlm_namespace_get_locked(ns);
+                        ldlm_namespace_get(ns);
                 }
-                cfs_spin_unlock(&ns->ns_hash_lock);
+                cfs_spin_unlock(&ns->ns_lock);
 
                 ldlm_namespace_move_locked(ns, client);
                 cfs_mutex_up(ldlm_namespace_lock(client));
@@ -1264,7 +1288,7 @@ void ldlm_pools_recalc(ldlm_side_t client)
                  */
                 if (!skip) {
                         ldlm_pool_recalc(&ns->ns_pool);
-                        ldlm_namespace_put(ns, 1);
+                        ldlm_namespace_put(ns);
                 }
         }
 }
@@ -1277,7 +1301,7 @@ static int ldlm_pools_thread_main(void *arg)
         ENTRY;
 
         cfs_daemonize(t_name);
-        thread->t_flags = SVC_RUNNING;
+        thread_set_flags(thread, SVC_RUNNING);
         cfs_waitq_signal(&thread->t_ctl_waitq);
 
         CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
@@ -1298,19 +1322,18 @@ static int ldlm_pools_thread_main(void *arg)
                  */
                 lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
                                   NULL, NULL);
-                l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
-                                                   (SVC_STOPPING|SVC_EVENT)),
+                l_wait_event(thread->t_ctl_waitq,
+                             thread_is_stopping(thread) ||
+                             thread_is_event(thread),
                              &lwi);
 
-                if (thread->t_flags & SVC_STOPPING) {
-                        thread->t_flags &= ~SVC_STOPPING;
+                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                         break;
-                } else if (thread->t_flags & SVC_EVENT) {
-                        thread->t_flags &= ~SVC_EVENT;
-                }
+                else
+                        thread_test_and_clear_flags(thread, SVC_EVENT);
         }
 
-        thread->t_flags = SVC_STOPPED;
+        thread_set_flags(thread, SVC_STOPPED);
         cfs_waitq_signal(&thread->t_ctl_waitq);
 
         CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
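Direct manipulation of thread->t_flags is replaced by the thread_set_flags() / thread_is_*() / thread_test_and_clear_flags() helpers throughout the pool thread. A plausible shape for such helpers, purely to illustrate the semantics the new code relies on (the real definitions live in the Lustre headers and may differ; flag values here are arbitrary):

struct thread_sketch {
        unsigned int t_flags;
};

#define SVC_STOPPED     0x01
#define SVC_STOPPING    0x02
#define SVC_RUNNING     0x04
#define SVC_EVENT       0x08

static inline void thread_set_flags(struct thread_sketch *t, unsigned int f)
{
        t->t_flags = f;                 /* replaces thread->t_flags = ... */
}

static inline int thread_is_stopping(struct thread_sketch *t)
{
        return !!(t->t_flags & SVC_STOPPING);
}

static inline int thread_is_event(struct thread_sketch *t)
{
        return !!(t->t_flags & SVC_EVENT);
}

static inline int thread_test_and_clear_flags(struct thread_sketch *t,
                                              unsigned int f)
{
        if (t->t_flags & f) {
                t->t_flags &= ~f;       /* test and clear in one helper */
                return 1;
        }
        return 0;
}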
@@ -1339,8 +1362,8 @@ static int ldlm_pools_thread_start(void)
          * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
          * just drop the VM and FILES in cfs_daemonize() right away.
          */
-        rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
-                               CLONE_VM | CLONE_FILES);
+        rc = cfs_create_thread(ldlm_pools_thread_main, ldlm_pools_thread,
+                               CFS_DAEMON_FLAGS);
         if (rc < 0) {
                 CERROR("Can't start pool thread, error %d\n",
                        rc);
@@ -1349,7 +1372,7 @@ static int ldlm_pools_thread_start(void)
                 RETURN(rc);
         }
         l_wait_event(ldlm_pools_thread->t_ctl_waitq,
-                     (ldlm_pools_thread->t_flags & SVC_RUNNING), &lwi);
+                     thread_is_running(ldlm_pools_thread), &lwi);
         RETURN(0);
 }
 
@@ -1362,7 +1385,7 @@ static void ldlm_pools_thread_stop(void)
                 return;
         }
 
-        ldlm_pools_thread->t_flags = SVC_STOPPING;
+        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
         cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
 
         /*