LU-7689 obdclass: limit lu_site hash table size on clients
[fs/lustre-release.git] lustre/obdclass/lu_object.c
index dac8c2a..e04654c 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -69,6 +69,7 @@ enum {
 
 #define LU_SITE_BITS_MIN    12
 #define LU_SITE_BITS_MAX    24
+#define LU_SITE_BITS_MAX_CL 19
 /**
  * total 256 buckets, we don't want too many buckets because:
  * - consume too much memory
@@ -86,6 +87,7 @@ CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644,
                "Maximum number of objects in lu_object cache");
 
 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
+static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
 
 /**
  * Decrease reference counter on object. If last reference is freed, return
@@ -98,7 +100,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         struct lu_object_header *top;
         struct lu_site          *site;
         struct lu_object        *orig;
-        cfs_hash_bd_t            bd;
+       struct cfs_hash_bd            bd;
        const struct lu_fid     *fid;
 
         top  = o->lo_header;
@@ -154,12 +156,16 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
                LASSERT(list_empty(&top->loh_lru));
                list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                bkt->lsb_lru_len++;
+               lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
+               CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, "
+                      "lru_len: %ld\n",
+                      o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
                 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                 return;
         }
 
         /*
-         * If object is dying (will not be cached), removed it
+        * If object is dying (will not be cached) then remove it
          * from hash table and LRU.
          *
          * This is done with hash table and LRU lists locked. As the only
@@ -202,8 +208,9 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
        top = o->lo_header;
        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
-               cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
-               cfs_hash_bd_t bd;
+               struct lu_site *site = o->lo_dev->ld_site;
+               struct cfs_hash *obj_hash = site->ls_obj_hash;
+               struct cfs_hash_bd bd;
 
                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
                if (!list_empty(&top->loh_lru)) {
@@ -212,6 +219,7 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
                        list_del_init(&top->loh_lru);
                        bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
                        bkt->lsb_lru_len--;
+                       lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
                }
                cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                cfs_hash_bd_unlock(obj_hash, &bd, 1);
@@ -346,11 +354,11 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
         struct lu_object_header *h;
         struct lu_object_header *temp;
         struct lu_site_bkt_data *bkt;
-        cfs_hash_bd_t            bd;
-        cfs_hash_bd_t            bd2;
+       struct cfs_hash_bd            bd;
+       struct cfs_hash_bd            bd2;
        struct list_head         dispose;
        int                      did_sth;
-       unsigned int             start;
+       unsigned int             start = 0;
         int                      count;
         int                      bnr;
        unsigned int             i;
@@ -363,7 +371,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
          * Under LRU list lock, scan LRU list and move unreferenced objects to
          * the dispose list, removing them from LRU and hash table.
          */
-        start = s->ls_purge_start;
+       if (nr != ~0)
+               start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
  again:
        /*
@@ -389,6 +398,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                                                &bd2, &h->loh_hash);
                        list_move(&h->loh_lru, &dispose);
                        bkt->lsb_lru_len--;
+                       lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
                         if (did_sth == 0)
                                 did_sth = 1;
 
@@ -574,7 +584,7 @@ int lu_object_invariant(const struct lu_object *o)
 }
 
 static struct lu_object *htable_lookup(struct lu_site *s,
-                                      cfs_hash_bd_t *bd,
+                                      struct cfs_hash_bd *bd,
                                       const struct lu_fid *f,
                                       wait_queue_t *waiter,
                                       __u64 *version)
@@ -604,6 +614,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
                if (!list_empty(&h->loh_lru)) {
                        list_del_init(&h->loh_lru);
                        bkt->lsb_lru_len--;
+                       lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
                }
                 return lu_object_top(h);
         }
@@ -615,7 +626,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
          */
 
        if (likely(waiter != NULL)) {
-               init_waitqueue_entry_current(waiter);
+               init_waitqueue_entry(waiter, current);
                add_wait_queue(&bkt->lsb_marche_funebre, waiter);
                set_current_state(TASK_UNINTERRUPTIBLE);
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
@@ -667,8 +678,8 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
                                        const struct lu_object_conf *conf)
 {
         struct lu_object        *o;
-        cfs_hash_t              *hs;
-        cfs_hash_bd_t            bd;
+       struct cfs_hash              *hs;
+       struct cfs_hash_bd            bd;
 
         o = lu_object_alloc(env, dev, f, conf);
         if (unlikely(IS_ERR(o)))
@@ -696,8 +707,8 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site        *s;
-       cfs_hash_t            *hs;
-       cfs_hash_bd_t          bd;
+       struct cfs_hash            *hs;
+       struct cfs_hash_bd          bd;
        __u64                  version = 0;
 
         /*
@@ -784,7 +795,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
-               waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+               schedule();
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
        }
@@ -868,7 +879,7 @@ struct lu_site_print_arg {
 };
 
 static int
-lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                  struct hlist_node *hnode, void *data)
 {
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
@@ -911,6 +922,7 @@ static unsigned long lu_htable_order(struct lu_device *top)
 {
        unsigned long cache_size;
        unsigned long bits;
+       unsigned long bits_max = LU_SITE_BITS_MAX;
 
        /*
         * For ZFS based OSDs the cache should be disabled by default.  This
@@ -924,6 +936,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
                return LU_SITE_BITS_MIN;
        }
 
+       if (strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME) == 0)
+               bits_max = LU_SITE_BITS_MAX_CL;
+
         /*
          * Calculate hash table size, assuming that we want reasonable
          * performance when 20% of total memory is occupied by cache of
@@ -954,10 +969,11 @@ static unsigned long lu_htable_order(struct lu_device *top)
         for (bits = 1; (1 << bits) < cache_size; ++bits) {
                 ;
         }
-        return bits;
+
+       return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
 }
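
A minimal userspace sketch of the sizing policy this hunk implements (illustrative names such as lu_htable_order_sketch() and is_client_site, not the kernel code): derive the hash order from the expected cache size, then clamp it between LU_SITE_BITS_MIN and a maximum that drops to 19 bits for client (VVP) sites.

	/*
	 * Sketch only: mirrors the clamp now done inside lu_htable_order().
	 * The real code derives cache_entries from total RAM; here it is a
	 * parameter.
	 */
	#include <stdio.h>

	#define LU_SITE_BITS_MIN	12
	#define LU_SITE_BITS_MAX	24
	#define LU_SITE_BITS_MAX_CL	19	/* smaller cap for client-side sites */

	static unsigned long lu_htable_order_sketch(unsigned long cache_entries,
						    int is_client_site)
	{
		unsigned long bits_max = is_client_site ? LU_SITE_BITS_MAX_CL
							: LU_SITE_BITS_MAX;
		unsigned long bits;

		/* smallest power of two that covers the expected cache size */
		for (bits = 1; (1UL << bits) < cache_entries; bits++)
			;

		if (bits < LU_SITE_BITS_MIN)
			bits = LU_SITE_BITS_MIN;
		if (bits > bits_max)
			bits = bits_max;
		return bits;
	}

	int main(void)
	{
		/* a large client cache is now capped at 2^19 hash entries */
		printf("%lu\n", lu_htable_order_sketch(1UL << 22, 1));	/* 19 */
		printf("%lu\n", lu_htable_order_sketch(1UL << 22, 0));	/* 22 */
		return 0;
	}
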
 
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
                                const void *key, unsigned mask)
 {
        struct lu_fid  *fid = (struct lu_fid *)key;
@@ -997,7 +1013,7 @@ static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
 
-static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lu_object_header *h;
 
@@ -1005,12 +1021,12 @@ static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
        atomic_inc(&h->loh_ref);
 }
 
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         LBUG(); /* we should never called it */
 }
 
-static cfs_hash_ops_t lu_site_hash_ops = {
+static struct cfs_hash_ops lu_site_hash_ops = {
         .hs_hash        = lu_obj_hop_hash,
         .hs_key         = lu_obj_hop_key,
         .hs_keycmp      = lu_obj_hop_keycmp,
@@ -1042,7 +1058,7 @@ EXPORT_SYMBOL(lu_dev_del_linkage);
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
        struct lu_site_bkt_data *bkt;
-       cfs_hash_bd_t bd;
+       struct cfs_hash_bd bd;
        char name[16];
        unsigned long bits;
        unsigned int i;
@@ -1050,10 +1066,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
 
        memset(s, 0, sizeof *s);
        mutex_init(&s->ls_purge_mutex);
-       bits = lu_htable_order(top);
        snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
-       for (bits = clamp_t(typeof(bits), bits,
-                           LU_SITE_BITS_MIN, LU_SITE_BITS_MAX);
+       for (bits = lu_htable_order(top);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
@@ -1098,6 +1112,12 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
                              0, "cache_death_race", "cache_death_race");
         lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                              0, "lru_purged", "lru_purged");
+       /*
+        * Unlike other counters, lru_len can be decremented so
+        * need lc_sum instead of just lc_count
+        */
+       lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
+                            LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");
 
        INIT_LIST_HEAD(&s->ls_linkage);
         s->ls_top_dev = top;
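
As the new comment notes, lru_len is the one counter here that is both incremented and decremented, so the interesting value is the running sum rather than the event count. A small standalone stand-in (made-up struct and field comments, not the lprocfs implementation) of why the distinction matters:

	#include <stdio.h>

	/* Illustrative stand-in for one counter; not lprocfs internals. */
	struct counter_sketch {
		long lc_count;	/* how many times the stat was bumped */
		long lc_sum;	/* net running total */
	};

	static void incr(struct counter_sketch *c) { c->lc_count++; c->lc_sum++; }
	static void decr(struct counter_sketch *c) { c->lc_sum--; }

	int main(void)
	{
		struct counter_sketch lru_len = { 0, 0 };
		int i;

		for (i = 0; i < 5; i++)	/* five objects added to the LRU */
			incr(&lru_len);
		for (i = 0; i < 3; i++)	/* three removed again */
			decr(&lru_len);

		/* 5 objects were ever added; only 2 are still on the LRU */
		printf("added=%ld len=%ld\n", lru_len.lc_count, lru_len.lc_sum);
		return 0;
	}
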
@@ -1364,7 +1384,8 @@ enum {
 
 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
 
-static DEFINE_SPINLOCK(lu_keys_guard);
+DEFINE_RWLOCK(lu_keys_guard);
+static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
 
 /**
  * Global counter incremented whenever key is registered, unregistered,
@@ -1388,7 +1409,7 @@ int lu_context_key_register(struct lu_context_key *key)
         LASSERT(key->lct_owner != NULL);
 
         result = -ENFILE;
-       spin_lock(&lu_keys_guard);
+       write_lock(&lu_keys_guard);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 if (lu_keys[i] == NULL) {
                         key->lct_index = i;
@@ -1400,7 +1421,7 @@ int lu_context_key_register(struct lu_context_key *key)
                         break;
                 }
         }
-       spin_unlock(&lu_keys_guard);
+       write_unlock(&lu_keys_guard);
        return result;
 }
 EXPORT_SYMBOL(lu_context_key_register);
@@ -1439,13 +1460,26 @@ void lu_context_key_degister(struct lu_context_key *key)
        lu_context_key_quiesce(key);
 
        ++key_set_version;
-       spin_lock(&lu_keys_guard);
+       write_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+
+       /**
+        * Wait until all transient contexts referencing this key have
+        * run lu_context_key::lct_fini() method.
+        */
+       while (atomic_read(&key->lct_used) > 1) {
+               write_unlock(&lu_keys_guard);
+               CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
+                      key->lct_owner ? key->lct_owner->name : "", key,
+                      atomic_read(&key->lct_used));
+               schedule();
+               write_lock(&lu_keys_guard);
+       }
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
-       spin_unlock(&lu_keys_guard);
+       write_unlock(&lu_keys_guard);
 
        LASSERTF(atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
@@ -1567,15 +1601,31 @@ void lu_context_key_quiesce(struct lu_context_key *key)
                  * XXX layering violation.
                  */
                 cl_env_cache_purge(~0);
-                key->lct_tags |= LCT_QUIESCENT;
                 /*
                  * XXX memory barrier has to go here.
                  */
-               spin_lock(&lu_keys_guard);
+               write_lock(&lu_keys_guard);
+               key->lct_tags |= LCT_QUIESCENT;
+
+               /**
+                * Wait until all lu_context_key::lct_init() methods
+                * have completed.
+                */
+               while (atomic_read(&lu_key_initing_cnt) > 0) {
+                       write_unlock(&lu_keys_guard);
+                       CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\""
+                              " %p, %d (%d)\n",
+                              key->lct_owner ? key->lct_owner->name : "",
+                              key, atomic_read(&key->lct_used),
+                              atomic_read(&lu_key_initing_cnt));
+                       schedule();
+                       write_lock(&lu_keys_guard);
+               }
+
                list_for_each_entry(ctx, &lu_context_remembered,
                                    lc_remember)
                        key_fini(ctx, key->lct_index);
-               spin_unlock(&lu_keys_guard);
+               write_unlock(&lu_keys_guard);
                ++key_set_version;
        }
 }
@@ -1604,6 +1654,19 @@ static int keys_fill(struct lu_context *ctx)
 {
        unsigned int i;
 
+       /*
+        * A serialisation with lu_context_key_quiesce() is needed, but some
+        * "key->lct_init()" are calling kernel memory allocation routine and
+        * can't be called while holding a spin_lock.
+        * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
+        * to ensure the start of the serialisation.
+        * An atomic_t variable is still used, in order not to reacquire the
+        * lock when decrementing the counter.
+        */
+       read_lock(&lu_keys_guard);
+       atomic_inc(&lu_key_initing_cnt);
+       read_unlock(&lu_keys_guard);
+
         LINVRNT(ctx->lc_value != NULL);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 struct lu_context_key *key;
@@ -1621,13 +1684,19 @@ static int keys_fill(struct lu_context *ctx)
                         LINVRNT(key->lct_init != NULL);
                         LINVRNT(key->lct_index == i);
 
-                        value = key->lct_init(ctx, key);
-                        if (unlikely(IS_ERR(value)))
-                                return PTR_ERR(value);
-
                        LASSERT(key->lct_owner != NULL);
-                       if (!(ctx->lc_tags & LCT_NOREF))
-                               try_module_get(key->lct_owner);
+                       if (!(ctx->lc_tags & LCT_NOREF) &&
+                           try_module_get(key->lct_owner) == 0) {
+                               /* module is unloading, skip this key */
+                               continue;
+                       }
+
+                       value = key->lct_init(ctx, key);
+                       if (unlikely(IS_ERR(value))) {
+                               atomic_dec(&lu_key_initing_cnt);
+                               return PTR_ERR(value);
+                       }
+
                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                        atomic_inc(&key->lct_used);
                         /*
@@ -1641,6 +1710,7 @@ static int keys_fill(struct lu_context *ctx)
                 }
                 ctx->lc_version = key_set_version;
         }
+       atomic_dec(&lu_key_initing_cnt);
         return 0;
 }
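
The comment at the top of keys_fill() describes the handshake with lu_context_key_quiesce(): a filler takes lu_keys_guard for read only long enough to bump the initing count, so the later lct_init() allocations run unlocked, while quiesce takes the lock for write, marks the key quiescent, and waits for the count to drain. A rough pthread-based sketch of that pattern under simplified, illustrative names (keys_guard, key_initing_cnt, fill_context, quiesce_key), not the kernel code:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <sched.h>
	#include <stdio.h>

	static pthread_rwlock_t keys_guard = PTHREAD_RWLOCK_INITIALIZER;
	static atomic_int key_initing_cnt;
	static int key_quiescent;

	static void fill_context(void)
	{
		/* announce the fill under the read lock ... */
		pthread_rwlock_rdlock(&keys_guard);
		atomic_fetch_add(&key_initing_cnt, 1);
		pthread_rwlock_unlock(&keys_guard);

		/* ... then do the (possibly sleeping) init work unlocked */
		if (!key_quiescent)
			puts("key value initialised");

		atomic_fetch_sub(&key_initing_cnt, 1);
	}

	static void quiesce_key(void)
	{
		pthread_rwlock_wrlock(&keys_guard);
		key_quiescent = 1;	/* new fillers will now skip the key */

		/* wait until every in-flight fill has finished */
		while (atomic_load(&key_initing_cnt) > 0) {
			pthread_rwlock_unlock(&keys_guard);
			sched_yield();	/* the kernel code calls schedule() */
			pthread_rwlock_wrlock(&keys_guard);
		}
		pthread_rwlock_unlock(&keys_guard);
		puts("key quiesced");
	}

	int main(void)
	{
		fill_context();
		quiesce_key();
		return 0;
	}
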
 
@@ -1664,9 +1734,9 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
-               spin_lock(&lu_keys_guard);
+               write_lock(&lu_keys_guard);
                list_add(&ctx->lc_remember, &lu_context_remembered);
-               spin_unlock(&lu_keys_guard);
+               write_unlock(&lu_keys_guard);
        } else {
                INIT_LIST_HEAD(&ctx->lc_remember);
        }
@@ -1692,10 +1762,10 @@ void lu_context_fini(struct lu_context *ctx)
                keys_fini(ctx);
 
        } else { /* could race with key degister */
-               spin_lock(&lu_keys_guard);
+               write_lock(&lu_keys_guard);
                keys_fini(ctx);
                list_del_init(&ctx->lc_remember);
-               spin_unlock(&lu_keys_guard);
+               write_unlock(&lu_keys_guard);
        }
 }
 EXPORT_SYMBOL(lu_context_fini);
@@ -1723,7 +1793,7 @@ void lu_context_exit(struct lu_context *ctx)
                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        /* could race with key quiescency */
                        if (ctx->lc_tags & LCT_REMEMBER)
-                               spin_lock(&lu_keys_guard);
+                               read_lock(&lu_keys_guard);
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;
 
@@ -1734,7 +1804,7 @@ void lu_context_exit(struct lu_context *ctx)
                                                      key, ctx->lc_value[i]);
                        }
                        if (ctx->lc_tags & LCT_REMEMBER)
-                               spin_unlock(&lu_keys_guard);
+                               read_unlock(&lu_keys_guard);
                 }
         }
 }
@@ -1762,37 +1832,37 @@ __u32 lu_session_tags_default = 0;
 
 void lu_context_tags_update(__u32 tags)
 {
-       spin_lock(&lu_keys_guard);
+       write_lock(&lu_keys_guard);
        lu_context_tags_default |= tags;
        key_set_version++;
-       spin_unlock(&lu_keys_guard);
+       write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_update);
 
 void lu_context_tags_clear(__u32 tags)
 {
-       spin_lock(&lu_keys_guard);
+       write_lock(&lu_keys_guard);
        lu_context_tags_default &= ~tags;
        key_set_version++;
-       spin_unlock(&lu_keys_guard);
+       write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_clear);
 
 void lu_session_tags_update(__u32 tags)
 {
-       spin_lock(&lu_keys_guard);
+       write_lock(&lu_keys_guard);
        lu_session_tags_default |= tags;
        key_set_version++;
-       spin_unlock(&lu_keys_guard);
+       write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_update);
 
 void lu_session_tags_clear(__u32 tags)
 {
-       spin_lock(&lu_keys_guard);
+       write_lock(&lu_keys_guard);
        lu_session_tags_default &= ~tags;
        key_set_version++;
-       spin_unlock(&lu_keys_guard);
+       write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_clear);
 
@@ -1864,10 +1934,10 @@ typedef struct lu_site_stats{
         unsigned        lss_busy;
 } lu_site_stats_t;
 
-static void lu_site_stats_get(cfs_hash_t *hs,
+static void lu_site_stats_get(struct cfs_hash *hs,
                               lu_site_stats_t *stats, int populated)
 {
-       cfs_hash_bd_t bd;
+       struct cfs_hash_bd bd;
        unsigned int  i;
 
         cfs_hash_for_each_bucket(hs, &bd, i) {
@@ -1894,10 +1964,22 @@ static void lu_site_stats_get(cfs_hash_t *hs,
 }
 
 
+/*
+ * lu_cache_shrink_count returns the number of cached objects that are
+ * candidates to be freed by shrink_slab(). A counter, which tracks
+ * the number of items in the site's lru, is maintained in the per cpu
+ * stats of each site. The counter is incremented when an object is added
+ * to a site's lru and decremented when one is removed. The number of
+ * free-able objects is the sum of all per cpu counters for all sites.
+ *
+ * Using a per cpu counter is a compromise solution to concurrent access:
+ * lu_object_put() can update the counter without locking the site and
+ * lu_cache_shrink_count can sum the counters without locking each
+ * ls_obj_hash bucket.
+ */
 static unsigned long lu_cache_shrink_count(struct shrinker *sk,
                                           struct shrink_control *sc)
 {
-       lu_site_stats_t stats;
        struct lu_site *s;
        struct lu_site *tmp;
        unsigned long cached = 0;
@@ -1907,14 +1989,14 @@ static unsigned long lu_cache_shrink_count(struct shrinker *sk,
 
        mutex_lock(&lu_sites_guard);
        list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-               memset(&stats, 0, sizeof(stats));
-               lu_site_stats_get(s->ls_obj_hash, &stats, 0);
-               cached += stats.lss_total - stats.lss_busy;
+               cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
        }
        mutex_unlock(&lu_sites_guard);
 
        cached = (cached / 100) * sysctl_vfs_cache_pressure;
-       CDEBUG(D_INODE, "%ld objects cached\n", cached);
+       CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
+              cached, sysctl_vfs_cache_pressure);
+
        return cached;
 }
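
With the per-site lru_len counters in place, the count callback reduces to summing them and scaling by vfs_cache_pressure. A toy calculation of the scaling used above, with placeholder values:

	#include <stdio.h>

	/* Sketch of the scaling in lu_cache_shrink_count(); values made up. */
	int main(void)
	{
		unsigned long lru_len[] = { 4096, 1024, 512 };	/* per-site lru_len */
		unsigned long cached = 0;
		unsigned int vfs_cache_pressure = 100;		/* kernel default */
		unsigned int i;

		for (i = 0; i < sizeof(lru_len) / sizeof(lru_len[0]); i++)
			cached += lru_len[i];

		/* pressure of 100 reports the count as-is; 200 doubles it */
		cached = (cached / 100) * vfs_cache_pressure;
		printf("%lu objects reported to the shrinker\n", cached);
		return 0;
	}
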
 
@@ -1971,7 +2053,7 @@ static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
  * is safe to take the lu_sites_guard lock.
  *
  * Ideally we should accurately return the remaining number of cached
- * objects without taking the  lu_sites_guard lock, but this is not
+ * objects without taking the lu_sites_guard lock, but this is not
  * possible in the current implementation.
  */
 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
@@ -1988,11 +2070,10 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 
        CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
 
-       lu_cache_shrink_scan(shrinker, &scv);
+       if (scv.nr_to_scan != 0)
+               lu_cache_shrink_scan(shrinker, &scv);
 
        cached = lu_cache_shrink_count(shrinker, &scv);
-       if (scv.nr_to_scan == 0)
-               CDEBUG(D_INODE, "%d objects cached\n", cached);
        return cached;
 }
 
@@ -2120,12 +2201,19 @@ void lu_global_fini(void)
 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
 {
 #ifdef CONFIG_PROC_FS
-        struct lprocfs_counter ret;
+       struct lprocfs_counter ret;
 
-        lprocfs_stats_collect(stats, idx, &ret);
-        return (__u32)ret.lc_count;
+       lprocfs_stats_collect(stats, idx, &ret);
+       if (idx == LU_SS_LRU_LEN)
+               /*
+                * protect against counter on cpu A being decremented
+                * before counter is incremented on cpu B; unlikely
+                */
+               return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);
+       else
+               return (__u32)ret.lc_count;
 #else
-        return 0;
+       return 0;
 #endif
 }
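
Because an increment and its matching decrement of lru_len can land on different per-cpu slots, a snapshot of the summed value can be transiently negative; the LU_SS_LRU_LEN branch above clamps that to zero before truncating to __u32. A standalone sketch of that read path, with hypothetical names:

	#include <stdio.h>

	/*
	 * Sketch of the LU_SS_LRU_LEN read path: sum the per-cpu
	 * contributions, then clamp a transiently negative total to zero.
	 * Not the lprocfs code.
	 */
	static unsigned int lru_len_read(const long *percpu_sum, int ncpus)
	{
		long sum = 0;
		int i;

		for (i = 0; i < ncpus; i++)
			sum += percpu_sum[i];

		/* a decrement may be visible before its matching increment */
		return sum > 0 ? (unsigned int)sum : 0;
	}

	int main(void)
	{
		long cpus[2] = { -1, 0 };	/* decrement seen, increment not yet */

		printf("%u\n", lru_len_read(cpus, 2));	/* prints 0, not a huge u32 */
		return 0;
	}
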
 
@@ -2140,7 +2228,7 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
        memset(&stats, 0, sizeof(stats));
        lu_site_stats_get(s->ls_obj_hash, &stats, 1);
 
-       return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
+       return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
                          stats.lss_busy,
                          stats.lss_total,
                          stats.lss_populated,
@@ -2151,31 +2239,11 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                          ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
+                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
+                         ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
 }
 EXPORT_SYMBOL(lu_site_stats_seq_print);
 
-int lu_site_stats_print(const struct lu_site *s, char *page, int count)
-{
-        lu_site_stats_t stats;
-
-        memset(&stats, 0, sizeof(stats));
-        lu_site_stats_get(s->ls_obj_hash, &stats, 1);
-
-        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
-                        stats.lss_busy,
-                        stats.lss_total,
-                        stats.lss_populated,
-                        CFS_HASH_NHLIST(s->ls_obj_hash),
-                        stats.lss_max_search,
-                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
-}
-
 /**
  * Helper function to initialize a number of kmem slab caches at once.
  */
@@ -2225,8 +2293,8 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
        struct lu_fid           *old = &o->lo_header->loh_fid;
        struct lu_object        *shadow;
        wait_queue_t             waiter;
-       cfs_hash_t              *hs;
-       cfs_hash_bd_t            bd;
+       struct cfs_hash         *hs;
+       struct cfs_hash_bd       bd;
        __u64                    version = 0;
 
        LASSERT(fid_is_zero(old));