LU-7689 obdclass: limit lu_site hash table size on clients

diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index e0ec556..e04654c 100644
--- a/lustre/obdclass/lu_object.c
+++ b/lustre/obdclass/lu_object.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -69,6 +69,7 @@ enum {
 
 #define LU_SITE_BITS_MIN    12
 #define LU_SITE_BITS_MAX    24
+#define LU_SITE_BITS_MAX_CL 19
 /**
  * total 256 buckets, we don't want too many buckets because:
  * - consume too much memory
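
For context (not part of the patch), the three limits above translate into the following bucket counts (2^bits); a minimal userspace sketch using only the constants shown:

#include <stdio.h>

/* Illustrative only: what the new client-side cap means in bucket counts. */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
#define LU_SITE_BITS_MAX_CL 19

int main(void)
{
	printf("floor     : %lu buckets\n", 1UL << LU_SITE_BITS_MIN);    /* 4096 */
	printf("client cap: %lu buckets\n", 1UL << LU_SITE_BITS_MAX_CL); /* 524288 */
	printf("server cap: %lu buckets\n", 1UL << LU_SITE_BITS_MAX);    /* 16777216 */
	return 0;
}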
@@ -357,7 +358,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
        struct cfs_hash_bd            bd2;
        struct list_head         dispose;
        int                      did_sth;
-       unsigned int             start;
+       unsigned int             start = 0;
         int                      count;
         int                      bnr;
        unsigned int             i;
@@ -370,7 +371,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
          * Under LRU list lock, scan LRU list and move unreferenced objects to
          * the dispose list, removing them from LRU and hash table.
          */
-        start = s->ls_purge_start;
+       if (nr != ~0)
+               start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
  again:
        /*
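
The hunk above changes lu_site_purge() so that a full purge (nr == ~0) always starts scanning at bucket 0 instead of resuming from the saved ls_purge_start. A minimal sketch of that start/bnr selection, with a hypothetical nbkt standing in for CFS_HASH_NBKT(s->ls_obj_hash) and purge_start for s->ls_purge_start:

#include <stdio.h>

/* Illustrative only: how the purge start bucket and per-bucket budget
 * are chosen after this patch. */
static void purge_params(int nr, unsigned int purge_start, unsigned int nbkt)
{
	unsigned int start = 0;
	int bnr;

	/* Resume from the saved position only for a partial purge. */
	if (nr != ~0)
		start = purge_start;
	/* -1 means "no per-bucket limit": scan every bucket completely. */
	bnr = (nr == ~0) ? -1 : nr / (int)nbkt + 1;

	printf("nr=%d -> start=%u bnr=%d\n", nr, start, bnr);
}

int main(void)
{
	purge_params(~0, 37, 256);  /* full purge: start=0, bnr=-1 */
	purge_params(128, 37, 256); /* partial purge: start=37, bnr=1 */
	return 0;
}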
@@ -920,6 +922,7 @@ static unsigned long lu_htable_order(struct lu_device *top)
 {
        unsigned long cache_size;
        unsigned long bits;
+       unsigned long bits_max = LU_SITE_BITS_MAX;
 
        /*
         * For ZFS based OSDs the cache should be disabled by default.  This
@@ -933,6 +936,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
                return LU_SITE_BITS_MIN;
        }
 
+       if (strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME) == 0)
+               bits_max = LU_SITE_BITS_MAX_CL;
+
         /*
          * Calculate hash table size, assuming that we want reasonable
          * performance when 20% of total memory is occupied by cache of
@@ -963,7 +969,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
         for (bits = 1; (1 << bits) < cache_size; ++bits) {
                 ;
         }
-        return bits;
+
+       return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
 }
 
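The resulting order selection can be pictured as: find the smallest number of bits whose table covers the estimated cache size, then clamp it to the range allowed for the device type. A standalone sketch, assuming an already-computed cache_size (the real function derives it from total memory, which is elided from this diff):

#include <stdio.h>

#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
#define LU_SITE_BITS_MAX_CL 19

/* Illustrative only: smallest bits with 2^bits >= cache_size, clamped to
 * [LU_SITE_BITS_MIN, bits_max], where bits_max depends on whether the top
 * device is a client (VVP) or a server-side OSD. */
static unsigned long htable_order(unsigned long cache_size, int is_client)
{
	unsigned long bits_max = is_client ? LU_SITE_BITS_MAX_CL : LU_SITE_BITS_MAX;
	unsigned long bits;

	for (bits = 1; (1UL << bits) < cache_size; ++bits)
		;
	if (bits < LU_SITE_BITS_MIN)
		bits = LU_SITE_BITS_MIN;
	if (bits > bits_max)
		bits = bits_max;
	return bits;
}

int main(void)
{
	/* A cache_size of ~8M objects needs 23 bits on a server but is
	 * capped at 19 bits on a client. */
	printf("server: %lu bits\n", htable_order(8000000, 0)); /* 23 */
	printf("client: %lu bits\n", htable_order(8000000, 1)); /* 19 */
	return 0;
}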
 static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
@@ -1059,10 +1066,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
 
        memset(s, 0, sizeof *s);
        mutex_init(&s->ls_purge_mutex);
-       bits = lu_htable_order(top);
        snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
-       for (bits = clamp_t(typeof(bits), bits,
-                           LU_SITE_BITS_MIN, LU_SITE_BITS_MAX);
+       for (bits = lu_htable_order(top);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
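
With this hunk, lu_site_init() feeds the already-clamped result of lu_htable_order() straight into the retry loop: if the hash table cannot be allocated at the requested order, it retries with one bit less until it reaches LU_SITE_BITS_MIN. A minimal sketch of that fallback pattern, with a hypothetical try_alloc_hash() standing in for cfs_hash_create():

#include <stdio.h>
#include <stddef.h>

#define LU_SITE_BITS_MIN 12

/* Hypothetical stand-in for cfs_hash_create(): pretend allocations above
 * 20 bits fail so the fallback path is exercised. */
static void *try_alloc_hash(unsigned long bits)
{
	return bits > 20 ? NULL : (void *)1;
}

int main(void)
{
	unsigned long bits;
	void *hash = NULL;

	/* Start from the computed order and shrink until allocation
	 * succeeds or the minimum order is reached. */
	for (bits = 23; bits >= LU_SITE_BITS_MIN; bits--) {
		hash = try_alloc_hash(bits);
		if (hash != NULL)
			break;
	}
	if (hash == NULL)
		printf("allocation failed\n");
	else
		printf("allocated with %lu bits\n", bits); /* 20 */
	return 0;
}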
@@ -2239,28 +2244,6 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
 }
 EXPORT_SYMBOL(lu_site_stats_seq_print);
 
-int lu_site_stats_print(const struct lu_site *s, char *page, int count)
-{
-       lu_site_stats_t stats;
-
-       memset(&stats, 0, sizeof(stats));
-       lu_site_stats_get(s->ls_obj_hash, &stats, 1);
-
-       return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
-                       stats.lss_busy,
-                       stats.lss_total,
-                       stats.lss_populated,
-                       CFS_HASH_NHLIST(s->ls_obj_hash),
-                       stats.lss_max_search,
-                       ls_stats_read(s->ls_stats, LU_SS_CREATED),
-                       ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
-                       ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
-                       ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                       ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
-                       ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
-}
-
 /**
  * Helper function to initialize a number of kmem slab caches at once.
  */