b=24017 fix the performance issue of lu_cache_shrink
author     Jian Yu <jian.yu@oracle.com>
Tue, 16 Nov 2010 15:38:24 +0000 (23:38 +0800)
committer  Vitaly Fertman <vitaly.fertman@sun.com>
Tue, 16 Nov 2010 22:28:47 +0000 (01:28 +0300)
- increase the busy counter in lu_obj_hop_get, so that we no longer have to
  hack around this in llite
- make some changes to cfs_hash:
  . cfs_hash_get/put no longer need to return anything; their return values
    were unused, and cfs_hash_object can provide the same object address
  . pass the cfs_hash to cfs_hash_get/put (see the callback sketch below)

o=liang
i=vitaly.fertman
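
For reference, a minimal sketch (not part of the patch) of how a cfs_hash
user's get/put callbacks look after this change: they now take the cfs_hash_t
and return void, as in the conn_get()/conn_put_locked() hunks below. The
my_obj type and its function names are hypothetical, used only to illustrate
the new cfs_hash_ops_t signatures.

#include <libcfs/libcfs.h>
#include <libcfs/libcfs_hash.h>

struct my_obj {
        cfs_hlist_node_t mo_hnode;    /* linkage into the cfs_hash */
        cfs_atomic_t     mo_refcount; /* object reference count */
};

static void *my_obj_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct my_obj, mo_hnode);
}

/* take a reference; called with the bucket lock held */
static void my_obj_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct my_obj *obj = my_obj_object(hnode);

        cfs_atomic_inc(&obj->mo_refcount);
}

/* drop a reference; called with the bucket lock held */
static void my_obj_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct my_obj *obj = my_obj_object(hnode);

        cfs_atomic_dec(&obj->mo_refcount);
}

static cfs_hash_ops_t my_obj_hash_ops = {
        .hs_object     = my_obj_object,
        .hs_get        = my_obj_get,
        .hs_put_locked = my_obj_put_locked,
        /* .hs_hash, .hs_key and .hs_keycmp are also required in practice */
};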

libcfs/include/libcfs/libcfs_hash.h
libcfs/libcfs/hash.c
lustre/include/lu_object.h
lustre/ldlm/ldlm_lockd.c
lustre/lov/lov_pool.c
lustre/obdclass/cl_object.c
lustre/obdclass/lu_object.c
lustre/obdclass/obd_config.c
lustre/ptlrpc/connection.c
lustre/quota/quota_context.c

diff --git a/libcfs/include/libcfs/libcfs_hash.h b/libcfs/include/libcfs/libcfs_hash.h
index 52382ad..3e28f5d 100644
@@ -361,13 +361,13 @@ typedef struct cfs_hash_ops {
         /** return object address of @hnode, i.e: container_of(...hnode) */
         void *   (*hs_object)(cfs_hlist_node_t *hnode);
         /** get refcount of item, always called with holding bucket-lock */
-        void *   (*hs_get)(cfs_hlist_node_t *hnode);
+        void     (*hs_get)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
         /** release refcount of item */
-        void *   (*hs_put)(cfs_hlist_node_t *hnode);
+        void     (*hs_put)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
         /** release refcount of item, always called with holding bucket-lock */
-        void *   (*hs_put_locked)(cfs_hlist_node_t *hnode);
+        void     (*hs_put_locked)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
         /** it's called before removing of @hnode */
-        void     (*hs_exit)(cfs_hlist_node_t *hnode);
+        void     (*hs_exit)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
 } cfs_hash_ops_t;
 
 /** total number of buckets in @hs */
@@ -540,33 +540,33 @@ cfs_hash_object(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
         return CFS_HOP(hs, object)(hnode);
 }
 
-static inline void *
+static inline void
 cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
-        return CFS_HOP(hs, get)(hnode);
+        return CFS_HOP(hs, get)(hs, hnode);
 }
 
-static inline void *
+static inline void
 cfs_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LASSERT(CFS_HOP(hs, put_locked) != NULL);
 
-        return CFS_HOP(hs, put_locked)(hnode);
+        return CFS_HOP(hs, put_locked)(hs, hnode);
 }
 
-static inline void *
+static inline void
 cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LASSERT(CFS_HOP(hs, put) != NULL);
 
-        return CFS_HOP(hs, put)(hnode);
+        return CFS_HOP(hs, put)(hs, hnode);
 }
 
 static inline void
 cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         if (CFS_HOP(hs, exit))
-                CFS_HOP(hs, exit)(hnode);
+                CFS_HOP(hs, exit)(hs, hnode);
 }
 
 static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
@@ -579,6 +579,13 @@ static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
         hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
 }
 
+static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
+                                        cfs_atomic_t *condition)
+{
+        LASSERT(cfs_hash_with_no_bktlock(hs));
+        return cfs_atomic_dec_and_lock(condition, &hs->hs_lock.spin);
+}
+
 static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
                                     cfs_hash_bd_t *bd, int excl)
 {
@@ -604,6 +611,18 @@ static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, void *key,
         cfs_hash_bd_lock(hs, bd, excl);
 }
 
+static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+        return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
+}
+
+static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
+                                         unsigned index, cfs_hash_bd_t *bd)
+{
+        bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
+        bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
+}
+
 static inline void *
 cfs_hash_bd_extra_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
 {
diff --git a/libcfs/libcfs/hash.c b/libcfs/libcfs/hash.c
index c2c0528..5791631 100644
@@ -1572,7 +1572,6 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
 {
         cfs_hlist_node_t *hnode;
         cfs_hlist_node_t *tmp;
-        void             *obj;
         cfs_hash_bd_t     bd;
         __u32             version;
         int               count = 0;
@@ -1596,13 +1595,13 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                         for (hnode = hhead->first; hnode != NULL;) {
                                 cfs_hash_bucket_validate(hs, &bd, hnode);
-                                obj = cfs_hash_get(hs, hnode);
+                                cfs_hash_get(hs, hnode);
                                 cfs_hash_bd_unlock(hs, &bd, 0);
                                 cfs_hash_unlock(hs, 0);
 
                                 rc = func(hs, &bd, hnode, data);
                                 if (stop_on_change)
-                                        (void)cfs_hash_put(hs, hnode);
+                                        cfs_hash_put(hs, hnode);
                                 cfs_cond_resched();
                                 count++;
 
@@ -1703,8 +1702,7 @@ cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
         if (hindex >= CFS_HASH_NHLIST(hs))
                 goto out;
 
-        bd.bd_bucket = hs->hs_buckets[hindex >> hs->hs_bkt_bits];
-        bd.bd_offset = hindex & (CFS_HASH_BKT_NHLIST(hs) - 1);
+        cfs_hash_bd_index_set(hs, hindex, &bd);
 
         cfs_hash_bd_lock(hs, &bd, 0);
         hhead = cfs_hash_bd_hhead(hs, &bd);
diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h
index 824e758..15ebba6 100644
@@ -552,7 +552,11 @@ struct lu_object_header {
 struct fld;
 
 struct lu_site_bkt_data {
-       /**
+        /**
+         * number of busy object on this bucket
+         */
+        long                      lsb_busy;
+        /**
          * LRU list, updated on each access to object. Protected by
          * bucket lock of lu_site::ls_obj_hash.
          *
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 1f07482..1cd25f4 100644
@@ -2306,10 +2306,9 @@ static void *
 ldlm_export_lock_key(cfs_hlist_node_t *hnode)
 {
         struct ldlm_lock *lock;
-        ENTRY;
 
         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
-        RETURN(&lock->l_remote_handle);
+        return &lock->l_remote_handle;
 }
 
 static void
@@ -2324,8 +2323,7 @@ ldlm_export_lock_keycpy(cfs_hlist_node_t *hnode, void *key)
 static int
 ldlm_export_lock_keycmp(void *key, cfs_hlist_node_t *hnode)
 {
-        ENTRY;
-        RETURN(lustre_handle_equal(ldlm_export_lock_key(hnode), key));
+        return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
 }
 
 static void *
@@ -2334,28 +2332,22 @@ ldlm_export_lock_object(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
 }
 
-static void *
-ldlm_export_lock_get(cfs_hlist_node_t *hnode)
+static void
+ldlm_export_lock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct ldlm_lock *lock;
-        ENTRY;
 
         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         LDLM_LOCK_GET(lock);
-
-        RETURN(lock);
 }
 
-static void *
-ldlm_export_lock_put(cfs_hlist_node_t *hnode)
+static void
+ldlm_export_lock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct ldlm_lock *lock;
-        ENTRY;
 
         lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
         LDLM_LOCK_RELEASE(lock);
-
-        RETURN(lock);
 }
 
 static cfs_hash_ops_t ldlm_export_lock_ops = {
diff --git a/lustre/lov/lov_pool.c b/lustre/lov/lov_pool.c
index b4b93dc..bf12521 100644
@@ -127,22 +127,21 @@ static void *pool_hashobject(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
 }
 
-static void *pool_hashrefcount_get(cfs_hlist_node_t *hnode)
+static void pool_hashrefcount_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct pool_desc *pool;
 
         pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
         lov_pool_getref(pool);
-        return (pool);
 }
 
-static void *pool_hashrefcount_put_locked(cfs_hlist_node_t *hnode)
+static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+                                         cfs_hlist_node_t *hnode)
 {
         struct pool_desc *pool;
 
         pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
         lov_pool_putref_locked(pool);
-        return (pool);
 }
 
 cfs_hash_ops_t pool_hash_operations = {
diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c
index 261181b..739b88d 100644
@@ -637,13 +637,19 @@ static int cl_env_hops_keycmp(void *key, cfs_hlist_node_t *hn)
         return (key == cle->ce_owner);
 }
 
+static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn)
+{
+        struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node);
+        LASSERT(cle->ce_magic == &cl_env_init0);
+}
+
 static cfs_hash_ops_t cl_env_hops = {
         .hs_hash        = cl_env_hops_hash,
         .hs_key         = cl_env_hops_obj,
         .hs_keycmp      = cl_env_hops_keycmp,
         .hs_object      = cl_env_hops_obj,
-        .hs_get         = cl_env_hops_obj,
-        .hs_put_locked  = cl_env_hops_obj,
+        .hs_get         = cl_env_hops_noop,
+        .hs_put_locked  = cl_env_hops_noop,
 };
 
 static inline struct cl_env *cl_env_fetch(void)
diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index 4527e6f..8c61aa9 100644
@@ -73,6 +73,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o);
  */
 void lu_object_put(const struct lu_env *env, struct lu_object *o)
 {
+        struct lu_site_bkt_data *bkt;
         struct lu_object_header *top;
         struct lu_site          *site;
         struct lu_object        *orig;
@@ -83,20 +84,22 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         orig = o;
 
         cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
+        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
+
         if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                 if (lu_object_is_dying(top)) {
-                        struct lu_site_bkt_data *bkt;
 
                         /*
                          * somebody may be waiting for this, currently only
                          * used for cl_object, see cl_object_put_last().
                          */
-                        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
                         cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                 }
                 return;
         }
 
+        LASSERT(bkt->lsb_busy > 0);
+        bkt->lsb_busy--;
         /*
          * When last reference is released, iterate over object
          * layers, and notify them that object is no longer busy.
@@ -498,6 +501,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
                 return NULL;
 
         *version = ver;
+        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
         /* cfs_hash_bd_lookup_intent is a somehow "internal" function
          * of cfs_hash, but we don't want refcount on object right now */
         hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
@@ -520,7 +524,6 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         cfs_atomic_dec(&h->loh_ref);
 
         cfs_waitlink_init(waiter);
-        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
         cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
         cfs_set_current_state(CFS_TASK_UNINT);
         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
@@ -599,6 +602,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
                 bkt = cfs_hash_bd_extra_get(hs, &bd);
                 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                 cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
+                bkt->lsb_busy++;
                 cfs_hash_bd_unlock(hs, &bd, 1);
                 return o;
         }
@@ -816,19 +820,24 @@ static int lu_obj_hop_keycmp(void *key, cfs_hlist_node_t *hnode)
         return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
 
-static void *lu_obj_hop_get(cfs_hlist_node_t *hnode)
+static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct lu_object_header *h;
 
         h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
-        cfs_atomic_inc(&h->loh_ref);
-        return h;
+        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
+                struct lu_site_bkt_data *bkt;
+                cfs_hash_bd_t            bd;
+
+                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
+                bkt = cfs_hash_bd_extra_get(hs, &bd);
+                bkt->lsb_busy++;
+        }
 }
 
-static void *lu_obj_hop_put_locked(cfs_hlist_node_t *hnode)
+static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         LBUG(); /* we should never called it */
-        return NULL;
 }
 
 cfs_hash_ops_t lu_site_hash_ops = {
@@ -843,8 +852,14 @@ cfs_hash_ops_t lu_site_hash_ops = {
 /**
  * Initialize site \a s, with \a d as the top level device.
  */
-#define LU_SITE_BITS_MIN    10
+#define LU_SITE_BITS_MIN    12
 #define LU_SITE_BITS_MAX    23
+/**
+ * total 128 buckets, we don't want too many buckets because:
+ * - consume too much memory
+ * - avoid unbalanced LRU list
+ */
+#define LU_SITE_BKT_BITS    7
 
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
@@ -858,8 +873,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         bits = lu_htable_order();
         for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
              bits >= LU_SITE_BITS_MIN; bits--) {
-                s->ls_obj_hash = cfs_hash_create("lu_site", bits,
-                                                 bits, bits - LU_SITE_BITS_MIN,
+                s->ls_obj_hash = cfs_hash_create("lu_site", bits, bits,
+                                                 bits - LU_SITE_BKT_BITS,
                                                  sizeof(*bkt), 0, 0,
                                                  &lu_site_hash_ops,
                                                  CFS_HASH_SPIN_BKTLOCK |
@@ -1575,39 +1590,45 @@ EXPORT_SYMBOL(lu_env_refill);
 
 static struct cfs_shrinker *lu_site_shrinker = NULL;
 
-struct lu_site_stats_result {
+typedef struct lu_site_stats{
         unsigned        lss_populated;
         unsigned        lss_max_search;
         unsigned        lss_total;
         unsigned        lss_busy;
-        cfs_hash_bd_t   lss_bd;
-};
+} lu_site_stats_t;
 
-static int lu_site_stats_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                             cfs_hlist_node_t *hnode, void *data)
+static void lu_site_stats_get(cfs_hash_t *hs,
+                              lu_site_stats_t *stats, int populated)
 {
-        struct lu_site_stats_result    *sa = data;
-        struct lu_object_header        *h;
+        cfs_hash_bd_t bd;
+        int           i;
+
+        cfs_hash_for_each_bucket(hs, &bd, i) {
+                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
+                cfs_hlist_head_t        *hhead;
+
+                cfs_hash_bd_lock(hs, &bd, 1);
+                stats->lss_busy  += bkt->lsb_busy;
+                stats->lss_total += cfs_hash_bd_count_get(&bd);
+                stats->lss_max_search = max((int)stats->lss_max_search,
+                                            cfs_hash_bd_depmax_get(&bd));
+                if (!populated) {
+                        cfs_hash_bd_unlock(hs, &bd, 1);
+                        continue;
+                }
 
-        sa->lss_total++;
-        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
-        if (cfs_atomic_read(&h->loh_ref) > 0)
-                sa->lss_busy++;
-
-        if (sa->lss_bd.bd_bucket == NULL ||
-            cfs_hash_bd_compare(&sa->lss_bd, bd) != 0) {
-                if (sa->lss_max_search < cfs_hash_bd_depmax_get(bd))
-                        sa->lss_max_search = cfs_hash_bd_depmax_get(bd);
-                sa->lss_populated++;
-                sa->lss_bd = *bd;
+                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
+                        if (!cfs_hlist_empty(hhead))
+                                stats->lss_populated++;
+                }
+                cfs_hash_bd_unlock(hs, &bd, 1);
         }
-        return 0;
 }
 
 #ifdef __KERNEL__
 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
 {
-        struct lu_site_stats_result stats;
+        lu_site_stats_t stats;
         struct lu_site *s;
         struct lu_site *tmp;
         int cached = 0;
@@ -1632,7 +1653,7 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask)
                 }
 
                 memset(&stats, 0, sizeof(stats));
-                cfs_hash_for_each(s->ls_obj_hash, lu_site_stats_get, &stats);
+                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                 cached += stats.lss_total - stats.lss_busy;
                 if (nr && remain <= 0)
                         break;
@@ -1821,10 +1842,10 @@ static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
  */
 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
 {
-        struct lu_site_stats_result stats;
+        lu_site_stats_t stats;
 
         memset(&stats, 0, sizeof(stats));
-        cfs_hash_for_each(s->ls_obj_hash, lu_site_stats_get, &stats);
+        lu_site_stats_get(s->ls_obj_hash, &stats, 1);
 
         return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
                         stats.lss_busy,
diff --git a/lustre/obdclass/obd_config.c b/lustre/obdclass/obd_config.c
index e9eb73b..e595ad7 100644
@@ -1531,7 +1531,7 @@ uuid_key(cfs_hlist_node_t *hnode)
 
         exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 
-        RETURN(&exp->exp_client_uuid);
+        return &exp->exp_client_uuid;
 }
 
 /*
@@ -1546,8 +1546,8 @@ uuid_keycmp(void *key, cfs_hlist_node_t *hnode)
         LASSERT(key);
         exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 
-        RETURN(obd_uuid_equals((struct obd_uuid *)key,&exp->exp_client_uuid) &&
-               !exp->exp_failed);
+        return obd_uuid_equals((struct obd_uuid *)key,&exp->exp_client_uuid) &&
+               !exp->exp_failed;
 }
 
 static void *
@@ -1556,26 +1556,22 @@ uuid_export_object(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 }
 
-static void *
-uuid_export_get(cfs_hlist_node_t *hnode)
+static void
+uuid_export_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
         exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
         class_export_get(exp);
-
-        RETURN(exp);
 }
 
-static void *
-uuid_export_put_locked(cfs_hlist_node_t *hnode)
+static void
+uuid_export_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
         exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
         class_export_put(exp);
-
-        RETURN(exp);
 }
 
 static cfs_hash_ops_t uuid_hash_ops = {
@@ -1630,26 +1626,22 @@ nid_export_object(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
 }
 
-static void *
-nid_export_get(cfs_hlist_node_t *hnode)
+static void
+nid_export_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
         exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
         class_export_get(exp);
-
-        RETURN(exp);
 }
 
-static void *
-nid_export_put_locked(cfs_hlist_node_t *hnode)
+static void
+nid_export_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct obd_export *exp;
 
         exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
         class_export_put(exp);
-
-        RETURN(exp);
 }
 
 static cfs_hash_ops_t nid_hash_ops = {
@@ -1673,13 +1665,13 @@ nidstats_key(cfs_hlist_node_t *hnode)
 
         ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
 
-        RETURN(&ns->nid);
+        return &ns->nid;
 }
 
 static int
 nidstats_keycmp(void *key, cfs_hlist_node_t *hnode)
 {
-        RETURN(*(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key);
+        return *(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key;
 }
 
 static void *
@@ -1688,26 +1680,22 @@ nidstats_object(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
 }
 
-static void *
-nidstats_get(cfs_hlist_node_t *hnode)
+static void
+nidstats_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct nid_stat *ns;
 
         ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
         nidstat_getref(ns);
-
-        RETURN(ns);
 }
 
-static void *
-nidstats_put_locked(cfs_hlist_node_t *hnode)
+static void
+nidstats_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct nid_stat *ns;
 
         ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
         nidstat_putref(ns);
-
-        RETURN(ns);
 }
 
 static cfs_hash_ops_t nid_stat_hash_ops = {
diff --git a/lustre/ptlrpc/connection.c b/lustre/ptlrpc/connection.c
index d57ad7f..ab1f0e7 100644
@@ -198,30 +198,26 @@ conn_object(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
 }
 
-static void *
-conn_get(cfs_hlist_node_t *hnode)
+static void
+conn_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
 
         conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
         cfs_atomic_inc(&conn->c_refcount);
-
-        return conn;
 }
 
-static void *
-conn_put_locked(cfs_hlist_node_t *hnode)
+static void
+conn_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
 
         conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
         cfs_atomic_dec(&conn->c_refcount);
-
-        return conn;
 }
 
 static void
-conn_exit(cfs_hlist_node_t *hnode)
+conn_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct ptlrpc_connection *conn;
 
diff --git a/lustre/quota/quota_context.c b/lustre/quota/quota_context.c
index 4666c1f..c3e0a64 100644
@@ -1623,36 +1623,29 @@ lqs_object(cfs_hlist_node_t *hnode)
         return cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
 }
 
-static void *
-lqs_get(cfs_hlist_node_t *hnode)
+static void
+lqs_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q =
                 cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
-        ENTRY;
 
         __lqs_getref(q);
-
-        RETURN(q);
 }
 
-static void *
-lqs_put_locked(cfs_hlist_node_t *hnode)
+static void
+lqs_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q =
                 cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
-        ENTRY;
 
         __lqs_putref(q);
-
-        RETURN(q);
 }
 
 static void
-lqs_exit(cfs_hlist_node_t *hnode)
+lqs_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
 {
         struct lustre_qunit_size *q =
                 cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
-        ENTRY;
 
         /*
          * Nothing should be left. User of lqs put it and
@@ -1663,7 +1656,6 @@ lqs_exit(cfs_hlist_node_t *hnode)
                  "Busy lqs %p with %d refs\n", q,
                  cfs_atomic_read(&q->lqs_refcount));
         OBD_FREE_PTR(q);
-        EXIT;
 }
 
 static cfs_hash_ops_t lqs_hash_ops = {