Whamcloud - gitweb
fix changelog
[fs/lustre-release.git] / lustre / obdclass / class_hash.c
index 762aebf..c15c761 100644 (file)
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -127,7 +127,8 @@ lustre_hash_init(char *name, unsigned int cur_bits, unsigned int max_bits,
         lh->lh_ops = ops;
         lh->lh_flags = flags;
         if (cur_bits != max_bits && (lh->lh_flags & LH_REHASH) == 0)
-                CWARN("Rehash is disabled, ignore max_bits %d\n", max_bits);
+                CERROR("Rehash is disabled for %s, ignore max_bits %d\n",
+                       name, max_bits);
 
         /* theta * 1000 */
         __lustre_hash_set_theta(lh, 500, 2000);
@@ -181,13 +182,18 @@ lustre_hash_exit(lustre_hash_t *lh)
                         lh_exit(lh, hnode);
                 }
 
-                LASSERT(hlist_empty(&(lhb->lhb_head)));
-                LASSERT(atomic_read(&lhb->lhb_count) == 0);
+                LASSERTF(hlist_empty(&(lhb->lhb_head)),
+                         "hash bucket %d from %s is not empty\n", i, lh->lh_name);
+                LASSERTF(atomic_read(&lhb->lhb_count) == 0,
+                         "hash bucket %d from %s has #entries > 0 (%d)\n", i,
+                         lh->lh_name, atomic_read(&lhb->lhb_count));
                 write_unlock(&lhb->lhb_rwlock);
                 LIBCFS_FREE_PTR(lhb);
         }
 
-        LASSERT(atomic_read(&lh->lh_count) == 0);
+        LASSERTF(atomic_read(&lh->lh_count) == 0,
+                 "hash %s still has #entries > 0 (%d)\n", lh->lh_name,
+                 atomic_read(&lh->lh_count));
         lh_write_unlock(lh);
 
         LIBCFS_FREE(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
@@ -344,9 +350,9 @@ lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
         i = lh_hash(lh, key, lh->lh_cur_mask);
         lhb = lh->lh_buckets[i];
         LASSERT(i <= lh->lh_cur_mask);
-        LASSERT(!hlist_unhashed(hnode));
 
         write_lock(&lhb->lhb_rwlock);
+        LASSERT(!hlist_unhashed(hnode));
         obj = __lustre_hash_bucket_del(lh, lhb, hnode);
         write_unlock(&lhb->lhb_rwlock);
         lh_read_unlock(lh);
@@ -356,6 +362,42 @@ lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
 EXPORT_SYMBOL(lustre_hash_del);
 
 /**
+ * Delete an item from the lustre hash @lh when @func returns true.
+ * The write lock is held during the loop over each bucket to prevent
+ * any object from being referenced concurrently.
+ */
+void
+lustre_hash_cond_del(lustre_hash_t *lh, lh_cond_opt_cb func, void *data)
+{
+        lustre_hash_bucket_t *lhb;
+        struct hlist_node    *hnode;
+        struct hlist_node    *pos;
+        int                   i;
+        ENTRY;
+
+        LASSERT(lh != NULL);
+
+        lh_write_lock(lh);
+        lh_for_each_bucket(lh, lhb, i) {
+                if (lhb == NULL)
+                        continue;
+
+                write_lock(&lhb->lhb_rwlock);
+                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
+                        __lustre_hash_bucket_validate(lh, lhb, hnode);
+                        if (func(lh_get(lh, hnode), data))
+                                __lustre_hash_bucket_del(lh, lhb, hnode);
+                        (void)lh_put(lh, hnode);
+                }
+                write_unlock(&lhb->lhb_rwlock);
+        }
+        lh_write_unlock(lh);
+
+        EXIT;
+}
+EXPORT_SYMBOL(lustre_hash_cond_del);
+
+/**
  * Delete item given @key in lustre hash @lh.  The first @key found in
  * the hash will be removed, if the key exists multiple times in the hash
  * @lh this function must be called once per key.  The removed object
@@ -508,13 +550,20 @@ lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb func, void *data)
 {
         struct hlist_node    *hnode;
         lustre_hash_bucket_t *lhb;
+        lustre_hash_bucket_t **lhb_last = NULL;
         void                 *obj;
-        int                   i;
+        int                   i = 0;
         ENTRY;
 
 restart:
         lh_read_lock(lh);
-        lh_for_each_bucket(lh, lhb, i) {
+        /* If the hash table has changed since we last held lh_rwlock,
+         * we need to start traversing the list from the start. */
+        if (lh->lh_buckets != lhb_last) {
+                i = 0;
+                lhb_last = lh->lh_buckets;
+        }
+        lh_for_each_bucket_restart(lh, lhb, i) {
                 write_lock(&lhb->lhb_rwlock);
                 while (!hlist_empty(&lhb->lhb_head)) {
                         hnode =  lhb->lhb_head.first;
@@ -524,6 +573,7 @@ restart:
                         lh_read_unlock(lh);
                         func(obj, data);
                         (void)lh_put(lh, hnode);
+                        cfs_cond_resched();
                         goto restart;
                 }
                 write_unlock(&lhb->lhb_rwlock);
@@ -726,7 +776,7 @@ void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
                 write_lock(&new_lhb->lhb_rwlock);
                 write_lock(&old_lhb->lhb_rwlock);
         } else { /* do nothing */
-                read_unlock(&lh->lh_rwlock);
+                lh_read_unlock(lh);
                 EXIT;
                 return;
         }