Whamcloud - gitweb
Branch HEAD
author: adilger <adilger>
Thu, 23 Oct 2008 04:13:42 +0000 (04:13 +0000)
committer: adilger <adilger>
Thu, 23 Oct 2008 04:13:42 +0000 (04:13 +0000)
Remove trailing whitespace.

lustre/obdclass/class_hash.c

index 919fa4a..55b41df 100644 (file)
@@ -71,23 +71,23 @@ lustre_hash_init(char *name, unsigned int cur_size, unsigned int max_size,
         lustre_hash_t *lh;
         int            i;
         ENTRY;
-  
+
         LASSERT(name != NULL);
         LASSERT(ops != NULL);
 
-        /* 
+        /*
          * Ensure hash is a power of two to allow the use of a bitmask
-         * in the hash function instead of a more expensive modulus. 
+         * in the hash function instead of a more expensive modulus.
          */
         LASSERTF(cur_size && (cur_size & (cur_size - 1)) == 0,
                  "Size (%u) is not power of 2\n", cur_size);
         LASSERTF(max_size && (max_size & (max_size - 1)) == 0,
                  "Size (%u) is not power of 2\n", max_size);
-  
+
         OBD_ALLOC_PTR(lh);
         if (!lh)
                 RETURN(NULL);
-  
+
         strncpy(lh->lh_name, name, sizeof(lh->lh_name));
         atomic_set(&lh->lh_rehash_count, 0);
         atomic_set(&lh->lh_count, 0);
@@ -105,17 +105,17 @@ lustre_hash_init(char *name, unsigned int cur_size, unsigned int max_size,
                 OBD_FREE_PTR(lh);
                 RETURN(NULL);
         }
-  
+
         for (i = 0; i < lh->lh_cur_size; i++) {
                 INIT_HLIST_HEAD(&lh->lh_buckets[i].lhb_head);
                 rwlock_init(&lh->lh_buckets[i].lhb_rwlock);
                 atomic_set(&lh->lh_buckets[i].lhb_count, 0);
         }
-  
+
         return lh;
 }
 EXPORT_SYMBOL(lustre_hash_init);
-  
+
 /**
  * Cleanup lustre hash @lh.
  */
@@ -127,12 +127,12 @@ lustre_hash_exit(lustre_hash_t *lh)
         struct hlist_node    *pos;
         int                   i;
         ENTRY;
-  
+
         if (!lh)
                 return;
-  
+
         write_lock(&lh->lh_rwlock);
-  
+
         lh_for_each_bucket(lh, lhb, i) {
                 write_lock(&lhb->lhb_rwlock);
                 hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
@@ -140,16 +140,16 @@ lustre_hash_exit(lustre_hash_t *lh)
                         __lustre_hash_bucket_del(lh, lhb, hnode);
                         lh_exit(lh, hnode);
                 }
-  
+
                 LASSERT(hlist_empty(&(lhb->lhb_head)));
                 LASSERT(atomic_read(&lhb->lhb_count) == 0);
                 write_unlock(&lhb->lhb_rwlock);
         }
-  
+
         OBD_VFREE(lh->lh_buckets, sizeof(*lh->lh_buckets) * lh->lh_cur_size);
         LASSERT(atomic_read(&lh->lh_count) == 0);
         write_unlock(&lh->lh_rwlock);
-  
+
         OBD_FREE_PTR(lh);
         EXIT;
 }
@@ -170,7 +170,7 @@ static inline unsigned int lustre_hash_rehash_size(lustre_hash_t *lh)
 
         return 0;
 }
-  
+
 /**
  * Add item @hnode to lustre hash @lh using @key.  The registered
  * ops->lh_get function will be called when the item is added.
@@ -182,7 +182,7 @@ lustre_hash_add(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
         int                   size;
         unsigned              i;
         ENTRY;
-  
+
         __lustre_hash_key_validate(lh, key, hnode);
 
         read_lock(&lh->lh_rwlock);
@@ -199,7 +199,7 @@ lustre_hash_add(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
         read_unlock(&lh->lh_rwlock);
         if (size)
                 lustre_hash_rehash(lh, size);
-  
+
         EXIT;
 }
 EXPORT_SYMBOL(lustre_hash_add);
@@ -213,9 +213,9 @@ lustre_hash_findadd_unique_hnode(lustre_hash_t *lh, void *key,
         int                   size;
         unsigned              i;
         ENTRY;
-  
+
         __lustre_hash_key_validate(lh, key, hnode);
-  
+
         read_lock(&lh->lh_rwlock);
         i = lh_hash(lh, key, lh->lh_cur_size - 1);
         lhb = &lh->lh_buckets[i];
@@ -236,10 +236,10 @@ lustre_hash_findadd_unique_hnode(lustre_hash_t *lh, void *key,
         read_unlock(&lh->lh_rwlock);
         if (size)
                 lustre_hash_rehash(lh, size);
-  
+
         RETURN(ehnode);
 }
-  
+
 /**
  * Add item @hnode to lustre hash @lh using @key.  The registered
  * ops->lh_get function will be called if the item was added.
@@ -250,15 +250,15 @@ lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
 {
         struct hlist_node    *ehnode;
         ENTRY;
-        
+
         ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
         if (ehnode != hnode)
                 RETURN(-EALREADY);
-        
+
         RETURN(0);
 }
 EXPORT_SYMBOL(lustre_hash_add_unique);
-  
+
 /**
  * Add item @hnode to lustre hash @lh using @key.  If this @key
  * already exists in the hash then ops->lh_get will be called on the
@@ -272,14 +272,14 @@ lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
         struct hlist_node    *ehnode;
         void                 *obj;
         ENTRY;
-        
+
         ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
         obj = lh_get(lh, ehnode);
         lh_put(lh, ehnode);
         RETURN(obj);
 }
 EXPORT_SYMBOL(lustre_hash_findadd_unique);
-  
+
 /**
  * Delete item @hnode from the lustre hash @lh using @key.  The @key
  * is required to ensure the correct hash bucket is locked since there
@@ -295,9 +295,9 @@ lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
         unsigned              i;
         void                 *obj;
         ENTRY;
-  
+
         __lustre_hash_key_validate(lh, key, hnode);
-  
+
         read_lock(&lh->lh_rwlock);
         i = lh_hash(lh, key, lh->lh_cur_size - 1);
         lhb = &lh->lh_buckets[i];
@@ -312,11 +312,11 @@ lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
         read_unlock(&lh->lh_rwlock);
         if (size)
                 lustre_hash_rehash(lh, size);
-  
+
         RETURN(obj);
 }
 EXPORT_SYMBOL(lustre_hash_del);
-  
+
 /**
  * Delete item given @key in lustre hash @lh.  The first @key found in
  * the hash will be removed, if the key exists multiple times in the hash
@@ -332,7 +332,7 @@ lustre_hash_del_key(lustre_hash_t *lh, void *key)
         unsigned              i;
         void                 *obj = NULL;
         ENTRY;
-  
+
         read_lock(&lh->lh_rwlock);
         i = lh_hash(lh, key, lh->lh_cur_size - 1);
         lhb = &lh->lh_buckets[i];
@@ -349,11 +349,11 @@ lustre_hash_del_key(lustre_hash_t *lh, void *key)
         read_unlock(&lh->lh_rwlock);
         if (size)
                 lustre_hash_rehash(lh, size);
-  
+
         RETURN(obj);
 }
 EXPORT_SYMBOL(lustre_hash_del_key);
-  
+
 /**
  * Lookup an item using @key in the lustre hash @lh and return it.
  * If the @key is found in the hash lh->lh_get() is called and the
@@ -370,7 +370,7 @@ lustre_hash_lookup(lustre_hash_t *lh, void *key)
         unsigned              i;
         void                 *obj = NULL;
         ENTRY;
-  
+
         read_lock(&lh->lh_rwlock);
         i = lh_hash(lh, key, lh->lh_cur_size - 1);
         lhb = &lh->lh_buckets[i];
@@ -380,14 +380,14 @@ lustre_hash_lookup(lustre_hash_t *lh, void *key)
         hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
         if (hnode)
                 obj = lh_get(lh, hnode);
-  
+
         read_unlock(&lhb->lhb_rwlock);
         read_unlock(&lh->lh_rwlock);
-  
+
         RETURN(obj);
 }
 EXPORT_SYMBOL(lustre_hash_lookup);
-  
+
 /**
  * For each item in the lustre hash @lh call the passed callback @func
  * and pass to it as an argument each hash item and the private @data.
@@ -403,7 +403,7 @@ lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb func, void *data)
         void                 *obj;
         int                   i;
         ENTRY;
-  
+
         read_lock(&lh->lh_rwlock);
         lh_for_each_bucket(lh, lhb, i) {
                 read_lock(&lhb->lhb_rwlock);
@@ -420,7 +420,7 @@ lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb func, void *data)
         EXIT;
 }
 EXPORT_SYMBOL(lustre_hash_for_each);
-  
+
 /**
  * For each item in the lustre hash @lh call the passed callback @func
  * and pass to it as an argument each hash item and the private @data.
@@ -440,7 +440,7 @@ lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb func, void *data)
         void                 *obj;
         int                   i;
         ENTRY;
-  
+
         read_lock(&lh->lh_rwlock);
         lh_for_each_bucket(lh, lhb, i) {
                 read_lock(&lhb->lhb_rwlock);
@@ -458,7 +458,7 @@ lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb func, void *data)
         EXIT;
 }
 EXPORT_SYMBOL(lustre_hash_for_each_safe);
-  
+
 /**
  * For each hash bucket in the lustre hash @lh call the passed callback
  * @func until all the hash buckets are empty.  The passed callback @func
@@ -478,7 +478,7 @@ lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb func, void *data)
         void                 *obj;
         int                   i;
         ENTRY;
-  
+
 restart:
         read_lock(&lh->lh_rwlock);
         lh_for_each_bucket(lh, lhb, i) {
@@ -499,7 +499,7 @@ restart:
         EXIT;
 }
 EXPORT_SYMBOL(lustre_hash_for_each_empty);
-  
+
   /*
  * For each item in the lustre hash @lh which matches the @key call
  * the passed callback @func and pass to it as an argument each hash
@@ -516,30 +516,30 @@ lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
         lustre_hash_bucket_t *lhb;
         unsigned              i;
         ENTRY;
-  
+
         read_lock(&lh->lh_rwlock);
         i = lh_hash(lh, key, lh->lh_cur_size - 1);
         lhb = &lh->lh_buckets[i];
         LASSERT(i < lh->lh_cur_size);
-  
+
         read_lock(&lhb->lhb_rwlock);
         hlist_for_each(hnode, &(lhb->lhb_head)) {
                 __lustre_hash_bucket_validate(lh, lhb, hnode);
-  
+
                 if (!lh_compare(lh, key, hnode))
                         continue;
-  
+
                 func(lh_get(lh, hnode), data);
                 (void)lh_put(lh, hnode);
         }
-  
+
         read_unlock(&lhb->lhb_rwlock);
         read_unlock(&lh->lh_rwlock);
-  
+
         EXIT;
 }
 EXPORT_SYMBOL(lustre_hash_for_each_key);
-  
+
 /**
  * Rehash the lustre hash @lh to the given @size.  This can be used
  * to grow the hash size when excessive chaining is detected, or to
@@ -565,24 +565,24 @@ lustre_hash_rehash(lustre_hash_t *lh, int size)
         int                    theta;
         void                  *key;
         ENTRY;
-  
+
         LASSERT(size > 0);
-  
+
         OBD_VMALLOC(rehash_buckets, sizeof(*rehash_buckets) * size);
         if (!rehash_buckets)
                 RETURN(-ENOMEM);
-  
+
         for (i = 0; i < size; i++) {
                 INIT_HLIST_HEAD(&rehash_buckets[i].lhb_head);
                 rwlock_init(&rehash_buckets[i].lhb_rwlock);
                 atomic_set(&rehash_buckets[i].lhb_count, 0);
         }
-  
+
         write_lock(&lh->lh_rwlock);
 
-        /* 
+        /*
          * Early return for multiple concurrent racing callers,
-         * ensure we only trigger the rehash if it is still needed. 
+         * ensure we only trigger the rehash if it is still needed.
          */
         theta = __lustre_hash_theta(lh);
         if ((theta >= lh->lh_min_theta) && (theta <= lh->lh_max_theta)) {
@@ -590,10 +590,10 @@ lustre_hash_rehash(lustre_hash_t *lh, int size)
                 write_unlock(&lh->lh_rwlock);
                 RETURN(-EALREADY);
         }
-  
+
         lh_size = lh->lh_cur_size;
         lh_buckets = lh->lh_buckets;
-  
+
         lh->lh_cur_size = size;
         lh->lh_buckets = rehash_buckets;
         atomic_inc(&lh->lh_rehash_count);
@@ -606,39 +606,39 @@ lustre_hash_rehash(lustre_hash_t *lh, int size)
                         key = lh_key(lh, hnode);
                         LASSERT(key);
 
-                        /* 
+                        /*
                          * Validate hnode is in the correct bucket.
                          */
                         if (unlikely(lh->lh_flags & LH_DEBUG))
                                 LASSERT(lh_hash(lh, key, lh_size - 1) == i);
 
-                        /* 
+                        /*
                          * Delete from old hash bucket.
                          */
                         hlist_del(hnode);
                         LASSERT(atomic_read(&lh_lhb->lhb_count) > 0);
                         atomic_dec(&lh_lhb->lhb_count);
 
-                        /* 
-                         * Add to rehash bucket, ops->lh_key must be defined. 
+                        /*
+                         * Add to rehash bucket, ops->lh_key must be defined.
                          */
                         rehash_lhb = &rehash_buckets[lh_hash(lh, key, size-1)];
                         hlist_add_head(hnode, &(rehash_lhb->lhb_head));
                         atomic_inc(&rehash_lhb->lhb_count);
                 }
-  
+
                 LASSERT(hlist_empty(&(lh_lhb->lhb_head)));
                 LASSERT(atomic_read(&lh_lhb->lhb_count) == 0);
                 write_unlock(&lh_lhb->lhb_rwlock);
         }
-  
+
         OBD_VFREE(lh_buckets, sizeof(*lh_buckets) * lh_size);
         write_unlock(&lh->lh_rwlock);
-  
+
         RETURN(0);
 }
 EXPORT_SYMBOL(lustre_hash_rehash);
-  
+
 /**
  * Rehash the object referenced by @hnode in the lustre hash @lh.  The
  * @old_key must be provided to locate the objects previous location
@@ -656,12 +656,12 @@ void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
         unsigned               i;
         int                    j;
         ENTRY;
-  
+
         __lustre_hash_key_validate(lh, new_key, hnode);
         LASSERT(!hlist_unhashed(hnode));
-  
+
         read_lock(&lh->lh_rwlock);
-  
+
         i = lh_hash(lh, old_key, lh->lh_cur_size - 1);
         old_lhb = &lh->lh_buckets[i];
         LASSERT(i < lh->lh_cur_size);
@@ -673,9 +673,9 @@ void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
         write_lock(&old_lhb->lhb_rwlock);
         write_lock(&new_lhb->lhb_rwlock);
 
-        /* 
+        /*
          * Migrate item between hash buckets without calling
-         * the lh_get() and lh_put() callback functions. 
+         * the lh_get() and lh_put() callback functions.
          */
         hlist_del(hnode);
         LASSERT(atomic_read(&old_lhb->lhb_count) > 0);
@@ -686,11 +686,11 @@ void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
         write_unlock(&new_lhb->lhb_rwlock);
         write_unlock(&old_lhb->lhb_rwlock);
         read_unlock(&lh->lh_rwlock);
-  
+
         EXIT;
 }
 EXPORT_SYMBOL(lustre_hash_rehash_key);
-  
+
 int lustre_hash_debug_header(char *str, int size)
 {
         return snprintf(str, size,
@@ -730,7 +730,7 @@ int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size)
         c += snprintf(str + c, size - c, "%5d ",
                       atomic_read(&lh->lh_count));
 
-        /* 
+        /*
          * The distribution is a summary of the chained hash depth in
          * each of the lustre hash buckets.  Each buckets lhb_count is
          * divided by the hash theta value and used to generate a
@@ -749,9 +749,9 @@ int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size)
         for (i = 0; i < 8; i++)
                 c += snprintf(str + c, size - c, "%d%c",  dist[i],
                               (i == 7) ? '\n' : '/');
-  
+
         read_unlock(&lh->lh_rwlock);
-  
+
         return c;
 }
 EXPORT_SYMBOL(lustre_hash_debug_str);