b=17167 libcfs: ensure all libcfs exported symbols have the cfs_ prefix
[fs/lustre-release.git] / lustre/fld/fld_cache.c
index 359f5f2..2337953 100644
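
The hunks below mechanically replace the kernel list and spinlock primitives with their cfs_-prefixed libcfs equivalents, per bug 17167. A minimal sketch of how those wrappers are assumed to map back onto the native Linux primitives on a Linux build (illustrative only; the real definitions live in the libcfs headers and may differ on other platforms):

/* Assumed Linux-side mapping of the cfs_ wrappers used in this patch;
 * illustrative only, not the actual libcfs definitions. */
#include <linux/spinlock.h>
#include <linux/list.h>

typedef spinlock_t       cfs_spinlock_t;
typedef struct list_head cfs_list_t;

#define cfs_spin_lock_init(lock)        spin_lock_init(lock)
#define cfs_spin_lock(lock)             spin_lock(lock)
#define cfs_spin_unlock(lock)           spin_unlock(lock)

#define cfs_list_add(entry, head)       list_add(entry, head)
#define cfs_list_del(entry)             list_del(entry)
#define cfs_list_move(entry, head)      list_move(entry, head)
#define cfs_list_entry(ptr, type, mbr)  list_entry(ptr, type, mbr)
#define cfs_list_for_each_entry(pos, head, mbr) \
        list_for_each_entry(pos, head, mbr)
#define cfs_list_for_each_entry_safe(pos, n, head, mbr) \
        list_for_each_entry_safe(pos, n, head, mbr)
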
@@ -88,7 +88,7 @@ struct fld_cache *fld_cache_init(const char *name,
         CFS_INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
-        spin_lock_init(&cache->fci_lock);
+        cfs_spin_lock_init(&cache->fci_lock);
 
         strncpy(cache->fci_name, name,
                 sizeof(cache->fci_name));
@@ -139,8 +139,8 @@ void fld_cache_fini(struct fld_cache *cache)
 static inline void fld_cache_entry_delete(struct fld_cache *cache,
                                           struct fld_cache_entry *node)
 {
-        list_del(&node->fce_list);
-        list_del(&node->fce_lru);
+        cfs_list_del(&node->fce_list);
+        cfs_list_del(&node->fce_lru);
         cache->fci_cache_count--;
         OBD_FREE_PTR(node);
 }
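
For reference, each cache entry sits on two lists at once, which is why the deletion above unlinks both fce_list and fce_lru before freeing the node. A sketch of the assumed layout, using the field names that appear in this file (the authoritative definition is in the FLD internal header):

/* Sketch only -- see the FLD internal header for the real definition. */
struct fld_cache_entry {
        struct lu_seq_range fce_range;  /* cached [lsr_start, lsr_end] range */
        cfs_list_t          fce_list;   /* position in cache->fci_entries_head */
        cfs_list_t          fce_lru;    /* position in cache->fci_lru */
};
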
@@ -154,12 +154,12 @@ static void fld_fix_new_list(struct fld_cache *cache)
         struct fld_cache_entry *f_next;
         struct lu_seq_range *c_range;
         struct lu_seq_range *n_range;
-        struct list_head *head = &cache->fci_entries_head;
+        cfs_list_t *head = &cache->fci_entries_head;
         ENTRY;
 
 restart_fixup:
 
-        list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
                 c_range = &f_curr->fce_range;
                 n_range = &f_next->fce_range;
 
@@ -214,10 +214,10 @@ restart_fixup:
  */
 static inline void fld_cache_entry_add(struct fld_cache *cache,
                                        struct fld_cache_entry *f_new,
-                                       struct list_head *pos)
+                                       cfs_list_t *pos)
 {
-        list_add(&f_new->fce_list, pos);
-        list_add(&f_new->fce_lru, &cache->fci_lru);
+        cfs_list_add(&f_new->fce_list, pos);
+        cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
 
         cache->fci_cache_count++;
         fld_fix_new_list(cache);
@@ -230,7 +230,7 @@ static inline void fld_cache_entry_add(struct fld_cache *cache,
 static int fld_cache_shrink(struct fld_cache *cache)
 {
         struct fld_cache_entry *flde;
-        struct list_head *curr;
+        cfs_list_t *curr;
         int num = 0;
         ENTRY;
 
@@ -244,7 +244,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
         while (cache->fci_cache_count + cache->fci_threshold >
                cache->fci_cache_size && curr != &cache->fci_lru) {
 
-                flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+                flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
                 curr = curr->prev;
                 fld_cache_entry_delete(cache, flde);
                 num++;
@@ -263,10 +263,10 @@ void fld_cache_flush(struct fld_cache *cache)
 {
         ENTRY;
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         cache->fci_cache_size = 0;
         fld_cache_shrink(cache);
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
 
         EXIT;
 }
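
fld_cache_flush() forces fci_cache_size to zero before calling the shrinker under fci_lock, so the shrink loop shown above keeps evicting from the tail of the LRU list until it is empty. A one-line illustration of the condition it relies on (not part of the source):

/* Illustrative only: once fci_cache_size is 0, the shrink condition
 * (count + threshold > size) stays true while any entry remains,
 * so fld_cache_shrink() drains the whole LRU list. */
static inline int fld_cache_needs_shrink(int count, int threshold, int size)
{
        return count + threshold > size;
}
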
@@ -391,8 +391,8 @@ void fld_cache_insert(struct fld_cache *cache,
         struct fld_cache_entry *f_new;
         struct fld_cache_entry *f_curr;
         struct fld_cache_entry *n;
-        struct list_head *head;
-        struct list_head *prev = NULL;
+        cfs_list_t *head;
+        cfs_list_t *prev = NULL;
         const seqno_t new_start  = range->lsr_start;
         const seqno_t new_end  = range->lsr_end;
         ENTRY;
@@ -413,12 +413,12 @@ void fld_cache_insert(struct fld_cache *cache,
          * So we don't need to search new entry before starting insertion loop.
          */
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         fld_cache_shrink(cache);
 
         head = &cache->fci_entries_head;
 
-        list_for_each_entry_safe(f_curr, n, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
                 /* add list if next is end of list */
                 if (new_end < f_curr->fce_range.lsr_start)
                         break;
@@ -437,7 +437,7 @@ void fld_cache_insert(struct fld_cache *cache,
         /* Add new entry to cache and lru list. */
         fld_cache_entry_add(cache, f_new, prev);
 out:
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         EXIT;
 }
 
@@ -448,15 +448,15 @@ int fld_cache_lookup(struct fld_cache *cache,
                      const seqno_t seq, struct lu_seq_range *range)
 {
         struct fld_cache_entry *flde;
-        struct list_head *head;
+        cfs_list_t *head;
         ENTRY;
 
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         head = &cache->fci_entries_head;
 
         cache->fci_stat.fst_count++;
-        list_for_each_entry(flde, head, fce_list) {
+        cfs_list_for_each_entry(flde, head, fce_list) {
                 if (flde->fce_range.lsr_start > seq)
                         break;
 
@@ -464,12 +464,12 @@ int fld_cache_lookup(struct fld_cache *cache,
                         *range = flde->fce_range;
 
                         /* update position of this entry in lru list. */
-                        list_move(&flde->fce_lru, &cache->fci_lru);
+                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                         cache->fci_stat.fst_cache++;
-                        spin_unlock(&cache->fci_lock);
+                        cfs_spin_unlock(&cache->fci_lock);
                         RETURN(0);
                 }
         }
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         RETURN(-ENOENT);
 }
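
A hedged usage sketch of the lookup/insert pair as it reads after this patch; fld_client_rpc_example() stands in for whatever the caller does on a miss and is purely hypothetical:

/* Hypothetical miss path, declared only for this sketch. */
static int fld_client_rpc_example(seqno_t seq, struct lu_seq_range *range);

/* Hypothetical caller: consult the cache first, fall back to a remote
 * lookup on -ENOENT, then cache the returned range for next time. */
static int fld_lookup_example(struct fld_cache *cache, seqno_t seq,
                              struct lu_seq_range *range)
{
        int rc;

        rc = fld_cache_lookup(cache, seq, range);
        if (rc == 0)
                return 0;  /* hit: *range filled, entry moved to LRU head */

        rc = fld_client_rpc_example(seq, range);
        if (rc == 0)
                fld_cache_insert(cache, range);

        return rc;
}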