b=19486 add server identifier into lu_seq_range.
[fs/lustre-release.git] lustre/fld/fld_cache.c
index 359f5f2..4dc1f70 100644
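
For context: this commit renames the lu_seq_range member lsr_mdt to lsr_index, so a range can name any server (MDT or OST) rather than an MDT only. A minimal sketch of the structure as it would look after the change; the field names come from the hunks below, while the exact layout and the lsr_flags member are assumptions based on the b=19486 summary, not shown in this file:

struct lu_seq_range {
        __u64 lsr_start;   /* first seqno in the range (inclusive) */
        __u64 lsr_end;     /* end of the range (exclusive) */
        __u32 lsr_index;   /* server index; formerly lsr_mdt */
        __u32 lsr_flags;   /* assumed: marks the server type, MDT vs. OST */
};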
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -88,7 +88,7 @@ struct fld_cache *fld_cache_init(const char *name,
         CFS_INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
-        spin_lock_init(&cache->fci_lock);
+        cfs_spin_lock_init(&cache->fci_lock);
 
         strncpy(cache->fci_name, name,
                 sizeof(cache->fci_name));
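
The spin_lock_init -> cfs_spin_lock_init change (and the matching lock/unlock changes below) moves this file onto the libcfs portability layer. A hedged sketch of how such wrappers would plausibly reduce to the native primitives on a Linux build; this is a reconstruction, not the verbatim libcfs header:

typedef spinlock_t cfs_spinlock_t;
#define cfs_spin_lock_init(lock)    spin_lock_init(lock)
#define cfs_spin_lock(lock)         spin_lock(lock)
#define cfs_spin_unlock(lock)       spin_unlock(lock)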
@@ -139,8 +139,8 @@ void fld_cache_fini(struct fld_cache *cache)
 static inline void fld_cache_entry_delete(struct fld_cache *cache,
                                           struct fld_cache_entry *node)
 {
-        list_del(&node->fce_list);
-        list_del(&node->fce_lru);
+        cfs_list_del(&node->fce_list);
+        cfs_list_del(&node->fce_lru);
         cache->fci_cache_count--;
         OBD_FREE_PTR(node);
 }
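
Likewise, the list operations move to cfs_list_t wrappers. On Linux these would plausibly be thin aliases for the kernel list API (again a sketch under that assumption):

typedef struct list_head cfs_list_t;
#define cfs_list_del(pos)                 list_del(pos)
#define cfs_list_add(new, head)           list_add(new, head)
#define cfs_list_move(pos, head)          list_move(pos, head)
#define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member)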
@@ -154,12 +154,12 @@ static void fld_fix_new_list(struct fld_cache *cache)
         struct fld_cache_entry *f_next;
         struct lu_seq_range *c_range;
         struct lu_seq_range *n_range;
-        struct list_head *head = &cache->fci_entries_head;
+        cfs_list_t *head = &cache->fci_entries_head;
         ENTRY;
 
 restart_fixup:
 
-        list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
                 c_range = &f_curr->fce_range;
                 n_range = &f_next->fce_range;
 
@@ -171,7 +171,7 @@ restart_fixup:
 
                 /* check merge possibility with next range */
                 if (c_range->lsr_end == n_range->lsr_start) {
-                        if (c_range->lsr_mdt != n_range->lsr_mdt)
+                        if (c_range->lsr_index != n_range->lsr_index)
                                 continue;
                         n_range->lsr_start = c_range->lsr_start;
                         fld_cache_entry_delete(cache, f_curr);
@@ -181,7 +181,7 @@ restart_fixup:
                 /* check if current range overlaps with next range. */
                 if (n_range->lsr_start < c_range->lsr_end) {
 
-                        if (c_range->lsr_mdt == n_range->lsr_mdt) {
+                        if (c_range->lsr_index == n_range->lsr_index) {
                                 n_range->lsr_start = c_range->lsr_start;
                                 n_range->lsr_end = max(c_range->lsr_end,
                                                        n_range->lsr_end);
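
The two branches above implement one rule: adjacent or overlapping ranges may only merge when they map to the same server index. A standalone illustration with invented values, mirroring the checks in fld_fix_new_list():

/* [0, 100) @ index 1  +  [100, 200) @ index 1  ->  merged [0, 200) @ index 1
 * [0, 100) @ index 1  +  [100, 200) @ index 2  ->  kept as two entries     */
static int fld_ranges_can_merge(const struct lu_seq_range *a,
                                const struct lu_seq_range *b)
{
        /* hypothetical helper, not part of this file */
        return a->lsr_end == b->lsr_start && a->lsr_index == b->lsr_index;
}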
@@ -214,10 +214,10 @@ restart_fixup:
  */
 static inline void fld_cache_entry_add(struct fld_cache *cache,
                                        struct fld_cache_entry *f_new,
-                                       struct list_head *pos)
+                                       cfs_list_t *pos)
 {
-        list_add(&f_new->fce_list, pos);
-        list_add(&f_new->fce_lru, &cache->fci_lru);
+        cfs_list_add(&f_new->fce_list, pos);
+        cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
 
         cache->fci_cache_count++;
         fld_fix_new_list(cache);
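
One detail worth noting, assuming cfs_list_add keeps list_add's insert-after-the-given-node semantics: fld_cache_insert below passes the entry that should precede the new range, so this call keeps fci_entries_head sorted by lsr_start before fld_fix_new_list() runs its merge pass.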
@@ -230,7 +230,7 @@ static inline void fld_cache_entry_add(struct fld_cache *cache,
 static int fld_cache_shrink(struct fld_cache *cache)
 {
         struct fld_cache_entry *flde;
-        struct list_head *curr;
+        cfs_list_t *curr;
         int num = 0;
         ENTRY;
 
@@ -244,7 +244,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
         while (cache->fci_cache_count + cache->fci_threshold >
                cache->fci_cache_size && curr != &cache->fci_lru) {
 
-                flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+                flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
                 curr = curr->prev;
                 fld_cache_entry_delete(cache, flde);
                 num++;
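
The eviction condition is count + threshold > size, walking the LRU list from its tail. A worked example with invented numbers: with fci_cache_size = 100, fci_threshold = 10, and fci_cache_count = 105, the loop deletes the 15 least recently used entries and stops at a count of 90, since 90 + 10 is no longer greater than 100.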
@@ -263,10 +263,10 @@ void fld_cache_flush(struct fld_cache *cache)
 {
         ENTRY;
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         cache->fci_cache_size = 0;
         fld_cache_shrink(cache);
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
 
         EXIT;
 }
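
Note how fld_cache_flush() reuses the shrinker: with fci_cache_size forced to 0, the count + threshold > size test holds for every remaining entry, so the walk only stops when the curr != &cache->fci_lru guard finds the LRU empty, i.e. the whole cache is dropped.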
@@ -302,7 +302,7 @@ void fld_cache_punch_hole(struct fld_cache *cache,
         /* fldt */
         fldt->fce_range.lsr_start = new_end;
         fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
-        fldt->fce_range.lsr_mdt = f_curr->fce_range.lsr_mdt;
+        fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
 
         /* f_curr */
         f_curr->fce_range.lsr_end = new_start;
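
A before/after picture of the hole punch, following the three assignments above (values invented):

/*   before:  f_curr = [0, 100) @ index 1
 *   insert:  f_new  = [40, 60) @ index 2
 *   after:   f_curr = [0, 40) @ 1   f_new = [40, 60) @ 2   fldt = [60, 100) @ 1
 */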
@@ -325,12 +325,12 @@ void fld_cache_overlap_handle(struct fld_cache *cache,
         const struct lu_seq_range *range = &f_new->fce_range;
         const seqno_t new_start  = range->lsr_start;
         const seqno_t new_end  = range->lsr_end;
-        const mdsno_t mdt = range->lsr_mdt;
+        const mdsno_t mdt = range->lsr_index;
 
         /* This is the overlap case. These checks handle overlap with the
          * prev range only; fixup will handle overlap with the next range. */
 
-        if (f_curr->fce_range.lsr_mdt == mdt) {
+        if (f_curr->fce_range.lsr_index == mdt) {
                 f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
                                                   new_start);
 
@@ -391,8 +391,8 @@ void fld_cache_insert(struct fld_cache *cache,
         struct fld_cache_entry *f_new;
         struct fld_cache_entry *f_curr;
         struct fld_cache_entry *n;
-        struct list_head *head;
-        struct list_head *prev = NULL;
+        cfs_list_t *head;
+        cfs_list_t *prev = NULL;
         const seqno_t new_start  = range->lsr_start;
         const seqno_t new_end  = range->lsr_end;
         ENTRY;
@@ -413,12 +413,12 @@ void fld_cache_insert(struct fld_cache *cache,
          * So we don't need to search for the new entry before the insertion loop.
          */
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         fld_cache_shrink(cache);
 
         head = &cache->fci_entries_head;
 
-        list_for_each_entry_safe(f_curr, n, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
                /* the new range ends before this entry begins: insert here */
                 if (new_end < f_curr->fce_range.lsr_start)
                         break;
@@ -437,7 +437,7 @@ void fld_cache_insert(struct fld_cache *cache,
         /* Add new entry to cache and lru list. */
         fld_cache_entry_add(cache, f_new, prev);
 out:
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         EXIT;
 }
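
A hedged sketch of a caller, using only the fld_cache_insert signature visible above (values invented):

struct lu_seq_range range = {
        .lsr_start = 0x400,
        .lsr_end   = 0x800,          /* covers [0x400, 0x800), end exclusive */
        .lsr_index = 2,              /* server index, post-b=19486 */
};
fld_cache_insert(cache, &range);     /* takes fci_lock internally */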
 
@@ -448,15 +448,15 @@ int fld_cache_lookup(struct fld_cache *cache,
                      const seqno_t seq, struct lu_seq_range *range)
 {
         struct fld_cache_entry *flde;
-        struct list_head *head;
+        cfs_list_t *head;
         ENTRY;
 
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         head = &cache->fci_entries_head;
 
         cache->fci_stat.fst_count++;
-        list_for_each_entry(flde, head, fce_list) {
+        cfs_list_for_each_entry(flde, head, fce_list) {
                 if (flde->fce_range.lsr_start > seq)
                         break;
 
@@ -464,12 +464,12 @@ int fld_cache_lookup(struct fld_cache *cache,
                         *range = flde->fce_range;
 
                         /* update position of this entry in lru list. */
-                        list_move(&flde->fce_lru, &cache->fci_lru);
+                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                         cache->fci_stat.fst_cache++;
-                        spin_unlock(&cache->fci_lock);
+                        cfs_spin_unlock(&cache->fci_lock);
                         RETURN(0);
                 }
         }
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         RETURN(-ENOENT);
 }
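
And the matching lookup, again assuming only the signature shown above:

struct lu_seq_range found;
int rc = fld_cache_lookup(cache, 0x500ULL, &found);
if (rc == 0) {
        /* cache hit: found.lsr_index names the server backing seq 0x500 */
} else {
        /* rc == -ENOENT: the caller must ask the FLD server instead */
}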