LU-56 ptlrpc: cleanup of ptlrpc_unregister_service
[fs/lustre-release.git] / lustre / fld / fld_cache.c
index 359f5f2..11cf0c5 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,7 +24,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -41,9 +39,6 @@
  * Author: Yury Umanets <umka@clusterfs.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_FLD
 
 #ifdef __KERNEL__
@@ -88,7 +83,7 @@ struct fld_cache *fld_cache_init(const char *name,
         CFS_INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
-        spin_lock_init(&cache->fci_lock);
+        cfs_spin_lock_init(&cache->fci_lock);
 
         strncpy(cache->fci_name, name,
                 sizeof(cache->fci_name));
@@ -139,8 +134,8 @@ void fld_cache_fini(struct fld_cache *cache)
 static inline void fld_cache_entry_delete(struct fld_cache *cache,
                                           struct fld_cache_entry *node)
 {
-        list_del(&node->fce_list);
-        list_del(&node->fce_lru);
+        cfs_list_del(&node->fce_list);
+        cfs_list_del(&node->fce_lru);
         cache->fci_cache_count--;
         OBD_FREE_PTR(node);
 }
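
The pattern throughout this patch is mechanical: bare Linux primitives (struct list_head, spin_lock, list_del, ...) are swapped for their cfs_-prefixed libcfs equivalents so the generic FLD code can build against non-Linux ports of the stack. On a Linux kernel build these wrappers are presumably thin aliases; a minimal sketch of how such a mapping could look (a hypothetical reconstruction for illustration, not the actual libcfs headers):

#include <linux/list.h>
#include <linux/spinlock.h>

typedef struct list_head cfs_list_t;
typedef spinlock_t       cfs_spinlock_t;

#define cfs_spin_lock_init(lock)        spin_lock_init(lock)
#define cfs_spin_lock(lock)             spin_lock(lock)
#define cfs_spin_unlock(lock)           spin_unlock(lock)

#define cfs_list_add(entry, head)       list_add(entry, head)
#define cfs_list_del(entry)             list_del(entry)
#define cfs_list_move(entry, head)      list_move(entry, head)
#define cfs_list_entry(ptr, type, mem)  list_entry(ptr, type, mem)
#define cfs_list_for_each_entry(pos, head, mem) \
        list_for_each_entry(pos, head, mem)
#define cfs_list_for_each_entry_safe(pos, n, head, mem) \
        list_for_each_entry_safe(pos, n, head, mem)

On other targets (for example a userspace build) the same names can map to pthread-based or no-op implementations, which is the point of routing shared code through the prefixed forms.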
@@ -154,12 +149,12 @@ static void fld_fix_new_list(struct fld_cache *cache)
         struct fld_cache_entry *f_next;
         struct lu_seq_range *c_range;
         struct lu_seq_range *n_range;
-        struct list_head *head = &cache->fci_entries_head;
+        cfs_list_t *head = &cache->fci_entries_head;
         ENTRY;
 
 restart_fixup:
 
-        list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
                 c_range = &f_curr->fce_range;
                 n_range = &f_next->fce_range;
 
@@ -171,7 +166,7 @@ restart_fixup:
 
                 /* check merge possibility with next range */
                 if (c_range->lsr_end == n_range->lsr_start) {
-                        if (c_range->lsr_mdt != n_range->lsr_mdt)
+                        if (c_range->lsr_index != n_range->lsr_index)
                                 continue;
                         n_range->lsr_start = c_range->lsr_start;
                         fld_cache_entry_delete(cache, f_curr);
@@ -181,7 +176,7 @@ restart_fixup:
                 /* check if current range overlaps with next range. */
                 if (n_range->lsr_start < c_range->lsr_end) {
 
-                        if (c_range->lsr_mdt == n_range->lsr_mdt) {
+                        if (c_range->lsr_index == n_range->lsr_index) {
                                 n_range->lsr_start = c_range->lsr_start;
                                 n_range->lsr_end = max(c_range->lsr_end,
                                                        n_range->lsr_end);
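
The two hunks above rename the comparison field from lsr_mdt to lsr_index without changing the merge logic of fld_fix_new_list(): two consecutive cached ranges are coalesced only when they touch or overlap and carry the same index. A standalone sketch of that rule, using a simplified stand-in for struct lu_seq_range rather than the real type:

#include <stdbool.h>

/* Simplified stand-in for struct lu_seq_range; illustration only. */
struct seq_range {
        unsigned long long start;   /* inclusive */
        unsigned long long end;     /* exclusive */
        unsigned int       index;   /* server the range maps to */
};

/* Coalesce curr into next when they touch or overlap and share an
 * index, mirroring the same-index branches of the fixup loop above.
 * Assumes the list is sorted by start, so curr->start <= next->start.
 * Returns true when the caller should delete curr. */
static bool range_coalesce(const struct seq_range *curr,
                           struct seq_range *next)
{
        if (curr->index != next->index)
                return false;           /* different servers: keep both */
        if (curr->end < next->start)
                return false;           /* disjoint: nothing to merge */

        next->start = curr->start;
        if (curr->end > next->end)
                next->end = curr->end;
        return true;
}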
@@ -214,10 +209,10 @@ restart_fixup:
  */
 static inline void fld_cache_entry_add(struct fld_cache *cache,
                                        struct fld_cache_entry *f_new,
-                                       struct list_head *pos)
+                                       cfs_list_t *pos)
 {
-        list_add(&f_new->fce_list, pos);
-        list_add(&f_new->fce_lru, &cache->fci_lru);
+        cfs_list_add(&f_new->fce_list, pos);
+        cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
 
         cache->fci_cache_count++;
         fld_fix_new_list(cache);
@@ -230,7 +225,7 @@ static inline void fld_cache_entry_add(struct fld_cache *cache,
 static int fld_cache_shrink(struct fld_cache *cache)
 {
         struct fld_cache_entry *flde;
-        struct list_head *curr;
+        cfs_list_t *curr;
         int num = 0;
         ENTRY;
 
@@ -244,7 +239,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
         while (cache->fci_cache_count + cache->fci_threshold >
                cache->fci_cache_size && curr != &cache->fci_lru) {
 
-                flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+                flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
                 curr = curr->prev;
                 fld_cache_entry_delete(cache, flde);
                 num++;
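
fld_cache_shrink() walks the LRU list from the cold end and deletes entries until fci_cache_count + fci_threshold no longer exceeds fci_cache_size; the threshold buys some headroom so the cache is not shrunk again immediately. A self-contained toy version of the same eviction loop, with a hand-rolled circular doubly linked list standing in for cfs_list_t (names are illustrative, not the Lustre structures):

#include <stdlib.h>

struct lru_node {
        struct lru_node *prev, *next;
};

struct lru_cache {
        struct lru_node head;      /* head.next is hottest, head.prev coldest */
        unsigned int    count;
        unsigned int    size;      /* budget */
        unsigned int    threshold; /* extra headroom reclaimed per shrink */
};

/* Evict cold entries until count + threshold <= size, mirroring the
 * while loop in fld_cache_shrink() above; returns how many were freed. */
static unsigned int lru_shrink(struct lru_cache *cache)
{
        struct lru_node *curr = cache->head.prev;   /* coldest entry */
        unsigned int freed = 0;

        while (cache->count + cache->threshold > cache->size &&
               curr != &cache->head) {
                struct lru_node *victim = curr;

                curr = curr->prev;
                victim->prev->next = victim->next;   /* unlink victim */
                victim->next->prev = victim->prev;
                cache->count--;
                free(victim);
                freed++;
        }
        return freed;
}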
@@ -263,10 +258,10 @@ void fld_cache_flush(struct fld_cache *cache)
 {
         ENTRY;
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         cache->fci_cache_size = 0;
         fld_cache_shrink(cache);
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
 
         EXIT;
 }
@@ -302,7 +297,7 @@ void fld_cache_punch_hole(struct fld_cache *cache,
         /* fldt */
         fldt->fce_range.lsr_start = new_end;
         fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
-        fldt->fce_range.lsr_mdt = f_curr->fce_range.lsr_mdt;
+        fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
 
         /* f_curr */
         f_curr->fce_range.lsr_end = new_start;
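
fld_cache_punch_hole() splits one cached range in two when a new range with a different index lands strictly inside it: f_curr keeps the low part, the freshly allocated fldt takes the high part, and the new entry is inserted into the hole between them. The arithmetic, with the same simplified stand-in type as the earlier sketch:

/* Simplified stand-in for struct lu_seq_range, as above. */
struct seq_range {
        unsigned long long start, end;   /* the range [start, end) */
        unsigned int       index;
};

/* Split low = [a, d) around the hole [b, c), where a < b < c < d:
 * afterwards low = [a, b) and high = [c, d), both keeping low's
 * index, and the caller links the new [b, c) entry in between. */
static void punch_hole(struct seq_range *low, struct seq_range *high,
                       unsigned long long hole_start,
                       unsigned long long hole_end)
{
        high->start = hole_end;          /* fldt in the hunk above */
        high->end   = low->end;
        high->index = low->index;

        low->end    = hole_start;        /* f_curr keeps the low part */
}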
@@ -325,12 +320,12 @@ void fld_cache_overlap_handle(struct fld_cache *cache,
         const struct lu_seq_range *range = &f_new->fce_range;
         const seqno_t new_start  = range->lsr_start;
         const seqno_t new_end  = range->lsr_end;
-        const mdsno_t mdt = range->lsr_mdt;
+        const mdsno_t mdt = range->lsr_index;
 
         /* This is the overlap case; it checks overlap with the prev range
          * only. fld_fix_new_list() handles overlap with the next range. */
 
-        if (f_curr->fce_range.lsr_mdt == mdt) {
+        if (f_curr->fce_range.lsr_index == mdt) {
                 f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
                                                   new_start);
 
@@ -391,8 +386,8 @@ void fld_cache_insert(struct fld_cache *cache,
         struct fld_cache_entry *f_new;
         struct fld_cache_entry *f_curr;
         struct fld_cache_entry *n;
-        struct list_head *head;
-        struct list_head *prev = NULL;
+        cfs_list_t *head;
+        cfs_list_t *prev = NULL;
         const seqno_t new_start  = range->lsr_start;
         const seqno_t new_end  = range->lsr_end;
         ENTRY;
@@ -413,12 +408,12 @@ void fld_cache_insert(struct fld_cache *cache,
  * So we don't need to search for the new entry before the insertion loop.
          */
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         fld_cache_shrink(cache);
 
         head = &cache->fci_entries_head;
 
-        list_for_each_entry_safe(f_curr, n, head, fce_list) {
+        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
                 /* insertion point found: new range ends before this entry */
                 if (new_end < f_curr->fce_range.lsr_start)
                         break;
@@ -437,7 +432,7 @@ void fld_cache_insert(struct fld_cache *cache,
         /* Add new entry to cache and lru list. */
         fld_cache_entry_add(cache, f_new, prev);
 out:
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         EXIT;
 }
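
fld_cache_insert() depends on fci_entries_head staying sorted by lsr_start: after shrinking under fci_lock it walks the list, stops at the first entry that begins past the end of the new range, resolves overlaps along the way, and links the new node in after prev. Stripped of the overlap handling, the position search reduces to this pattern (toy singly linked list, hypothetical names):

/* Toy sorted position search mirroring the insertion loop above;
 * hypothetical types, overlap handling omitted. */
struct node {
        struct node        *next;
        unsigned long long  start, end;
};

/* Return the node after which [new_start, new_end) should be linked
 * so the list stays sorted by start. */
static struct node *insert_pos(struct node *head,
                               unsigned long long new_end)
{
        struct node *prev = head;
        struct node *curr;

        for (curr = head->next; curr != NULL; curr = curr->next) {
                if (new_end < curr->start)
                        break;           /* new range ends before curr */
                prev = curr;
        }
        return prev;
}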
 
@@ -448,15 +443,15 @@ int fld_cache_lookup(struct fld_cache *cache,
                      const seqno_t seq, struct lu_seq_range *range)
 {
         struct fld_cache_entry *flde;
-        struct list_head *head;
+        cfs_list_t *head;
         ENTRY;
 
 
-        spin_lock(&cache->fci_lock);
+        cfs_spin_lock(&cache->fci_lock);
         head = &cache->fci_entries_head;
 
         cache->fci_stat.fst_count++;
-        list_for_each_entry(flde, head, fce_list) {
+        cfs_list_for_each_entry(flde, head, fce_list) {
                 if (flde->fce_range.lsr_start > seq)
                         break;
 
@@ -464,12 +459,12 @@ int fld_cache_lookup(struct fld_cache *cache,
                         *range = flde->fce_range;
 
                         /* update position of this entry in lru list. */
-                        list_move(&flde->fce_lru, &cache->fci_lru);
+                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                         cache->fci_stat.fst_cache++;
-                        spin_unlock(&cache->fci_lock);
+                        cfs_spin_unlock(&cache->fci_lock);
                         RETURN(0);
                 }
         }
-        spin_unlock(&cache->fci_lock);
+        cfs_spin_unlock(&cache->fci_lock);
         RETURN(-ENOENT);
 }
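
fld_cache_lookup() exploits the same sorted order: it scans under fci_lock, gives up as soon as lsr_start passes the sequence being looked up, and on a hit promotes the entry to the head of the LRU so it survives the next shrink. A hypothetical caller sketch of the resulting lookup/insert protocol; fld_server_query() is a made-up stand-in for the RPC path in fld_request.c, not a real Lustre symbol:

/* Hypothetical caller: consult the cache first, fall back to the
 * server on a miss, and cache the answer for the next lookup. */
static int fld_locate(struct fld_cache *cache, seqno_t seq,
                      struct lu_seq_range *range)
{
        int rc;

        rc = fld_cache_lookup(cache, seq, range);
        if (rc == 0)
                return 0;               /* cache hit: *range is filled in */

        /* Cache miss (-ENOENT): ask the FLD server, then remember
         * the answer so the next lookup for this sequence hits. */
        rc = fld_server_query(seq, range);      /* hypothetical helper */
        if (rc == 0)
                fld_cache_insert(cache, range);
        return rc;
}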