LU-1866 lfsck: enhance otable-based iteration
[fs/lustre-release.git] / lustre / fld / fld_cache.c
index ccfd381..e6ff0a7 100644 (file)
@@ -26,6 +26,8 @@
 /*
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -83,7 +85,7 @@ struct fld_cache *fld_cache_init(const char *name,
         CFS_INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
-       spin_lock_init(&cache->fci_lock);
+       rwlock_init(&cache->fci_lock);
 
         strncpy(cache->fci_name, name,
                 sizeof(cache->fci_name));
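The fci_lock conversion in this hunk is the core of the patch: FLD cache lookups vastly outnumber updates, so replacing the spinlock with a rwlock_t lets readers run concurrently while inserts and deletes still serialize. A minimal sketch of that discipline, using hypothetical demo_ names rather than the real fld structures:

#include <linux/spinlock.h>

struct demo_cache {
	rwlock_t	dc_lock;	/* was spinlock_t before this patch */
	int		dc_count;
};

static void demo_cache_init(struct demo_cache *dc)
{
	rwlock_init(&dc->dc_lock);
	dc->dc_count = 0;
}

static int demo_cache_peek(struct demo_cache *dc)
{
	int count;

	read_lock(&dc->dc_lock);	/* readers may hold this concurrently */
	count = dc->dc_count;
	read_unlock(&dc->dc_lock);
	return count;
}

static void demo_cache_bump(struct demo_cache *dc)
{
	write_lock(&dc->dc_lock);	/* exclusive: excludes readers too */
	dc->dc_count++;
	write_unlock(&dc->dc_lock);
}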
@@ -131,13 +133,13 @@ void fld_cache_fini(struct fld_cache *cache)
 /**
  * delete given node from list.
  */
-static inline void fld_cache_entry_delete(struct fld_cache *cache,
-                                          struct fld_cache_entry *node)
+void fld_cache_entry_delete(struct fld_cache *cache,
+                           struct fld_cache_entry *node)
 {
-        cfs_list_del(&node->fce_list);
-        cfs_list_del(&node->fce_lru);
-        cache->fci_cache_count--;
-        OBD_FREE_PTR(node);
+       cfs_list_del(&node->fce_list);
+       cfs_list_del(&node->fce_lru);
+       cache->fci_cache_count--;
+       OBD_FREE_PTR(node);
 }
 
 /**
@@ -261,10 +263,10 @@ void fld_cache_flush(struct fld_cache *cache)
 {
        ENTRY;
 
-       spin_lock(&cache->fci_lock);
+       write_lock(&cache->fci_lock);
        cache->fci_cache_size = 0;
        fld_cache_shrink(cache);
-       spin_unlock(&cache->fci_lock);
+       write_unlock(&cache->fci_lock);
 
        EXIT;
 }
@@ -316,9 +318,9 @@ void fld_cache_punch_hole(struct fld_cache *cache,
 /**
  * handle range overlap in fld cache.
  */
-void fld_cache_overlap_handle(struct fld_cache *cache,
-                              struct fld_cache_entry *f_curr,
-                              struct fld_cache_entry *f_new)
+static void fld_cache_overlap_handle(struct fld_cache *cache,
+                               struct fld_cache_entry *f_curr,
+                               struct fld_cache_entry *f_new)
 {
         const struct lu_seq_range *range = &f_new->fce_range;
         const seqno_t new_start  = range->lsr_start;
@@ -377,71 +379,160 @@ void fld_cache_overlap_handle(struct fld_cache *cache,
                        PRANGE(range),PRANGE(&f_curr->fce_range));
 }
 
+struct fld_cache_entry
+*fld_cache_entry_create(const struct lu_seq_range *range)
+{
+       struct fld_cache_entry *f_new;
+
+       LASSERT(range_is_sane(range));
+
+       OBD_ALLOC_PTR(f_new);
+       if (!f_new)
+               RETURN(ERR_PTR(-ENOMEM));
+
+       f_new->fce_range = *range;
+       RETURN(f_new);
+}
+
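fld_cache_entry_create() reports failure through the pointer itself, using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, which is why fld_cache_insert() below checks IS_ERR() rather than NULL. A hypothetical caller, sketched for illustration only:

static int demo_prealloc(const struct lu_seq_range *range)
{
	struct fld_cache_entry *flde;

	flde = fld_cache_entry_create(range);
	if (IS_ERR(flde))
		return PTR_ERR(flde);	/* currently only -ENOMEM */

	/* ... pass flde to an insert path, which takes ownership ... */
	return 0;
}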
 /**
  * Insert FLD entry in FLD cache.
  *
  * This function handles all cases of merging and breaking up of
  * ranges.
  */
-void fld_cache_insert(struct fld_cache *cache,
-                      const struct lu_seq_range *range)
+int fld_cache_insert_nolock(struct fld_cache *cache,
+                           struct fld_cache_entry *f_new)
 {
-        struct fld_cache_entry *f_new;
-        struct fld_cache_entry *f_curr;
-        struct fld_cache_entry *n;
-        cfs_list_t *head;
-        cfs_list_t *prev = NULL;
-        const seqno_t new_start  = range->lsr_start;
-        const seqno_t new_end  = range->lsr_end;
-        __u32 new_flags  = range->lsr_flags;
-        ENTRY;
+       struct fld_cache_entry *f_curr;
+       struct fld_cache_entry *n;
+       cfs_list_t *head;
+       cfs_list_t *prev = NULL;
+       const seqno_t new_start  = f_new->fce_range.lsr_start;
+       const seqno_t new_end  = f_new->fce_range.lsr_end;
+       __u32 new_flags  = f_new->fce_range.lsr_flags;
+       ENTRY;
 
-        LASSERT(range_is_sane(range));
+       /*
+        * Duplicate entries are eliminated during insert, so there
+        * is no need to search for the new entry before starting
+        * the insertion loop.
+        */
+
+       if (!cache->fci_no_shrink)
+               fld_cache_shrink(cache);
+
+       head = &cache->fci_entries_head;
+
+       cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
+               /* stop: the new range ends before this entry starts */
+               if (new_end < f_curr->fce_range.lsr_start ||
+                  (new_end == f_curr->fce_range.lsr_start &&
+                   new_flags != f_curr->fce_range.lsr_flags))
+                       break;
+
+               prev = &f_curr->fce_list;
+               /* check if this range is to left of new range. */
+               if (new_start < f_curr->fce_range.lsr_end &&
+                   new_flags == f_curr->fce_range.lsr_flags) {
+                       fld_cache_overlap_handle(cache, f_curr, f_new);
+                       goto out;
+               }
+       }
 
-        /* Allocate new entry. */
-        OBD_ALLOC_PTR(f_new);
-        if (!f_new) {
-                EXIT;
-                return;
-        }
+       if (prev == NULL)
+               prev = head;
 
-        f_new->fce_range = *range;
+       CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
+       /* Add new entry to cache and lru list. */
+       fld_cache_entry_add(cache, f_new, prev);
+out:
+       RETURN(0);
+}
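The loop above relies on fci_entries_head staying sorted by lsr_start: it walks until the new range would fall entirely before the current entry, remembering the last entry passed as the insertion point, and diverts to fld_cache_overlap_handle() when an entry with the same flags overlaps. A worked example with illustrative values, treating ranges as half-open [start..end) intervals as the comparisons above assume, and with all flags equal:

	cache (sorted): [0..100) [200..300)        new: [120..150)

	f_curr = [0..100):   new_end 150 >= lsr_start 0, and
	                     new_start 120 >= lsr_end 100 -> no overlap;
	                     prev = [0..100)
	f_curr = [200..300): new_end 150 < lsr_start 200 -> break
	result: new entry linked after prev ->
	        [0..100) [120..150) [200..300)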
 
-        /*
-         * Duplicate entries are eliminated in inset op.
-         * So we don't need to search new entry before starting insertion loop.
-         */
+int fld_cache_insert(struct fld_cache *cache,
+                    const struct lu_seq_range *range)
+{
+       struct fld_cache_entry  *flde;
+       int rc;
 
-       spin_lock(&cache->fci_lock);
-        fld_cache_shrink(cache);
+       flde = fld_cache_entry_create(range);
+       if (IS_ERR(flde))
+               RETURN(PTR_ERR(flde));
 
-        head = &cache->fci_entries_head;
+       write_lock(&cache->fci_lock);
+       rc = fld_cache_insert_nolock(cache, flde);
+       write_unlock(&cache->fci_lock);
+       if (rc)
+               OBD_FREE_PTR(flde);
 
-        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
-                /* add list if next is end of list */
-                if (new_end < f_curr->fce_range.lsr_start ||
-                   (new_end == f_curr->fce_range.lsr_start &&
-                    new_flags != f_curr->fce_range.lsr_flags))
-                        break;
+       RETURN(rc);
+}
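Splitting entry allocation (fld_cache_entry_create()), the locked wrapper, and fld_cache_insert_nolock() lets a caller that must combine several cache operations atomically take fci_lock once around the _nolock variants. A sketch of such a caller, under the hypothetical name demo_lookup_or_insert(); fld_cache_entry_lookup_nolock() is defined further down in this patch:

static int demo_lookup_or_insert(struct fld_cache *cache,
				 struct fld_cache_entry *f_new)
{
	struct fld_cache_entry *flde;
	int rc = 0;

	write_lock(&cache->fci_lock);
	/* lookup and insert under one lock hold: no window for races */
	flde = fld_cache_entry_lookup_nolock(cache, &f_new->fce_range);
	if (flde == NULL)
		rc = fld_cache_insert_nolock(cache, f_new);
	write_unlock(&cache->fci_lock);
	return rc;
}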
 
-                prev = &f_curr->fce_list;
-                /* check if this range is to left of new range. */
-                if (new_start < f_curr->fce_range.lsr_end &&
-                    new_flags == f_curr->fce_range.lsr_flags) {
-                        fld_cache_overlap_handle(cache, f_curr, f_new);
-                        goto out;
-                }
-        }
+void fld_cache_delete_nolock(struct fld_cache *cache,
+                     const struct lu_seq_range *range)
+{
+       struct fld_cache_entry *flde;
+       struct fld_cache_entry *tmp;
+       cfs_list_t *head;
 
-        if (prev == NULL)
-                prev = head;
+       head = &cache->fci_entries_head;
+       cfs_list_for_each_entry_safe(flde, tmp, head, fce_list) {
+               /* delete the entry matching the given range */
+               if (range->lsr_start == flde->fce_range.lsr_start ||
+                  (range->lsr_end == flde->fce_range.lsr_end &&
+                   range->lsr_flags == flde->fce_range.lsr_flags)) {
+                       fld_cache_entry_delete(cache, flde);
+                       break;
+               }
+       }
+}
 
-        CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
-        /* Add new entry to cache and lru list. */
-        fld_cache_entry_add(cache, f_new, prev);
-out:
-       spin_unlock(&cache->fci_lock);
-       EXIT;
+/**
+ * Delete FLD entry from FLD cache.
+ */
+void fld_cache_delete(struct fld_cache *cache,
+                     const struct lu_seq_range *range)
+{
+       write_lock(&cache->fci_lock);
+       fld_cache_delete_nolock(cache, range);
+       write_unlock(&cache->fci_lock);
+}
+
+struct fld_cache_entry
+*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
+                             struct lu_seq_range *range)
+{
+       struct fld_cache_entry *flde;
+       struct fld_cache_entry *got = NULL;
+       cfs_list_t *head;
+
+       head = &cache->fci_entries_head;
+       cfs_list_for_each_entry(flde, head, fce_list) {
+               if (range->lsr_start == flde->fce_range.lsr_start ||
+                  (range->lsr_end == flde->fce_range.lsr_end &&
+                   range->lsr_flags == flde->fce_range.lsr_flags)) {
+                       got = flde;
+                       break;
+               }
+       }
+
+       RETURN(got);
+}
+
+/**
+ * Lookup the given \a range in the FLD cache.
+ */
+struct fld_cache_entry
+*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
+{
+       struct fld_cache_entry *got = NULL;
+       ENTRY;
+
+       read_lock(&cache->fci_lock);
+       got = fld_cache_entry_lookup_nolock(cache, range);
+       read_unlock(&cache->fci_lock);
+       RETURN(got);
 }
 
 /**
@@ -451,27 +542,31 @@ int fld_cache_lookup(struct fld_cache *cache,
                     const seqno_t seq, struct lu_seq_range *range)
 {
        struct fld_cache_entry *flde;
+       struct fld_cache_entry *prev = NULL;
        cfs_list_t *head;
        ENTRY;
 
-       spin_lock(&cache->fci_lock);
-        head = &cache->fci_entries_head;
+       read_lock(&cache->fci_lock);
+       head = &cache->fci_entries_head;
 
-        cache->fci_stat.fst_count++;
-        cfs_list_for_each_entry(flde, head, fce_list) {
-                if (flde->fce_range.lsr_start > seq)
-                        break;
+       cache->fci_stat.fst_count++;
+       cfs_list_for_each_entry(flde, head, fce_list) {
+               if (flde->fce_range.lsr_start > seq) {
+                       if (prev != NULL)
+                               memcpy(range, &prev->fce_range, sizeof(*range));
+                       break;
+               }
 
+               prev = flde;
                 if (range_within(&flde->fce_range, seq)) {
                         *range = flde->fce_range;
 
-                        /* update position of this entry in lru list. */
-                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                         cache->fci_stat.fst_cache++;
-                       spin_unlock(&cache->fci_lock);
+                       read_unlock(&cache->fci_lock);
                        RETURN(0);
                }
        }
-       spin_unlock(&cache->fci_lock);
+       read_unlock(&cache->fci_lock);
        RETURN(-ENOENT);
 }
+
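Two behavioral points in the final hunk are worth noting. First, the cfs_list_move() that refreshed the LRU position on every hit is gone, which is what makes it safe to take fci_lock in read (shared) mode here. Second, when the walk passes the sequence without an exact hit, the nearest preceding entry is now copied back, so a caller still learns the closest known range even though the return code is -ENOENT. A hypothetical caller pattern, where demo_remote_lookup() stands in for a real FLD RPC and is not part of this patch:

/* hypothetical stand-in for the client RPC path */
static int demo_remote_lookup(seqno_t seq, struct lu_seq_range *range);

static int demo_fld_lookup(struct fld_cache *cache, seqno_t seq,
			   struct lu_seq_range *range)
{
	int rc;

	rc = fld_cache_lookup(cache, seq, range);
	if (rc == -ENOENT)		/* cache miss: ask the server */
		rc = demo_remote_lookup(seq, range);
	return rc;
}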