LU-8468 kernel: kernel update RHEL7.2 [3.10.0-327.28.2.el7]
[fs/lustre-release.git] / lustre/fld/fld_cache.c
index b21ead4..8e4a691 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #define DEBUG_SUBSYSTEM S_FLD
 
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-# include <asm/div64.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-# include <libcfs/list.h>
-#endif
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
+#include <linux/math64.h>
 #include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <dt_object.h>
-#include <md_object.h>
-#include <lustre_req_layout.h>
 #include <lustre_fld.h>
 #include "fld_internal.h"
 
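The include hunk above drops the __KERNEL__/liblustre split and pulls in <linux/math64.h> where <asm/div64.h> used to be included. As a rough sketch of what that header provides (illustrative only; demo_avg() is a hypothetical name, not something this file defines):

#include <linux/math64.h>
#include <linux/types.h>

static u64 demo_avg(u64 total, u32 count)
{
	/* div_u64() does 64-by-32 division safely on 32-bit kernels,
	 * where a plain '/' on u64 operands would need libgcc helpers. */
	return count ? div_u64(total, count) : 0;
}
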
@@ -81,8 +66,8 @@ struct fld_cache *fld_cache_init(const char *name,
         if (cache == NULL)
                 RETURN(ERR_PTR(-ENOMEM));
 
-        CFS_INIT_LIST_HEAD(&cache->fci_entries_head);
-        CFS_INIT_LIST_HEAD(&cache->fci_lru);
+       INIT_LIST_HEAD(&cache->fci_entries_head);
+       INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
        rwlock_init(&cache->fci_lock);
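The hunk above replaces the libcfs CFS_INIT_LIST_HEAD() wrapper with the kernel's own INIT_LIST_HEAD(). A minimal sketch of what that primitive does, using hypothetical demo_* names rather than the real fld_cache structures:

#include <linux/list.h>

struct demo_cache {
	struct list_head entries_head;	/* stands in for fci_entries_head */
	struct list_head lru;		/* stands in for fci_lru */
};

static void demo_cache_setup(struct demo_cache *cache)
{
	/* An empty list_head points back at itself in both directions,
	 * which is all CFS_INIT_LIST_HEAD() ever wrapped. */
	INIT_LIST_HEAD(&cache->entries_head);
	INIT_LIST_HEAD(&cache->lru);
}
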
@@ -136,8 +121,8 @@ void fld_cache_fini(struct fld_cache *cache)
 void fld_cache_entry_delete(struct fld_cache *cache,
                            struct fld_cache_entry *node)
 {
-       cfs_list_del(&node->fce_list);
-       cfs_list_del(&node->fce_lru);
+       list_del(&node->fce_list);
+       list_del(&node->fce_lru);
        cache->fci_cache_count--;
        OBD_FREE_PTR(node);
 }
@@ -151,18 +136,18 @@ static void fld_fix_new_list(struct fld_cache *cache)
         struct fld_cache_entry *f_next;
         struct lu_seq_range *c_range;
         struct lu_seq_range *n_range;
-        cfs_list_t *head = &cache->fci_entries_head;
+       struct list_head *head = &cache->fci_entries_head;
         ENTRY;
 
 restart_fixup:
 
-        cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
-                c_range = &f_curr->fce_range;
-                n_range = &f_next->fce_range;
+       list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+               c_range = &f_curr->fce_range;
+               n_range = &f_next->fce_range;
 
-                LASSERT(range_is_sane(c_range));
-                if (&f_next->fce_list == head)
-                        break;
+               LASSERT(lu_seq_range_is_sane(c_range));
+               if (&f_next->fce_list == head)
+                       break;
 
                if (c_range->lsr_flags != n_range->lsr_flags)
                        continue;
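fld_fix_new_list() above unlinks and frees entries while it walks fci_entries_head, which is exactly what the _safe iterator is for: list_for_each_entry_safe() reads the next node before the loop body runs, so deleting the current node is allowed. A sketch under that assumption, with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {
	struct list_head	link;
	u64			start;
	u64			end;
};

static void demo_drop_empty(struct list_head *head)
{
	struct demo_entry *cur, *next;

	list_for_each_entry_safe(cur, next, head, link) {
		if (cur->start == cur->end) {	/* empty range */
			list_del(&cur->link);
			kfree(cur);	/* safe: 'next' was fetched before the free */
		}
	}
}
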
@@ -214,13 +199,13 @@ restart_fixup:
  */
 static inline void fld_cache_entry_add(struct fld_cache *cache,
                                        struct fld_cache_entry *f_new,
-                                       cfs_list_t *pos)
+                                      struct list_head *pos)
 {
-        cfs_list_add(&f_new->fce_list, pos);
-        cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
+       list_add(&f_new->fce_list, pos);
+       list_add(&f_new->fce_lru, &cache->fci_lru);
 
-        cache->fci_cache_count++;
-        fld_fix_new_list(cache);
+       cache->fci_cache_count++;
+       fld_fix_new_list(cache);
 }
 
 /**
@@ -230,7 +215,7 @@ static inline void fld_cache_entry_add(struct fld_cache *cache,
 static int fld_cache_shrink(struct fld_cache *cache)
 {
         struct fld_cache_entry *flde;
-        cfs_list_t *curr;
+       struct list_head *curr;
         int num = 0;
         ENTRY;
 
@@ -244,7 +229,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
         while (cache->fci_cache_count + cache->fci_threshold >
                cache->fci_cache_size && curr != &cache->fci_lru) {
 
-                flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
+               flde = list_entry(curr, struct fld_cache_entry, fce_lru);
                 curr = curr->prev;
                 fld_cache_entry_delete(cache, flde);
                 num++;
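Since fld_cache_entry_add() list_add()s new entries at the head of fci_lru, the tail of that list is the least recently added entry, and the shrinker above walks it backwards via curr->prev and list_entry(). A rough equivalent with hypothetical demo_* names (list_last_entry() is the modern spelling of list_entry(head->prev, ...)):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_lru_entry {
	struct list_head lru_link;
};

static int demo_shrink(struct list_head *lru, int nr_to_drop)
{
	int dropped = 0;

	while (dropped < nr_to_drop && !list_empty(lru)) {
		struct demo_lru_entry *oldest =
			list_entry(lru->prev, struct demo_lru_entry, lru_link);

		/* drop the oldest entry first */
		list_del(&oldest->lru_link);
		kfree(oldest);
		dropped++;
	}
	return dropped;
}
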
@@ -276,17 +261,17 @@ void fld_cache_flush(struct fld_cache *cache)
  * entry accordingly.
  */
 
-void fld_cache_punch_hole(struct fld_cache *cache,
-                          struct fld_cache_entry *f_curr,
-                          struct fld_cache_entry *f_new)
+static void fld_cache_punch_hole(struct fld_cache *cache,
+                                struct fld_cache_entry *f_curr,
+                                struct fld_cache_entry *f_new)
 {
         const struct lu_seq_range *range = &f_new->fce_range;
-        const seqno_t new_start  = range->lsr_start;
-        const seqno_t new_end  = range->lsr_end;
+       const u64 new_start  = range->lsr_start;
+       const u64 new_end  = range->lsr_end;
         struct fld_cache_entry *fldt;
 
         ENTRY;
-        OBD_ALLOC_GFP(fldt, sizeof *fldt, CFS_ALLOC_ATOMIC);
+       OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC);
         if (!fldt) {
                 OBD_FREE_PTR(f_new);
                 EXIT;
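fld_cache_punch_hole() splits one cached range into two when the new range lands strictly inside it, which is why a third entry (fldt) has to be allocated, and with GFP_ATOMIC rather than GFP_KERNEL since its callers hold fci_lock. The arithmetic itself is small; a sketch with a hypothetical demo_range type, treating lsr_end as exclusive as the comparisons elsewhere in this file do: punching [130, 150) out of [100, 200) leaves [100, 130) and [150, 200).

#include <linux/types.h>

struct demo_range {
	u64 start;	/* inclusive */
	u64 end;	/* exclusive */
};

static void demo_punch_hole(const struct demo_range *orig,
			    const struct demo_range *hole,
			    struct demo_range *left,
			    struct demo_range *right)
{
	/* left piece keeps everything below the hole ... */
	left->start  = orig->start;
	left->end    = hole->start;
	/* ... right piece keeps everything above it */
	right->start = hole->end;
	right->end   = orig->end;
}
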
@@ -322,10 +307,10 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
                                struct fld_cache_entry *f_curr,
                                struct fld_cache_entry *f_new)
 {
-        const struct lu_seq_range *range = &f_new->fce_range;
-        const seqno_t new_start  = range->lsr_start;
-        const seqno_t new_end  = range->lsr_end;
-        const mdsno_t mdt = range->lsr_index;
+       const struct lu_seq_range *range = &f_new->fce_range;
+       const u64 new_start  = range->lsr_start;
+       const u64 new_end  = range->lsr_end;
+       const u32 mdt = range->lsr_index;
 
         /* this is overlap case, these case are checking overlapping with
          * prev range only. fixup will handle overlaping with next range. */
@@ -384,7 +369,7 @@ struct fld_cache_entry
 {
        struct fld_cache_entry *f_new;
 
-       LASSERT(range_is_sane(range));
+       LASSERT(lu_seq_range_is_sane(range));
 
        OBD_ALLOC_PTR(f_new);
        if (!f_new)
@@ -405,10 +390,10 @@ int fld_cache_insert_nolock(struct fld_cache *cache,
 {
        struct fld_cache_entry *f_curr;
        struct fld_cache_entry *n;
-       cfs_list_t *head;
-       cfs_list_t *prev = NULL;
-       const seqno_t new_start  = f_new->fce_range.lsr_start;
-       const seqno_t new_end  = f_new->fce_range.lsr_end;
+       struct list_head *head;
+       struct list_head *prev = NULL;
+       const u64 new_start  = f_new->fce_range.lsr_start;
+       const u64 new_end  = f_new->fce_range.lsr_end;
        __u32 new_flags  = f_new->fce_range.lsr_flags;
        ENTRY;
 
@@ -423,7 +408,7 @@ int fld_cache_insert_nolock(struct fld_cache *cache,
 
        head = &cache->fci_entries_head;
 
-       cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
+       list_for_each_entry_safe(f_curr, n, head, fce_list) {
                /* add list if next is end of list */
                if (new_end < f_curr->fce_range.lsr_start ||
                   (new_end == f_curr->fce_range.lsr_start &&
@@ -473,10 +458,10 @@ void fld_cache_delete_nolock(struct fld_cache *cache,
 {
        struct fld_cache_entry *flde;
        struct fld_cache_entry *tmp;
-       cfs_list_t *head;
+       struct list_head *head;
 
        head = &cache->fci_entries_head;
-       cfs_list_for_each_entry_safe(flde, tmp, head, fce_list) {
+       list_for_each_entry_safe(flde, tmp, head, fce_list) {
                /* add list if next is end of list */
                if (range->lsr_start == flde->fce_range.lsr_start ||
                   (range->lsr_end == flde->fce_range.lsr_end &&
@@ -499,16 +484,16 @@ void fld_cache_delete(struct fld_cache *cache,
        write_unlock(&cache->fci_lock);
 }
 
-struct fld_cache_entry
-*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
-                             struct lu_seq_range *range)
+struct fld_cache_entry *
+fld_cache_entry_lookup_nolock(struct fld_cache *cache,
+                             const struct lu_seq_range *range)
 {
        struct fld_cache_entry *flde;
        struct fld_cache_entry *got = NULL;
-       cfs_list_t *head;
+       struct list_head *head;
 
        head = &cache->fci_entries_head;
-       cfs_list_for_each_entry(flde, head, fce_list) {
+       list_for_each_entry(flde, head, fce_list) {
                if (range->lsr_start == flde->fce_range.lsr_start ||
                   (range->lsr_end == flde->fce_range.lsr_end &&
                    range->lsr_flags == flde->fce_range.lsr_flags)) {
@@ -523,8 +508,9 @@ struct fld_cache_entry
 /**
  * lookup \a seq sequence for range in fld cache.
  */
-struct fld_cache_entry
-*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
+struct fld_cache_entry *
+fld_cache_entry_lookup(struct fld_cache *cache,
+                      const struct lu_seq_range *range)
 {
        struct fld_cache_entry *got = NULL;
        ENTRY;
@@ -532,6 +518,7 @@ struct fld_cache_entry
        read_lock(&cache->fci_lock);
        got = fld_cache_entry_lookup_nolock(cache, range);
        read_unlock(&cache->fci_lock);
+
        RETURN(got);
 }
 
@@ -539,29 +526,29 @@ struct fld_cache_entry
  * lookup \a seq sequence for range in fld cache.
  */
 int fld_cache_lookup(struct fld_cache *cache,
-                    const seqno_t seq, struct lu_seq_range *range)
+                    const u64 seq, struct lu_seq_range *range)
 {
        struct fld_cache_entry *flde;
        struct fld_cache_entry *prev = NULL;
-       cfs_list_t *head;
+       struct list_head *head;
        ENTRY;
 
        read_lock(&cache->fci_lock);
        head = &cache->fci_entries_head;
 
        cache->fci_stat.fst_count++;
-       cfs_list_for_each_entry(flde, head, fce_list) {
+       list_for_each_entry(flde, head, fce_list) {
                if (flde->fce_range.lsr_start > seq) {
                        if (prev != NULL)
-                               memcpy(range, prev, sizeof(*range));
+                               *range = prev->fce_range;
                        break;
                }
 
                prev = flde;
-                if (range_within(&flde->fce_range, seq)) {
-                        *range = flde->fce_range;
+               if (lu_seq_range_within(&flde->fce_range, seq)) {
+                       *range = flde->fce_range;
 
-                        cache->fci_stat.fst_cache++;
+                       cache->fci_stat.fst_cache++;
                        read_unlock(&cache->fci_lock);
                        RETURN(0);
                }
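The *range = prev->fce_range assignment above copies exactly one lu_seq_range and lets the compiler check both types; the memcpy() it replaces copied sizeof(*range) bytes starting at the struct fld_cache_entry pointer itself, which only does the same thing if fce_range happens to be the entry's first member. A small sketch of the distinction, with hypothetical demo_* types:

#include <linux/list.h>
#include <linux/types.h>

struct demo_seq_range {
	u64 lsr_start;
	u64 lsr_end;
	u32 lsr_index;
	u32 lsr_flags;
};

struct demo_cache_entry {
	struct list_head	fce_link;	/* deliberately placed first */
	struct demo_seq_range	fce_range;
};

static void demo_copy_range(struct demo_seq_range *out,
			    const struct demo_cache_entry *ent)
{
	/* Copies only the embedded range, wherever it sits in the entry. */
	*out = ent->fce_range;
	/*
	 * memcpy(out, ent, sizeof(*out)) would instead start at fce_link
	 * and copy list pointers into *out.
	 */
}
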
@@ -569,4 +556,3 @@ int fld_cache_lookup(struct fld_cache *cache,
        read_unlock(&cache->fci_lock);
        RETURN(-ENOENT);
 }
-