-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Yury Umanets <umka@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_FLD
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-# include <asm/div64.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-# include <libcfs/list.h>
-#endif
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
+#include <linux/math64.h>
#include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <dt_object.h>
-#include <md_object.h>
-#include <lustre_req_layout.h>
#include <lustre_fld.h>
#include "fld_internal.h"
/**
* create fld cache.
*/
-struct fld_cache *fld_cache_init(const char *name,
- int cache_size, int cache_threshold)
+struct fld_cache *fld_cache_init(const char *name, int cache_size,
+ int cache_threshold)
{
- struct fld_cache *cache;
- ENTRY;
+ struct fld_cache *cache;
- LASSERT(name != NULL);
- LASSERT(cache_threshold < cache_size);
+ ENTRY;
- OBD_ALLOC_PTR(cache);
- if (cache == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ LASSERT(name != NULL);
+ LASSERT(cache_threshold < cache_size);
- CFS_INIT_LIST_HEAD(&cache->fci_entries_head);
- CFS_INIT_LIST_HEAD(&cache->fci_lru);
+ OBD_ALLOC_PTR(cache);
+ if (cache == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
- cache->fci_cache_count = 0;
- spin_lock_init(&cache->fci_lock);
+ INIT_LIST_HEAD(&cache->fci_entries_head);
+ INIT_LIST_HEAD(&cache->fci_lru);
- strncpy(cache->fci_name, name,
- sizeof(cache->fci_name));
+ cache->fci_cache_count = 0;
+ rwlock_init(&cache->fci_lock);
- cache->fci_cache_size = cache_size;
- cache->fci_threshold = cache_threshold;
+ strlcpy(cache->fci_name, name, sizeof(cache->fci_name));
- /* Init fld cache info. */
- memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));
+ cache->fci_cache_size = cache_size;
+ cache->fci_threshold = cache_threshold;
- CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
- cache->fci_name, cache_size, cache_threshold);
+ /* Init fld cache statistics. */
+ memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));
- RETURN(cache);
+ CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
+ cache->fci_name, cache_size, cache_threshold);
+
+ RETURN(cache);
}
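+
+/*
+ * Usage sketch (illustrative only, not introduced by this patch; the
+ * name and sizes below are made-up, any cache_threshold < cache_size
+ * is accepted):
+ *
+ *	struct fld_cache *cache;
+ *
+ *	cache = fld_cache_init("example-fld", 1024, 128);
+ *	if (IS_ERR(cache))
+ *		return PTR_ERR(cache);
+ */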
/**
* destroy fld cache.
*/
void fld_cache_fini(struct fld_cache *cache)
{
- __u64 pct;
- ENTRY;
-
- LASSERT(cache != NULL);
- fld_cache_flush(cache);
-
- if (cache->fci_stat.fst_count > 0) {
- pct = cache->fci_stat.fst_cache * 100;
- do_div(pct, cache->fci_stat.fst_count);
- } else {
- pct = 0;
- }
-
- CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
- CDEBUG(D_INFO, " Total reqs: "LPU64"\n", cache->fci_stat.fst_count);
- CDEBUG(D_INFO, " Cache reqs: "LPU64"\n", cache->fci_stat.fst_cache);
- CDEBUG(D_INFO, " Cache hits: "LPU64"%%\n", pct);
+ LASSERT(cache != NULL);
+ fld_cache_flush(cache);
- OBD_FREE_PTR(cache);
+ CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
+ CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache);
+ CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count);
- EXIT;
+ OBD_FREE_PTR(cache);
}
-static inline void fld_cache_entry_delete(struct fld_cache *cache,
- struct fld_cache_entry *node);
+/**
+ * delete given node from list.
+ */
+static void fld_cache_entry_delete(struct fld_cache *cache,
+ struct fld_cache_entry *node)
+{
+ list_del(&node->fce_list);
+ list_del(&node->fce_lru);
+ cache->fci_cache_count--;
+ OBD_FREE_PTR(node);
+}
/**
* fix list by checking new entry with NEXT entry in order.
*/
static void fld_fix_new_list(struct fld_cache *cache)
{
- struct fld_cache_entry *f_curr;
- struct fld_cache_entry *f_next;
- struct lu_seq_range *c_range;
- struct lu_seq_range *n_range;
- struct list_head *head = &cache->fci_entries_head;
- ENTRY;
+ struct fld_cache_entry *f_curr;
+ struct fld_cache_entry *f_next;
+ struct lu_seq_range *c_range;
+ struct lu_seq_range *n_range;
+ struct list_head *head = &cache->fci_entries_head;
+
+ ENTRY;
restart_fixup:
- list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
- c_range = &f_curr->fce_range;
- n_range = &f_next->fce_range;
-
- LASSERT(range_is_sane(c_range));
- if (&f_next->fce_list == head)
- break;
-
- LASSERT(c_range->lsr_start <= n_range->lsr_start);
-
- /* check merge possibility with next range */
- if (c_range->lsr_end == n_range->lsr_start) {
- if (c_range->lsr_mdt != n_range->lsr_mdt)
- continue;
- n_range->lsr_start = c_range->lsr_start;
- fld_cache_entry_delete(cache, f_curr);
- continue;
- }
-
- /* check if current range overlaps with next range. */
- if (n_range->lsr_start < c_range->lsr_end) {
-
- if (c_range->lsr_mdt == n_range->lsr_mdt) {
- n_range->lsr_start = c_range->lsr_start;
- n_range->lsr_end = max(c_range->lsr_end,
- n_range->lsr_end);
-
- fld_cache_entry_delete(cache, f_curr);
- } else {
- if (n_range->lsr_end <= c_range->lsr_end) {
- *n_range = *c_range;
- fld_cache_entry_delete(cache, f_curr);
- } else
- n_range->lsr_start = c_range->lsr_end;
- }
-
- /* we could have overlap over next
- * range too. better restart. */
- goto restart_fixup;
- }
-
- /* kill duplicates */
- if (c_range->lsr_start == n_range->lsr_start &&
- c_range->lsr_end == n_range->lsr_end)
- fld_cache_entry_delete(cache, f_curr);
- }
-
- EXIT;
+ list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+ c_range = &f_curr->fce_range;
+ n_range = &f_next->fce_range;
+
+ LASSERT(lu_seq_range_is_sane(c_range));
+ if (&f_next->fce_list == head)
+ break;
+
+ if (c_range->lsr_flags != n_range->lsr_flags)
+ continue;
+
+ LASSERTF(c_range->lsr_start <= n_range->lsr_start,
+ "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
+ PRANGE(c_range), PRANGE(n_range));
+
+ /* check merge possibility with next range */
+ if (c_range->lsr_end == n_range->lsr_start) {
+ if (c_range->lsr_index != n_range->lsr_index)
+ continue;
+ n_range->lsr_start = c_range->lsr_start;
+ fld_cache_entry_delete(cache, f_curr);
+ continue;
+ }
+
+ /* check if current range overlaps with next range. */
+ if (n_range->lsr_start < c_range->lsr_end) {
+ if (c_range->lsr_index == n_range->lsr_index) {
+ n_range->lsr_start = c_range->lsr_start;
+ n_range->lsr_end = max(c_range->lsr_end,
+ n_range->lsr_end);
+ fld_cache_entry_delete(cache, f_curr);
+ } else {
+ if (n_range->lsr_end <= c_range->lsr_end) {
+ *n_range = *c_range;
+ fld_cache_entry_delete(cache, f_curr);
+ } else
+ n_range->lsr_start = c_range->lsr_end;
+ }
+
+ /* the resulting range could still overlap the
+ * next range, so restart the scan.
+ */
+ goto restart_fixup;
+ }
+
+ /* kill duplicates */
+ if (c_range->lsr_start == n_range->lsr_start &&
+ c_range->lsr_end == n_range->lsr_end)
+ fld_cache_entry_delete(cache, f_curr);
+ }
+
+ EXIT;
}
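+
+/*
+ * Worked example for the pass above (hypothetical values): adjacent
+ * entries [0x100, 0x200) and [0x200, 0x300) with equal lsr_index are
+ * merged into a single [0x100, 0x300) entry; if the indices differ,
+ * both entries are left in place.
+ */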
/**
* add node to fld cache
*/
static inline void fld_cache_entry_add(struct fld_cache *cache,
- struct fld_cache_entry *f_new,
- struct list_head *pos)
+ struct fld_cache_entry *f_new,
+ struct list_head *pos)
{
- list_add(&f_new->fce_list, pos);
- list_add(&f_new->fce_lru, &cache->fci_lru);
-
- cache->fci_cache_count++;
- fld_fix_new_list(cache);
-}
+ list_add(&f_new->fce_list, pos);
+ list_add(&f_new->fce_lru, &cache->fci_lru);
-/**
- * delete given node from list.
- */
-static inline void fld_cache_entry_delete(struct fld_cache *cache,
- struct fld_cache_entry *node)
-{
- list_del(&node->fce_list);
- list_del(&node->fce_lru);
- cache->fci_cache_count--;
- OBD_FREE_PTR(node);
+ cache->fci_cache_count++;
+ fld_fix_new_list(cache);
}
/**
* Check if cache needs to be shrunk. If so - do it.
* Cache entries are deleted in lru order.
*/
static int fld_cache_shrink(struct fld_cache *cache)
{
- struct fld_cache_entry *flde;
- struct list_head *curr;
- int num = 0;
- ENTRY;
+ int num = 0;
- LASSERT(cache != NULL);
+ ENTRY;
- if (cache->fci_cache_count < cache->fci_cache_size)
- RETURN(0);
+ LASSERT(cache != NULL);
- curr = cache->fci_lru.prev;
+ if (cache->fci_cache_count < cache->fci_cache_size)
+ RETURN(0);
- while (cache->fci_cache_count + cache->fci_threshold >
- cache->fci_cache_size && curr != &cache->fci_lru) {
+ while (cache->fci_cache_count + cache->fci_threshold >
+ cache->fci_cache_size &&
+ !list_empty(&cache->fci_lru)) {
+ struct fld_cache_entry *flde =
+ list_last_entry(&cache->fci_lru, struct fld_cache_entry,
+ fce_lru);
- flde = list_entry(curr, struct fld_cache_entry, fce_lru);
- curr = curr->prev;
- fld_cache_entry_delete(cache, flde);
- num++;
- }
+ fld_cache_entry_delete(cache, flde);
+ num++;
+ }
- CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
- "%d entries\n", cache->fci_name, num);
+ CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
+ cache->fci_name, num);
- RETURN(0);
+ RETURN(0);
}
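+
+/*
+ * Shrink arithmetic, with made-up numbers: for fci_cache_size = 100,
+ * fci_threshold = 10 and fci_cache_count = 105, entries are freed from
+ * the LRU tail until count + threshold <= size, i.e. 15 entries are
+ * deleted and 90 remain.
+ */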
/**
* kill all fld cache entries.
*/
void fld_cache_flush(struct fld_cache *cache)
{
- ENTRY;
+ ENTRY;
- spin_lock(&cache->fci_lock);
- cache->fci_cache_size = 0;
- fld_cache_shrink(cache);
- spin_unlock(&cache->fci_lock);
+ write_lock(&cache->fci_lock);
+ cache->fci_cache_size = 0;
+ fld_cache_shrink(cache);
+ write_unlock(&cache->fci_lock);
- EXIT;
+ EXIT;
}
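+
+/*
+ * Note: setting fci_cache_size to 0 makes fld_cache_shrink() drain the
+ * whole LRU, since count + threshold > 0 holds while entries remain.
+ */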
/**
* punch hole in existing range. divide this range and add new
* entry accordingly.
*/
-void fld_cache_punch_hole(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
+static void fld_cache_punch_hole(struct fld_cache *cache,
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
{
- const struct lu_seq_range *range = &f_new->fce_range;
- const seqno_t new_start = range->lsr_start;
- const seqno_t new_end = range->lsr_end;
- struct fld_cache_entry *fldt;
-
- ENTRY;
- OBD_ALLOC_GFP(fldt, sizeof *fldt, CFS_ALLOC_ATOMIC);
- if (!fldt) {
- OBD_FREE_PTR(f_new);
- EXIT;
- /* overlap is not allowed, so dont mess up list. */
- return;
- }
- /* break f_curr RANGE into three RANGES:
- * f_curr, f_new , fldt
- */
-
- /* f_new = *range */
-
- /* fldt */
- fldt->fce_range.lsr_start = new_end;
- fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
- fldt->fce_range.lsr_mdt = f_curr->fce_range.lsr_mdt;
-
- /* f_curr */
- f_curr->fce_range.lsr_end = new_start;
-
- /* add these two entries to list */
- fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- fld_cache_entry_add(cache, fldt, &f_new->fce_list);
-
- /* no need to fixup */
- EXIT;
+ const struct lu_seq_range *range = &f_new->fce_range;
+ const u64 new_start = range->lsr_start;
+ const u64 new_end = range->lsr_end;
+ struct fld_cache_entry *fldt;
+
+ ENTRY;
+ OBD_ALLOC_GFP(fldt, sizeof(*fldt), GFP_ATOMIC);
+ if (!fldt) {
+ OBD_FREE_PTR(f_new);
+ EXIT;
+ /* overlap is not allowed, so don't mess up the list. */
+ return;
+ }
+ /* break f_curr RANGE into three RANGES:
+ * f_curr, f_new, fldt
+ */
+
+ /* fldt */
+ fldt->fce_range.lsr_start = new_end;
+ fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
+ fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
+
+ /* f_curr */
+ f_curr->fce_range.lsr_end = new_start;
+
+ /* add these two entries to list */
+ fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
+ fld_cache_entry_add(cache, fldt, &f_new->fce_list);
+
+ /* no need to fixup */
+ EXIT;
}
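+
+/*
+ * Sketch of the split above (hypothetical values): punching
+ * f_new = [0x200, 0x300) into f_curr = [0x100, 0x500) leaves
+ *
+ *	f_curr [0x100, 0x200) | f_new [0x200, 0x300) | fldt [0x300, 0x500)
+ *
+ * where f_curr and fldt keep the original lsr_index.
+ */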
/**
* handle range overlap in fld cache.
*/
-void fld_cache_overlap_handle(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
+static void fld_cache_overlap_handle(struct fld_cache *cache,
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
{
- const struct lu_seq_range *range = &f_new->fce_range;
- const seqno_t new_start = range->lsr_start;
- const seqno_t new_end = range->lsr_end;
- const mdsno_t mdt = range->lsr_mdt;
+ const struct lu_seq_range *range = &f_new->fce_range;
+ const u64 new_start = range->lsr_start;
+ const u64 new_end = range->lsr_end;
+ const u32 mdt = range->lsr_index;
+
+ /* This is the overlap case. These branches only resolve overlap
+ * with the previous range; fld_fix_new_list() handles overlap with
+ * the next range.
+ */
+
+ if (f_curr->fce_range.lsr_index == mdt) {
+ f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
+ new_start);
- /* this is overlap case, these case are checking overlapping with
- * prev range only. fixup will handle overlaping with next range. */
+ f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
+ new_end);
- if (f_curr->fce_range.lsr_mdt == mdt) {
- f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
- new_start);
+ OBD_FREE_PTR(f_new);
+ fld_fix_new_list(cache);
- f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
- new_end);
+ } else if (new_start <= f_curr->fce_range.lsr_start &&
+ f_curr->fce_range.lsr_end <= new_end) {
+ /* case 1: the new range completely covers the existing
+ * range, e.g. the whole range migrated; update the entry.
+ */
- OBD_FREE_PTR(f_new);
- fld_fix_new_list(cache);
+ f_curr->fce_range = *range;
+ OBD_FREE_PTR(f_new);
+ fld_fix_new_list(cache);
- } else if (new_start <= f_curr->fce_range.lsr_start &&
- f_curr->fce_range.lsr_end <= new_end) {
- /* case 1: new range completely overshadowed existing range.
- * e.g. whole range migrated. update fld cache entry */
+ } else if (f_curr->fce_range.lsr_start < new_start &&
+ new_end < f_curr->fce_range.lsr_end) {
+ /* case 2: new range fit within existing range. */
- f_curr->fce_range = *range;
- OBD_FREE_PTR(f_new);
- fld_fix_new_list(cache);
+ fld_cache_punch_hole(cache, f_curr, f_new);
- } else if (f_curr->fce_range.lsr_start < new_start &&
- new_end < f_curr->fce_range.lsr_end) {
- /* case 2: new range fit within existing range. */
+ } else if (new_end <= f_curr->fce_range.lsr_end) {
+ /* case 3: overlap:
+ * [new_start [c_start new_end) c_end)
+ */
- fld_cache_punch_hole(cache, f_curr, f_new);
+ LASSERT(new_start <= f_curr->fce_range.lsr_start);
- } else if (new_end <= f_curr->fce_range.lsr_end) {
- /* case 3: overlap:
- * [new_start [c_start new_end) c_end)
- */
+ f_curr->fce_range.lsr_start = new_end;
+ fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);
- LASSERT(new_start <= f_curr->fce_range.lsr_start);
+ } else if (f_curr->fce_range.lsr_start <= new_start) {
+ /* case 4: overlap:
+ * [c_start [new_start c_end) new_end)
+ */
- f_curr->fce_range.lsr_start = new_end;
- fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);
+ LASSERT(f_curr->fce_range.lsr_end <= new_end);
+
+ f_curr->fce_range.lsr_end = new_start;
+ fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
+ } else
+ CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
+ PRANGE(range), PRANGE(&f_curr->fce_range));
+}
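+
+/*
+ * The four cross-index cases above, with hypothetical ranges
+ * (c = f_curr, n = f_new):
+ *
+ *	case 1: c [0x200, 0x300), n [0x100, 0x500) -> c takes n's range
+ *	case 2: c [0x100, 0x500), n [0x200, 0x300) -> punch hole in c
+ *	case 3: c [0x200, 0x300), n [0x100, 0x250) -> c shrinks to [0x250, 0x300)
+ *	case 4: c [0x100, 0x250), n [0x200, 0x300) -> c shrinks to [0x100, 0x200)
+ */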
+
+struct fld_cache_entry
+*fld_cache_entry_create(const struct lu_seq_range *range)
+{
+ struct fld_cache_entry *f_new;
- } else if (f_curr->fce_range.lsr_start <= new_start) {
- /* case 4: overlap:
- * [c_start [new_start c_end) new_end)
- */
+ LASSERT(lu_seq_range_is_sane(range));
- LASSERT(f_curr->fce_range.lsr_end <= new_end);
+ OBD_ALLOC_PTR(f_new);
+ if (!f_new)
+ RETURN(ERR_PTR(-ENOMEM));
- f_curr->fce_range.lsr_end = new_start;
- fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
- } else
- CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
- PRANGE(range),PRANGE(&f_curr->fce_range));
+ f_new->fce_range = *range;
+ RETURN(f_new);
}
/**
* Insert FLD entry in FLD cache.
*
* This function handles all cases of merging and breaking up of
* ranges.
*/
-void fld_cache_insert(struct fld_cache *cache,
- const struct lu_seq_range *range)
+int fld_cache_insert_nolock(struct fld_cache *cache,
+ struct fld_cache_entry *f_new)
{
- struct fld_cache_entry *f_new;
- struct fld_cache_entry *f_curr;
- struct fld_cache_entry *n;
- struct list_head *head;
- struct list_head *prev = NULL;
- const seqno_t new_start = range->lsr_start;
- const seqno_t new_end = range->lsr_end;
- ENTRY;
-
- LASSERT(range_is_sane(range));
-
- /* Allocate new entry. */
- OBD_ALLOC_PTR(f_new);
- if (!f_new) {
- EXIT;
- return;
- }
-
- f_new->fce_range = *range;
-
- /*
- * Duplicate entries are eliminated in inset op.
- * So we don't need to search new entry before starting insertion loop.
- */
-
- spin_lock(&cache->fci_lock);
- fld_cache_shrink(cache);
-
- head = &cache->fci_entries_head;
-
- list_for_each_entry_safe(f_curr, n, head, fce_list) {
- /* add list if next is end of list */
- if (new_end < f_curr->fce_range.lsr_start)
- break;
-
- prev = &f_curr->fce_list;
- /* check if this range is to left of new range. */
- if (new_start < f_curr->fce_range.lsr_end) {
- fld_cache_overlap_handle(cache, f_curr, f_new);
- goto out;
- }
- }
-
- if (prev == NULL)
- prev = head;
-
- /* Add new entry to cache and lru list. */
- fld_cache_entry_add(cache, f_new, prev);
+ struct fld_cache_entry *f_curr;
+ struct fld_cache_entry *n;
+ struct list_head *head;
+ struct list_head *prev = NULL;
+ const u64 new_start = f_new->fce_range.lsr_start;
+ const u64 new_end = f_new->fce_range.lsr_end;
+ __u32 new_flags = f_new->fce_range.lsr_flags;
+
+ ENTRY;
+
+ /*
+ * Duplicate entries are eliminated in insert op.
+ * So we don't need to search new entry before starting
+ * insertion loop.
+ */
+
+ fld_cache_shrink(cache);
+
+ head = &cache->fci_entries_head;
+
+ list_for_each_entry_safe(f_curr, n, head, fce_list) {
+ /* insert in front of this entry if the new range ends before it */
+ if (new_end < f_curr->fce_range.lsr_start ||
+ (new_end == f_curr->fce_range.lsr_start &&
+ new_flags != f_curr->fce_range.lsr_flags))
+ break;
+
+ prev = &f_curr->fce_list;
+ /* check if the new range overlaps this entry. */
+ if (new_start < f_curr->fce_range.lsr_end &&
+ new_flags == f_curr->fce_range.lsr_flags) {
+ fld_cache_overlap_handle(cache, f_curr, f_new);
+ goto out;
+ }
+ }
+
+ if (prev == NULL)
+ prev = head;
+
+ CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
+ /* Add new entry to cache and lru list. */
+ fld_cache_entry_add(cache, f_new, prev);
out:
- spin_unlock(&cache->fci_lock);
- EXIT;
+ RETURN(0);
+}
+
+int fld_cache_insert(struct fld_cache *cache,
+ const struct lu_seq_range *range)
+{
+ struct fld_cache_entry *flde;
+ int rc;
+
+ flde = fld_cache_entry_create(range);
+ if (IS_ERR(flde))
+ RETURN(PTR_ERR(flde));
+
+ write_lock(&cache->fci_lock);
+ rc = fld_cache_insert_nolock(cache, flde);
+ write_unlock(&cache->fci_lock);
+ if (rc)
+ OBD_FREE_PTR(flde);
+
+ RETURN(rc);
+}
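+
+/*
+ * Example caller (a sketch with made-up values): cache the fact that
+ * sequences [0x400, 0x500) live on index 2:
+ *
+ *	struct lu_seq_range range = {
+ *		.lsr_start = 0x400,
+ *		.lsr_end   = 0x500,
+ *		.lsr_index = 2,
+ *		.lsr_flags = LU_SEQ_RANGE_MDT,
+ *	};
+ *
+ *	rc = fld_cache_insert(cache, &range);
+ */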
+
+void fld_cache_delete_nolock(struct fld_cache *cache,
+ const struct lu_seq_range *range)
+{
+ struct fld_cache_entry *flde;
+ struct fld_cache_entry *tmp;
+ struct list_head *head;
+
+ head = &cache->fci_entries_head;
+ list_for_each_entry_safe(flde, tmp, head, fce_list) {
+ /* delete the entry that matches the given range */
+ if (range->lsr_start == flde->fce_range.lsr_start ||
+ (range->lsr_end == flde->fce_range.lsr_end &&
+ range->lsr_flags == flde->fce_range.lsr_flags)) {
+ fld_cache_entry_delete(cache, flde);
+ break;
+ }
+ }
}
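+
+/*
+ * fld_cache_delete_nolock() expects fci_lock to be write-held; a
+ * locked wrapper would mirror fld_cache_insert() above:
+ *
+ *	write_lock(&cache->fci_lock);
+ *	fld_cache_delete_nolock(cache, range);
+ *	write_unlock(&cache->fci_lock);
+ */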
/**
* lookup \a seq sequence for range in fld cache.
*/
int fld_cache_lookup(struct fld_cache *cache,
- const seqno_t seq, struct lu_seq_range *range)
+ const u64 seq, struct lu_seq_range *range)
{
- struct fld_cache_entry *flde;
- struct list_head *head;
- ENTRY;
-
-
- spin_lock(&cache->fci_lock);
- head = &cache->fci_entries_head;
-
- cache->fci_stat.fst_count++;
- list_for_each_entry(flde, head, fce_list) {
- if (flde->fce_range.lsr_start > seq)
- break;
-
- if (range_within(&flde->fce_range, seq)) {
- *range = flde->fce_range;
-
- /* update position of this entry in lru list. */
- list_move(&flde->fce_lru, &cache->fci_lru);
- cache->fci_stat.fst_cache++;
- spin_unlock(&cache->fci_lock);
- RETURN(0);
- }
- }
- spin_unlock(&cache->fci_lock);
- RETURN(-ENOENT);
+ struct fld_cache_entry *flde;
+ struct fld_cache_entry *prev = NULL;
+ struct list_head *head;
+
+ ENTRY;
+
+ read_lock(&cache->fci_lock);
+ head = &cache->fci_entries_head;
+
+ cache->fci_stat.fst_count++;
+ list_for_each_entry(flde, head, fce_list) {
+ if (flde->fce_range.lsr_start > seq) {
+ if (prev != NULL)
+ *range = prev->fce_range;
+ break;
+ }
+
+ prev = flde;
+ if (lu_seq_range_within(&flde->fce_range, seq)) {
+ *range = flde->fce_range;
+
+ cache->fci_stat.fst_cache++;
+ read_unlock(&cache->fci_lock);
+ RETURN(0);
+ }
+ }
+ read_unlock(&cache->fci_lock);
+ RETURN(-ENOENT);
}
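+
+/*
+ * Lookup example (illustrative, hypothetical sequence number): check
+ * the cache before falling back to an FLD RPC:
+ *
+ *	struct lu_seq_range range;
+ *
+ *	rc = fld_cache_lookup(cache, 0x450ULL, &range);
+ *	if (rc == 0)
+ *		target_index = range.lsr_index;
+ */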