X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ffld%2Ffld_cache.c;h=e77df9652141ec18812af18a9ba5010bf43fb59f;hb=f75d2a1fc9b17b384bbcbc13bcb80ba10412cf29;hp=6336fdd466bf7d309897367e9ab060e57ce7afb0;hpb=70e80ade90af09300396706b8910e196a7928520;p=fs%2Flustre-release.git diff --git a/lustre/fld/fld_cache.c b/lustre/fld/fld_cache.c index 6336fdd..e77df96 100644 --- a/lustre/fld/fld_cache.c +++ b/lustre/fld/fld_cache.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -16,441 +14,479 @@ * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see [sun.com URL with a - * copy of GPLv2]. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * lustre/fld/fld_cache.c * * FLD (Fids Location Database) * + * Author: Pravin Shelar * Author: Yury Umanets */ -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif #define DEBUG_SUBSYSTEM S_FLD -#ifdef __KERNEL__ -# include -# include -# include -# include -#else /* __KERNEL__ */ -# include -# include -#endif - -#include -#include -#include +#include +#include +#include #include -#include - -#include -#include -#include #include #include "fld_internal.h" -#ifdef __KERNEL__ -static inline __u32 fld_cache_hash(seqno_t seq) -{ - return (__u32)seq; -} - -void fld_cache_flush(struct fld_cache *cache) -{ - struct fld_cache_entry *flde; - struct hlist_head *bucket; - struct hlist_node *scan; - struct hlist_node *next; - int i; - ENTRY; - - /* Free all cache entries. */ - spin_lock(&cache->fci_lock); - for (i = 0; i < cache->fci_hash_size; i++) { - bucket = cache->fci_hash_table + i; - hlist_for_each_entry_safe(flde, scan, next, bucket, fce_list) { - hlist_del_init(&flde->fce_list); - list_del_init(&flde->fce_lru); - cache->fci_cache_count--; - OBD_FREE_PTR(flde); - } - } - spin_unlock(&cache->fci_lock); - EXIT; -} - -struct fld_cache *fld_cache_init(const char *name, int hash_size, - int cache_size, int cache_threshold) +/** + * create fld cache. 
+ */ +struct fld_cache *fld_cache_init(const char *name, int cache_size, + int cache_threshold) { struct fld_cache *cache; - int i; - ENTRY; - LASSERT(name != NULL); - LASSERT(IS_PO2(hash_size)); - LASSERT(cache_threshold < cache_size); + ENTRY; - OBD_ALLOC_PTR(cache); - if (cache == NULL) - RETURN(ERR_PTR(-ENOMEM)); + LASSERT(name != NULL); + LASSERT(cache_threshold < cache_size); - INIT_LIST_HEAD(&cache->fci_lru); + OBD_ALLOC_PTR(cache); + if (cache == NULL) + RETURN(ERR_PTR(-ENOMEM)); + + INIT_LIST_HEAD(&cache->fci_entries_head); + INIT_LIST_HEAD(&cache->fci_lru); cache->fci_cache_count = 0; - spin_lock_init(&cache->fci_lock); + rwlock_init(&cache->fci_lock); - strncpy(cache->fci_name, name, - sizeof(cache->fci_name)); + strlcpy(cache->fci_name, name, sizeof(cache->fci_name)); - cache->fci_hash_size = hash_size; cache->fci_cache_size = cache_size; - cache->fci_threshold = cache_threshold; - - /* Init fld cache info. */ - cache->fci_hash_mask = hash_size - 1; - OBD_ALLOC(cache->fci_hash_table, - hash_size * sizeof(*cache->fci_hash_table)); - if (cache->fci_hash_table == NULL) { - OBD_FREE_PTR(cache); - RETURN(ERR_PTR(-ENOMEM)); - } + cache->fci_threshold = cache_threshold; - for (i = 0; i < hash_size; i++) - INIT_HLIST_HEAD(&cache->fci_hash_table[i]); - memset(&cache->fci_stat, 0, sizeof(cache->fci_stat)); + /* Init fld cache info. */ + memset(&cache->fci_stat, 0, sizeof(cache->fci_stat)); - CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n", - cache->fci_name, cache_size, cache_threshold); + CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n", + cache->fci_name, cache_size, cache_threshold); - RETURN(cache); + RETURN(cache); } -EXPORT_SYMBOL(fld_cache_init); +/** + * destroy fld cache. + */ void fld_cache_fini(struct fld_cache *cache) { - __u64 pct; - ENTRY; - - LASSERT(cache != NULL); - fld_cache_flush(cache); - - if (cache->fci_stat.fst_count > 0) { - pct = cache->fci_stat.fst_cache * 100; - do_div(pct, cache->fci_stat.fst_count); - } else { - pct = 0; - } - - printk("FLD cache statistics (%s):\n", cache->fci_name); - printk(" Total reqs: "LPU64"\n", cache->fci_stat.fst_count); - printk(" Cache reqs: "LPU64"\n", cache->fci_stat.fst_cache); - printk(" Saved RPCs: "LPU64"\n", cache->fci_stat.fst_inflight); - printk(" Cache hits: "LPU64"%%\n", pct); - - OBD_FREE(cache->fci_hash_table, cache->fci_hash_size * - sizeof(*cache->fci_hash_table)); + LASSERT(cache != NULL); + fld_cache_flush(cache); + + CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name); + CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache); + CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count); + OBD_FREE_PTR(cache); - - EXIT; } -EXPORT_SYMBOL(fld_cache_fini); -static inline struct hlist_head * -fld_cache_bucket(struct fld_cache *cache, seqno_t seq) +/** + * delete given node from list. + */ +static void fld_cache_entry_delete(struct fld_cache *cache, + struct fld_cache_entry *node) { - return cache->fci_hash_table + (fld_cache_hash(seq) & - cache->fci_hash_mask); + list_del(&node->fce_list); + list_del(&node->fce_lru); + cache->fci_cache_count--; + OBD_FREE_PTR(node); } -/* - * Check if cache needs to be shrinked. If so - do it. Tries to keep all - * collision lists well balanced. That is, check all of them and remove one - * entry in list and so on until cache is shrinked enough. +/** + * fix list by checking new entry with NEXT entry in order. 
*/ -static int fld_cache_shrink(struct fld_cache *cache) +static void fld_fix_new_list(struct fld_cache *cache) { - struct fld_cache_entry *flde; - struct list_head *curr; - int num = 0; - ENTRY; + struct fld_cache_entry *f_curr; + struct fld_cache_entry *f_next; + struct lu_seq_range *c_range; + struct lu_seq_range *n_range; + struct list_head *head = &cache->fci_entries_head; + + ENTRY; + +restart_fixup: + + list_for_each_entry_safe(f_curr, f_next, head, fce_list) { + c_range = &f_curr->fce_range; + n_range = &f_next->fce_range; + + LASSERT(lu_seq_range_is_sane(c_range)); + if (&f_next->fce_list == head) + break; + + if (c_range->lsr_flags != n_range->lsr_flags) + continue; + + LASSERTF(c_range->lsr_start <= n_range->lsr_start, + "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n", + PRANGE(c_range), PRANGE(n_range)); + + /* check merge possibility with next range */ + if (c_range->lsr_end == n_range->lsr_start) { + if (c_range->lsr_index != n_range->lsr_index) + continue; + n_range->lsr_start = c_range->lsr_start; + fld_cache_entry_delete(cache, f_curr); + continue; + } - LASSERT(cache != NULL); + /* check if current range overlaps with next range. */ + if (n_range->lsr_start < c_range->lsr_end) { + if (c_range->lsr_index == n_range->lsr_index) { + n_range->lsr_start = c_range->lsr_start; + n_range->lsr_end = max(c_range->lsr_end, + n_range->lsr_end); + fld_cache_entry_delete(cache, f_curr); + } else { + if (n_range->lsr_end <= c_range->lsr_end) { + *n_range = *c_range; + fld_cache_entry_delete(cache, f_curr); + } else + n_range->lsr_start = c_range->lsr_end; + } + + /* we could have overlap over next + * range too. better restart. + */ + goto restart_fixup; + } - if (cache->fci_cache_count < cache->fci_cache_size) - RETURN(0); + /* kill duplicates */ + if (c_range->lsr_start == n_range->lsr_start && + c_range->lsr_end == n_range->lsr_end) + fld_cache_entry_delete(cache, f_curr); + } - curr = cache->fci_lru.prev; + EXIT; +} - while (cache->fci_cache_count + cache->fci_threshold > - cache->fci_cache_size && curr != &cache->fci_lru) - { - flde = list_entry(curr, struct fld_cache_entry, fce_lru); - curr = curr->prev; +/** + * add node to fld cache + */ +static inline void fld_cache_entry_add(struct fld_cache *cache, + struct fld_cache_entry *f_new, + struct list_head *pos) +{ + list_add(&f_new->fce_list, pos); + list_add(&f_new->fce_lru, &cache->fci_lru); - /* keep inflights */ - if (flde->fce_inflight) - continue; + cache->fci_cache_count++; + fld_fix_new_list(cache); +} - hlist_del_init(&flde->fce_list); - list_del_init(&flde->fce_lru); - cache->fci_cache_count--; - OBD_FREE_PTR(flde); - num++; - } +/** + * Check if cache needs to be shrunk. If so - do it. + * Remove one entry in list and so on until cache is shrunk enough. + */ +static int fld_cache_shrink(struct fld_cache *cache) +{ + int num = 0; - CDEBUG(D_INFO, "%s: FLD cache - Shrinked by " - "%d entries\n", cache->fci_name, num); + ENTRY; - RETURN(0); -} + LASSERT(cache != NULL); -int fld_cache_insert_inflight(struct fld_cache *cache, seqno_t seq) -{ - struct fld_cache_entry *flde, *fldt; - struct hlist_head *bucket; - struct hlist_node *scan; - ENTRY; - - spin_lock(&cache->fci_lock); - - /* Check if cache already has the entry with such a seq. */ - bucket = fld_cache_bucket(cache, seq); - hlist_for_each_entry(fldt, scan, bucket, fce_list) { - if (fldt->fce_seq == seq) { - spin_unlock(&cache->fci_lock); - RETURN(-EEXIST); - } - } - spin_unlock(&cache->fci_lock); - - /* Allocate new entry. 
*/ - OBD_ALLOC_PTR(flde); - if (!flde) - RETURN(-ENOMEM); - - /* - * Check if cache has the entry with such a seq again. It could be added - * while we were allocating new entry. - */ - spin_lock(&cache->fci_lock); - hlist_for_each_entry(fldt, scan, bucket, fce_list) { - if (fldt->fce_seq == seq) { - spin_unlock(&cache->fci_lock); - OBD_FREE_PTR(flde); - RETURN(0); - } - } - - /* Add new entry to cache and lru list. */ - INIT_HLIST_NODE(&flde->fce_list); - flde->fce_inflight = 1; - flde->fce_invalid = 1; - cfs_waitq_init(&flde->fce_waitq); - flde->fce_seq = seq; - - hlist_add_head(&flde->fce_list, bucket); - list_add(&flde->fce_lru, &cache->fci_lru); - cache->fci_cache_count++; - - spin_unlock(&cache->fci_lock); - - RETURN(0); + if (cache->fci_cache_count < cache->fci_cache_size) + RETURN(0); + + while (cache->fci_cache_count + cache->fci_threshold > + cache->fci_cache_size && + !list_empty(&cache->fci_lru)) { + struct fld_cache_entry *flde = + list_last_entry(&cache->fci_lru, struct fld_cache_entry, + fce_lru); + + fld_cache_entry_delete(cache, flde); + num++; + } + + CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n", + cache->fci_name, num); + + RETURN(0); } -EXPORT_SYMBOL(fld_cache_insert_inflight); -int fld_cache_insert(struct fld_cache *cache, - seqno_t seq, mdsno_t mds) +/** + * kill all fld cache entries. + */ +void fld_cache_flush(struct fld_cache *cache) { - struct fld_cache_entry *flde, *fldt; - struct hlist_head *bucket; - struct hlist_node *scan; - int rc; - ENTRY; - - spin_lock(&cache->fci_lock); - - /* Check if need to shrink cache. */ - rc = fld_cache_shrink(cache); - if (rc) { - spin_unlock(&cache->fci_lock); - RETURN(rc); - } - - /* Check if cache already has the entry with such a seq. */ - bucket = fld_cache_bucket(cache, seq); - hlist_for_each_entry(fldt, scan, bucket, fce_list) { - if (fldt->fce_seq == seq) { - if (fldt->fce_inflight) { - /* set mds for inflight entry */ - fldt->fce_mds = mds; - fldt->fce_inflight = 0; - fldt->fce_invalid = 0; - cfs_waitq_signal(&fldt->fce_waitq); - rc = 0; - } else - rc = -EEXIST; - spin_unlock(&cache->fci_lock); - RETURN(rc); - } - } - spin_unlock(&cache->fci_lock); - - /* Allocate new entry. */ - OBD_ALLOC_PTR(flde); - if (!flde) - RETURN(-ENOMEM); - - /* - * Check if cache has the entry with such a seq again. It could be added - * while we were allocating new entry. - */ - spin_lock(&cache->fci_lock); - hlist_for_each_entry(fldt, scan, bucket, fce_list) { - if (fldt->fce_seq == seq) { - spin_unlock(&cache->fci_lock); - OBD_FREE_PTR(flde); - RETURN(0); - } - } - - /* Add new entry to cache and lru list. */ - INIT_HLIST_NODE(&flde->fce_list); - flde->fce_mds = mds; - flde->fce_seq = seq; - flde->fce_inflight = 0; - flde->fce_invalid = 0; - - hlist_add_head(&flde->fce_list, bucket); - list_add(&flde->fce_lru, &cache->fci_lru); - cache->fci_cache_count++; - - spin_unlock(&cache->fci_lock); - - RETURN(0); + ENTRY; + + write_lock(&cache->fci_lock); + cache->fci_cache_size = 0; + fld_cache_shrink(cache); + write_unlock(&cache->fci_lock); + + EXIT; } -EXPORT_SYMBOL(fld_cache_insert); -void fld_cache_delete(struct fld_cache *cache, seqno_t seq) +/** + * punch hole in existing range. divide this range and add new + * entry accordingly. 
+ */ + +static void fld_cache_punch_hole(struct fld_cache *cache, + struct fld_cache_entry *f_curr, + struct fld_cache_entry *f_new) { - struct fld_cache_entry *flde; - struct hlist_node *scan, *n; - struct hlist_head *bucket; - ENTRY; - - bucket = fld_cache_bucket(cache, seq); - - spin_lock(&cache->fci_lock); - hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) { - if (flde->fce_seq == seq) { - hlist_del_init(&flde->fce_list); - list_del_init(&flde->fce_lru); - if (flde->fce_inflight) { - flde->fce_inflight = 0; - flde->fce_invalid = 1; - cfs_waitq_signal(&flde->fce_waitq); - } - cache->fci_cache_count--; - OBD_FREE_PTR(flde); - GOTO(out_unlock, 0); - } - } - - EXIT; -out_unlock: - spin_unlock(&cache->fci_lock); + const struct lu_seq_range *range = &f_new->fce_range; + const u64 new_start = range->lsr_start; + const u64 new_end = range->lsr_end; + struct fld_cache_entry *fldt; + + ENTRY; + OBD_ALLOC_GFP(fldt, sizeof(*fldt), GFP_ATOMIC); + if (!fldt) { + OBD_FREE_PTR(f_new); + EXIT; + /* overlap is not allowed, so dont mess up list. */ + return; + } + /* break f_curr RANGE into three RANGES: + * f_curr, f_new , fldt + */ + + /* fldt */ + fldt->fce_range.lsr_start = new_end; + fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end; + fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index; + + /* f_curr */ + f_curr->fce_range.lsr_end = new_start; + + /* add these two entries to list */ + fld_cache_entry_add(cache, f_new, &f_curr->fce_list); + fld_cache_entry_add(cache, fldt, &f_new->fce_list); + + /* no need to fixup */ + EXIT; } -EXPORT_SYMBOL(fld_cache_delete); -static int fld_check_inflight(struct fld_cache_entry *flde) +/** + * handle range overlap in fld cache. + */ +static void fld_cache_overlap_handle(struct fld_cache *cache, + struct fld_cache_entry *f_curr, + struct fld_cache_entry *f_new) { - return (flde->fce_inflight); + const struct lu_seq_range *range = &f_new->fce_range; + const u64 new_start = range->lsr_start; + const u64 new_end = range->lsr_end; + const u32 mdt = range->lsr_index; + + /* this is overlap case, these case are checking overlapping with + * prev range only. fixup will handle overlaping with next range. + */ + + if (f_curr->fce_range.lsr_index == mdt) { + f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start, + new_start); + + f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end, + new_end); + + OBD_FREE_PTR(f_new); + fld_fix_new_list(cache); + + } else if (new_start <= f_curr->fce_range.lsr_start && + f_curr->fce_range.lsr_end <= new_end) { + /* case 1: new range completely overshadowed existing range. + * e.g. whole range migrated. update fld cache entry + */ + + f_curr->fce_range = *range; + OBD_FREE_PTR(f_new); + fld_fix_new_list(cache); + + } else if (f_curr->fce_range.lsr_start < new_start && + new_end < f_curr->fce_range.lsr_end) { + /* case 2: new range fit within existing range. 
*/ + + fld_cache_punch_hole(cache, f_curr, f_new); + + } else if (new_end <= f_curr->fce_range.lsr_end) { + /* case 3: overlap: + * [new_start [c_start new_end) c_end) + */ + + LASSERT(new_start <= f_curr->fce_range.lsr_start); + + f_curr->fce_range.lsr_start = new_end; + fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev); + + } else if (f_curr->fce_range.lsr_start <= new_start) { + /* case 4: overlap: + * [c_start [new_start c_end) new_end) + */ + + LASSERT(f_curr->fce_range.lsr_end <= new_end); + + f_curr->fce_range.lsr_end = new_start; + fld_cache_entry_add(cache, f_new, &f_curr->fce_list); + } else + CERROR("NEW range ="DRANGE" curr = "DRANGE"\n", + PRANGE(range), PRANGE(&f_curr->fce_range)); } -int fld_cache_lookup(struct fld_cache *cache, - seqno_t seq, mdsno_t *mds) +struct fld_cache_entry +*fld_cache_entry_create(const struct lu_seq_range *range) { - struct fld_cache_entry *flde; - struct hlist_node *scan, *n; - struct hlist_head *bucket; - ENTRY; - - bucket = fld_cache_bucket(cache, seq); - - spin_lock(&cache->fci_lock); - cache->fci_stat.fst_count++; - hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) { - if (flde->fce_seq == seq) { - if (flde->fce_inflight) { - /* lookup RPC is inflight need to wait */ - struct l_wait_info lwi; - spin_unlock(&cache->fci_lock); - lwi = LWI_TIMEOUT(0, NULL, NULL); - l_wait_event(flde->fce_waitq, - !fld_check_inflight(flde), &lwi); - LASSERT(!flde->fce_inflight); - if (flde->fce_invalid) - RETURN(-ENOENT); - - *mds = flde->fce_mds; - cache->fci_stat.fst_inflight++; - } else { - LASSERT(!flde->fce_invalid); - *mds = flde->fce_mds; - list_del(&flde->fce_lru); - list_add(&flde->fce_lru, &cache->fci_lru); - cache->fci_stat.fst_cache++; - spin_unlock(&cache->fci_lock); - } - RETURN(0); - } - } - spin_unlock(&cache->fci_lock); - RETURN(-ENOENT); + struct fld_cache_entry *f_new; + + LASSERT(lu_seq_range_is_sane(range)); + + OBD_ALLOC_PTR(f_new); + if (!f_new) + RETURN(ERR_PTR(-ENOMEM)); + + f_new->fce_range = *range; + RETURN(f_new); } -EXPORT_SYMBOL(fld_cache_lookup); -#else -int fld_cache_insert_inflight(struct fld_cache *cache, seqno_t seq) + +/** + * Insert FLD entry in FLD cache. + * + * This function handles all cases of merging and breaking up of + * ranges. + */ +int fld_cache_insert_nolock(struct fld_cache *cache, + struct fld_cache_entry *f_new) { - return -ENOTSUPP; + struct fld_cache_entry *f_curr; + struct fld_cache_entry *n; + struct list_head *head; + struct list_head *prev = NULL; + const u64 new_start = f_new->fce_range.lsr_start; + const u64 new_end = f_new->fce_range.lsr_end; + __u32 new_flags = f_new->fce_range.lsr_flags; + + ENTRY; + + /* + * Duplicate entries are eliminated in insert op. + * So we don't need to search new entry before starting + * insertion loop. + */ + + fld_cache_shrink(cache); + + head = &cache->fci_entries_head; + + list_for_each_entry_safe(f_curr, n, head, fce_list) { + /* add list if next is end of list */ + if (new_end < f_curr->fce_range.lsr_start || + (new_end == f_curr->fce_range.lsr_start && + new_flags != f_curr->fce_range.lsr_flags)) + break; + + prev = &f_curr->fce_list; + /* check if this range is to left of new range. */ + if (new_start < f_curr->fce_range.lsr_end && + new_flags == f_curr->fce_range.lsr_flags) { + fld_cache_overlap_handle(cache, f_curr, f_new); + goto out; + } + } + + if (prev == NULL) + prev = head; + + CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range)); + /* Add new entry to cache and lru list. 
*/ + fld_cache_entry_add(cache, f_new, prev); +out: + RETURN(0); } -EXPORT_SYMBOL(fld_cache_insert_inflight); int fld_cache_insert(struct fld_cache *cache, - seqno_t seq, mdsno_t mds) + const struct lu_seq_range *range) { - return -ENOTSUPP; + struct fld_cache_entry *flde; + int rc; + + flde = fld_cache_entry_create(range); + if (IS_ERR(flde)) + RETURN(PTR_ERR(flde)); + + write_lock(&cache->fci_lock); + rc = fld_cache_insert_nolock(cache, flde); + write_unlock(&cache->fci_lock); + if (rc) + OBD_FREE_PTR(flde); + + RETURN(rc); } -EXPORT_SYMBOL(fld_cache_insert); -void fld_cache_delete(struct fld_cache *cache, - seqno_t seq) +void fld_cache_delete_nolock(struct fld_cache *cache, + const struct lu_seq_range *range) { - return; + struct fld_cache_entry *flde; + struct fld_cache_entry *tmp; + struct list_head *head; + + head = &cache->fci_entries_head; + list_for_each_entry_safe(flde, tmp, head, fce_list) { + /* add list if next is end of list */ + if (range->lsr_start == flde->fce_range.lsr_start || + (range->lsr_end == flde->fce_range.lsr_end && + range->lsr_flags == flde->fce_range.lsr_flags)) { + fld_cache_entry_delete(cache, flde); + break; + } + } } -EXPORT_SYMBOL(fld_cache_delete); +/** + * lookup \a seq sequence for range in fld cache. + */ int fld_cache_lookup(struct fld_cache *cache, - seqno_t seq, mdsno_t *mds) + const u64 seq, struct lu_seq_range *range) { - return -ENOTSUPP; + struct fld_cache_entry *flde; + struct fld_cache_entry *prev = NULL; + struct list_head *head; + + ENTRY; + + read_lock(&cache->fci_lock); + head = &cache->fci_entries_head; + + cache->fci_stat.fst_count++; + list_for_each_entry(flde, head, fce_list) { + if (flde->fce_range.lsr_start > seq) { + if (prev != NULL) + *range = prev->fce_range; + break; + } + + prev = flde; + if (lu_seq_range_within(&flde->fce_range, seq)) { + *range = flde->fce_range; + + cache->fci_stat.fst_cache++; + read_unlock(&cache->fci_lock); + RETURN(0); + } + } + read_unlock(&cache->fci_lock); + RETURN(-ENOENT); } -EXPORT_SYMBOL(fld_cache_lookup); -#endif
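
The patch above replaces the old per-sequence hash table with a single sorted, non-overlapping list of lu_seq_range entries (fci_entries_head), so a lookup walks the list until it finds a range containing the sequence or passes it. Below is a minimal userspace sketch of that lookup idea, not the kernel code: locking, the LRU list, ENTRY/RETURN macros and the real lu_seq_range type are omitted, and the struct name, helper name, sequence numbers and MDT indices are made-up illustrations of how the new fld_cache_lookup() behaves.

/*
 * Simplified, self-contained model of the range-list lookup introduced
 * by this patch. Illustrative only; names and values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

struct seq_range {
	uint64_t start;	/* first sequence in the range (inclusive) */
	uint64_t end;	/* first sequence past the range (exclusive) */
	uint32_t index;	/* MDT index the range is served by */
};

/*
 * Ranges are kept sorted by start and non-overlapping, which is the
 * invariant fld_fix_new_list() maintains after every insert.
 */
static int range_lookup(const struct seq_range *ranges, int nr,
			uint64_t seq, struct seq_range *out)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* sorted list: once start is past seq, no later entry matches */
		if (ranges[i].start > seq)
			break;
		if (seq >= ranges[i].start && seq < ranges[i].end) {
			*out = ranges[i];
			return 0;
		}
	}
	return -1;	/* the kernel version returns -ENOENT here */
}

int main(void)
{
	/* two hypothetical cached ranges, one per MDT */
	struct seq_range cache[] = {
		{ .start = 0x200000400, .end = 0x200000800, .index = 0 },
		{ .start = 0x200000800, .end = 0x200000c00, .index = 1 },
	};
	struct seq_range found;

	if (range_lookup(cache, 2, 0x200000a00, &found) == 0)
		printf("seq 0x200000a00 -> MDT%u\n", found.index);
	return 0;
}

The design trade-off visible in the diff: a sorted range list makes merging, splitting (fld_cache_punch_hole) and overlap fixup straightforward and lets one entry cover an entire sequence range, at the cost of an O(n) walk instead of the old O(1) hash lookup, which is acceptable because the cache is bounded by fci_cache_size.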