X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ffld%2Ffld_cache.c;h=f638e0dcd1ea4d1f69f6b1899e94fda2875c84fa;hp=9f0c12260e0dd01c03316c7a555d7b7b03ede538;hb=e1b63fd21177b40d5c23cedd9e5d81b461db53c3;hpb=0f8dca08a4f68cba82c2c822998ecc309d3b7aaf
diff --git a/lustre/fld/fld_cache.c b/lustre/fld/fld_cache.c
index 9f0c122..f638e0d 100644
--- a/lustre/fld/fld_cache.c
+++ b/lustre/fld/fld_cache.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -17,17 +15,15 @@
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
@@ -41,30 +37,12 @@
 * Author: Yury Umanets <umka@clusterfs.com>
 */

-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_FLD

-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-# include <asm/div64.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-# include <libcfs/list.h>
-#endif
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
+#include <linux/math64.h>
 #include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <dt_object.h>
-#include <md_object.h>
-#include <lustre_req_layout.h>
 #include <lustre_fld.h>
 #include "fld_internal.h"

@@ -84,13 +62,13 @@ struct fld_cache *fld_cache_init(const char *name,
 if (cache == NULL)
 RETURN(ERR_PTR(-ENOMEM));

- CFS_INIT_LIST_HEAD(&cache->fci_entries_head);
- CFS_INIT_LIST_HEAD(&cache->fci_lru);
+ INIT_LIST_HEAD(&cache->fci_entries_head);
+ INIT_LIST_HEAD(&cache->fci_lru);

 cache->fci_cache_count = 0;
- cfs_spin_lock_init(&cache->fci_lock);
+ rwlock_init(&cache->fci_lock);

- strncpy(cache->fci_name, name,
+ strlcpy(cache->fci_name, name,
 sizeof(cache->fci_name));

 cache->fci_cache_size = cache_size;
@@ -110,39 +88,26 @@ struct fld_cache *fld_cache_init(const char *name,
 */
 void fld_cache_fini(struct fld_cache *cache)
 {
- __u64 pct;
- ENTRY;
-
- LASSERT(cache != NULL);
- fld_cache_flush(cache);
+ LASSERT(cache != NULL);
+ fld_cache_flush(cache);

- if (cache->fci_stat.fst_count > 0) {
- pct = cache->fci_stat.fst_cache * 100;
- do_div(pct, cache->fci_stat.fst_count);
- } else {
- pct = 0;
- }
-
- CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
- CDEBUG(D_INFO, " Total reqs: "LPU64"\n", cache->fci_stat.fst_count);
- CDEBUG(D_INFO, " Cache reqs: "LPU64"\n", cache->fci_stat.fst_cache);
- CDEBUG(D_INFO, " Cache hits: "LPU64"%%\n", pct);
+ CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
+ CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache);
+ CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count);

- OBD_FREE_PTR(cache);
-
- EXIT;
+ OBD_FREE_PTR(cache);
 }

 /**
 * delete given node from list. 
*/ -static inline void fld_cache_entry_delete(struct fld_cache *cache, - struct fld_cache_entry *node) +void fld_cache_entry_delete(struct fld_cache *cache, + struct fld_cache_entry *node) { - cfs_list_del(&node->fce_list); - cfs_list_del(&node->fce_lru); - cache->fci_cache_count--; - OBD_FREE_PTR(node); + list_del(&node->fce_list); + list_del(&node->fce_lru); + cache->fci_cache_count--; + OBD_FREE_PTR(node); } /** @@ -154,24 +119,29 @@ static void fld_fix_new_list(struct fld_cache *cache) struct fld_cache_entry *f_next; struct lu_seq_range *c_range; struct lu_seq_range *n_range; - cfs_list_t *head = &cache->fci_entries_head; + struct list_head *head = &cache->fci_entries_head; ENTRY; restart_fixup: - cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) { - c_range = &f_curr->fce_range; - n_range = &f_next->fce_range; + list_for_each_entry_safe(f_curr, f_next, head, fce_list) { + c_range = &f_curr->fce_range; + n_range = &f_next->fce_range; - LASSERT(range_is_sane(c_range)); - if (&f_next->fce_list == head) - break; + LASSERT(lu_seq_range_is_sane(c_range)); + if (&f_next->fce_list == head) + break; - LASSERT(c_range->lsr_start <= n_range->lsr_start); + if (c_range->lsr_flags != n_range->lsr_flags) + continue; + + LASSERTF(c_range->lsr_start <= n_range->lsr_start, + "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n", + PRANGE(c_range), PRANGE(n_range)); /* check merge possibility with next range */ if (c_range->lsr_end == n_range->lsr_start) { - if (c_range->lsr_mdt != n_range->lsr_mdt) + if (c_range->lsr_index != n_range->lsr_index) continue; n_range->lsr_start = c_range->lsr_start; fld_cache_entry_delete(cache, f_curr); @@ -180,12 +150,10 @@ restart_fixup: /* check if current range overlaps with next range. */ if (n_range->lsr_start < c_range->lsr_end) { - - if (c_range->lsr_mdt == n_range->lsr_mdt) { + if (c_range->lsr_index == n_range->lsr_index) { n_range->lsr_start = c_range->lsr_start; n_range->lsr_end = max(c_range->lsr_end, n_range->lsr_end); - fld_cache_entry_delete(cache, f_curr); } else { if (n_range->lsr_end <= c_range->lsr_end) { @@ -201,9 +169,9 @@ restart_fixup: } /* kill duplicates */ - if (c_range->lsr_start == n_range->lsr_start && - c_range->lsr_end == n_range->lsr_end) - fld_cache_entry_delete(cache, f_curr); + if (c_range->lsr_start == n_range->lsr_start && + c_range->lsr_end == n_range->lsr_end) + fld_cache_entry_delete(cache, f_curr); } EXIT; @@ -214,13 +182,13 @@ restart_fixup: */ static inline void fld_cache_entry_add(struct fld_cache *cache, struct fld_cache_entry *f_new, - cfs_list_t *pos) + struct list_head *pos) { - cfs_list_add(&f_new->fce_list, pos); - cfs_list_add(&f_new->fce_lru, &cache->fci_lru); + list_add(&f_new->fce_list, pos); + list_add(&f_new->fce_lru, &cache->fci_lru); - cache->fci_cache_count++; - fld_fix_new_list(cache); + cache->fci_cache_count++; + fld_fix_new_list(cache); } /** @@ -230,7 +198,7 @@ static inline void fld_cache_entry_add(struct fld_cache *cache, static int fld_cache_shrink(struct fld_cache *cache) { struct fld_cache_entry *flde; - cfs_list_t *curr; + struct list_head *curr; int num = 0; ENTRY; @@ -244,7 +212,7 @@ static int fld_cache_shrink(struct fld_cache *cache) while (cache->fci_cache_count + cache->fci_threshold > cache->fci_cache_size && curr != &cache->fci_lru) { - flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru); + flde = list_entry(curr, struct fld_cache_entry, fce_lru); curr = curr->prev; fld_cache_entry_delete(cache, flde); num++; @@ -261,14 +229,14 @@ static int fld_cache_shrink(struct fld_cache 
*cache) */ void fld_cache_flush(struct fld_cache *cache) { - ENTRY; + ENTRY; - cfs_spin_lock(&cache->fci_lock); - cache->fci_cache_size = 0; - fld_cache_shrink(cache); - cfs_spin_unlock(&cache->fci_lock); + write_lock(&cache->fci_lock); + cache->fci_cache_size = 0; + fld_cache_shrink(cache); + write_unlock(&cache->fci_lock); - EXIT; + EXIT; } /** @@ -276,17 +244,17 @@ void fld_cache_flush(struct fld_cache *cache) * entry accordingly. */ -void fld_cache_punch_hole(struct fld_cache *cache, - struct fld_cache_entry *f_curr, - struct fld_cache_entry *f_new) +static void fld_cache_punch_hole(struct fld_cache *cache, + struct fld_cache_entry *f_curr, + struct fld_cache_entry *f_new) { const struct lu_seq_range *range = &f_new->fce_range; - const seqno_t new_start = range->lsr_start; - const seqno_t new_end = range->lsr_end; + const u64 new_start = range->lsr_start; + const u64 new_end = range->lsr_end; struct fld_cache_entry *fldt; ENTRY; - OBD_ALLOC_GFP(fldt, sizeof *fldt, CFS_ALLOC_ATOMIC); + OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC); if (!fldt) { OBD_FREE_PTR(f_new); EXIT; @@ -302,7 +270,7 @@ void fld_cache_punch_hole(struct fld_cache *cache, /* fldt */ fldt->fce_range.lsr_start = new_end; fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end; - fldt->fce_range.lsr_mdt = f_curr->fce_range.lsr_mdt; + fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index; /* f_curr */ f_curr->fce_range.lsr_end = new_start; @@ -318,19 +286,19 @@ void fld_cache_punch_hole(struct fld_cache *cache, /** * handle range overlap in fld cache. */ -void fld_cache_overlap_handle(struct fld_cache *cache, - struct fld_cache_entry *f_curr, - struct fld_cache_entry *f_new) +static void fld_cache_overlap_handle(struct fld_cache *cache, + struct fld_cache_entry *f_curr, + struct fld_cache_entry *f_new) { - const struct lu_seq_range *range = &f_new->fce_range; - const seqno_t new_start = range->lsr_start; - const seqno_t new_end = range->lsr_end; - const mdsno_t mdt = range->lsr_mdt; + const struct lu_seq_range *range = &f_new->fce_range; + const u64 new_start = range->lsr_start; + const u64 new_end = range->lsr_end; + const u32 mdt = range->lsr_index; /* this is overlap case, these case are checking overlapping with * prev range only. fixup will handle overlaping with next range. */ - if (f_curr->fce_range.lsr_mdt == mdt) { + if (f_curr->fce_range.lsr_index == mdt) { f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start, new_start); @@ -379,97 +347,195 @@ void fld_cache_overlap_handle(struct fld_cache *cache, PRANGE(range),PRANGE(&f_curr->fce_range)); } +struct fld_cache_entry +*fld_cache_entry_create(const struct lu_seq_range *range) +{ + struct fld_cache_entry *f_new; + + LASSERT(lu_seq_range_is_sane(range)); + + OBD_ALLOC_PTR(f_new); + if (!f_new) + RETURN(ERR_PTR(-ENOMEM)); + + f_new->fce_range = *range; + RETURN(f_new); +} + /** * Insert FLD entry in FLD cache. * * This function handles all cases of merging and breaking up of * ranges. 
*/ -void fld_cache_insert(struct fld_cache *cache, - const struct lu_seq_range *range) +int fld_cache_insert_nolock(struct fld_cache *cache, + struct fld_cache_entry *f_new) { - struct fld_cache_entry *f_new; - struct fld_cache_entry *f_curr; - struct fld_cache_entry *n; - cfs_list_t *head; - cfs_list_t *prev = NULL; - const seqno_t new_start = range->lsr_start; - const seqno_t new_end = range->lsr_end; - ENTRY; + struct fld_cache_entry *f_curr; + struct fld_cache_entry *n; + struct list_head *head; + struct list_head *prev = NULL; + const u64 new_start = f_new->fce_range.lsr_start; + const u64 new_end = f_new->fce_range.lsr_end; + __u32 new_flags = f_new->fce_range.lsr_flags; + ENTRY; + + /* + * Duplicate entries are eliminated in insert op. + * So we don't need to search new entry before starting + * insertion loop. + */ + + if (!cache->fci_no_shrink) + fld_cache_shrink(cache); + + head = &cache->fci_entries_head; + + list_for_each_entry_safe(f_curr, n, head, fce_list) { + /* add list if next is end of list */ + if (new_end < f_curr->fce_range.lsr_start || + (new_end == f_curr->fce_range.lsr_start && + new_flags != f_curr->fce_range.lsr_flags)) + break; + + prev = &f_curr->fce_list; + /* check if this range is to left of new range. */ + if (new_start < f_curr->fce_range.lsr_end && + new_flags == f_curr->fce_range.lsr_flags) { + fld_cache_overlap_handle(cache, f_curr, f_new); + goto out; + } + } + + if (prev == NULL) + prev = head; + + CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range)); + /* Add new entry to cache and lru list. */ + fld_cache_entry_add(cache, f_new, prev); +out: + RETURN(0); +} - LASSERT(range_is_sane(range)); +int fld_cache_insert(struct fld_cache *cache, + const struct lu_seq_range *range) +{ + struct fld_cache_entry *flde; + int rc; - /* Allocate new entry. */ - OBD_ALLOC_PTR(f_new); - if (!f_new) { - EXIT; - return; - } + flde = fld_cache_entry_create(range); + if (IS_ERR(flde)) + RETURN(PTR_ERR(flde)); - f_new->fce_range = *range; + write_lock(&cache->fci_lock); + rc = fld_cache_insert_nolock(cache, flde); + write_unlock(&cache->fci_lock); + if (rc) + OBD_FREE_PTR(flde); - /* - * Duplicate entries are eliminated in inset op. - * So we don't need to search new entry before starting insertion loop. - */ + RETURN(rc); +} - cfs_spin_lock(&cache->fci_lock); - fld_cache_shrink(cache); +void fld_cache_delete_nolock(struct fld_cache *cache, + const struct lu_seq_range *range) +{ + struct fld_cache_entry *flde; + struct fld_cache_entry *tmp; + struct list_head *head; + + head = &cache->fci_entries_head; + list_for_each_entry_safe(flde, tmp, head, fce_list) { + /* add list if next is end of list */ + if (range->lsr_start == flde->fce_range.lsr_start || + (range->lsr_end == flde->fce_range.lsr_end && + range->lsr_flags == flde->fce_range.lsr_flags)) { + fld_cache_entry_delete(cache, flde); + break; + } + } +} - head = &cache->fci_entries_head; +/** + * Delete FLD entry in FLD cache. 
+ * + */ +void fld_cache_delete(struct fld_cache *cache, + const struct lu_seq_range *range) +{ + write_lock(&cache->fci_lock); + fld_cache_delete_nolock(cache, range); + write_unlock(&cache->fci_lock); +} - cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) { - /* add list if next is end of list */ - if (new_end < f_curr->fce_range.lsr_start) - break; +struct fld_cache_entry * +fld_cache_entry_lookup_nolock(struct fld_cache *cache, + const struct lu_seq_range *range) +{ + struct fld_cache_entry *flde; + struct fld_cache_entry *got = NULL; + struct list_head *head; + + head = &cache->fci_entries_head; + list_for_each_entry(flde, head, fce_list) { + if (range->lsr_start == flde->fce_range.lsr_start || + (range->lsr_end == flde->fce_range.lsr_end && + range->lsr_flags == flde->fce_range.lsr_flags)) { + got = flde; + break; + } + } + + RETURN(got); +} - prev = &f_curr->fce_list; - /* check if this range is to left of new range. */ - if (new_start < f_curr->fce_range.lsr_end) { - fld_cache_overlap_handle(cache, f_curr, f_new); - goto out; - } - } +/** + * lookup \a seq sequence for range in fld cache. + */ +struct fld_cache_entry * +fld_cache_entry_lookup(struct fld_cache *cache, + const struct lu_seq_range *range) +{ + struct fld_cache_entry *got = NULL; + ENTRY; - if (prev == NULL) - prev = head; + read_lock(&cache->fci_lock); + got = fld_cache_entry_lookup_nolock(cache, range); + read_unlock(&cache->fci_lock); - /* Add new entry to cache and lru list. */ - fld_cache_entry_add(cache, f_new, prev); -out: - cfs_spin_unlock(&cache->fci_lock); - EXIT; + RETURN(got); } /** * lookup \a seq sequence for range in fld cache. */ int fld_cache_lookup(struct fld_cache *cache, - const seqno_t seq, struct lu_seq_range *range) + const u64 seq, struct lu_seq_range *range) { - struct fld_cache_entry *flde; - cfs_list_t *head; - ENTRY; - - - cfs_spin_lock(&cache->fci_lock); - head = &cache->fci_entries_head; - - cache->fci_stat.fst_count++; - cfs_list_for_each_entry(flde, head, fce_list) { - if (flde->fce_range.lsr_start > seq) - break; - - if (range_within(&flde->fce_range, seq)) { - *range = flde->fce_range; - - /* update position of this entry in lru list. */ - cfs_list_move(&flde->fce_lru, &cache->fci_lru); - cache->fci_stat.fst_cache++; - cfs_spin_unlock(&cache->fci_lock); - RETURN(0); - } - } - cfs_spin_unlock(&cache->fci_lock); - RETURN(-ENOENT); + struct fld_cache_entry *flde; + struct fld_cache_entry *prev = NULL; + struct list_head *head; + ENTRY; + + read_lock(&cache->fci_lock); + head = &cache->fci_entries_head; + + cache->fci_stat.fst_count++; + list_for_each_entry(flde, head, fce_list) { + if (flde->fce_range.lsr_start > seq) { + if (prev != NULL) + *range = prev->fce_range; + break; + } + + prev = flde; + if (lu_seq_range_within(&flde->fce_range, seq)) { + *range = flde->fce_range; + + cache->fci_stat.fst_cache++; + read_unlock(&cache->fci_lock); + RETURN(0); + } + } + read_unlock(&cache->fci_lock); + RETURN(-ENOENT); }
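Taken together, the hunks above leave the cache with a small, self-contained API: fld_cache_insert() takes fci_lock as a writer and merges the new range into the sorted fci_entries_head list (fld_fix_new_list() resolves overlaps and duplicates), while fld_cache_lookup() walks the list under the read lock, filling *range and returning 0 on a hit or -ENOENT on a miss. Below is a minimal caller sketch showing how the entry points compose, much as fld_client_init() does when it sets up the client-side cache. It is illustrative only: the fld_cache_example() wrapper, the "example" cache name, the 64/8 size/threshold pair, and the sequence numbers are invented for the example; LU_SEQ_RANGE_MDT is the range-type flag from the shared Lustre headers, not part of this patch.

#include <libcfs/libcfs.h>
#include <lustre_fld.h>
#include "fld_internal.h"

static int fld_cache_example(void)
{
	struct fld_cache *cache;
	/* Cache the fact that sequences [0x400, 0x800) live on MDT0. */
	struct lu_seq_range range = {
		.lsr_start = 0x400,
		.lsr_end   = 0x800,
		.lsr_index = 0,
		.lsr_flags = LU_SEQ_RANGE_MDT,
	};
	int rc;

	/* Name, size and threshold are arbitrary illustrative values;
	 * fld_cache_init() asserts threshold < size. */
	cache = fld_cache_init("example", 64, 8);
	if (IS_ERR(cache))
		return PTR_ERR(cache);

	/* Writer side: insert merges/splits against existing entries
	 * under the write lock. */
	rc = fld_cache_insert(cache, &range);
	if (rc != 0)
		goto out;

	/* Reader side: 0x500 lies inside [0x400, 0x800), so this fills
	 * 'range' and returns 0; an uncached sequence gives -ENOENT. */
	rc = fld_cache_lookup(cache, 0x500, &range);

	/* Drop the entry again; deletion matches on the range start, or
	 * on a matching end and flags. */
	fld_cache_delete(cache, &range);
out:
	fld_cache_fini(cache);	/* flushes whatever is left in the LRU */
	return rc;
}

Converting fci_lock from a spinlock to an rwlock_t is what makes the lookup path above cheap under contention: concurrent fld_cache_lookup() callers can proceed in parallel, and only insert, delete, and flush serialize on the write side.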