 * Author: Yury Umanets <umka@clusterfs.com>
 */
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_FLD
#ifdef __KERNEL__
        CFS_INIT_LIST_HEAD(&cache->fci_lru);
        cache->fci_cache_count = 0;
-        cfs_spin_lock_init(&cache->fci_lock);
+	spin_lock_init(&cache->fci_lock);
        strncpy(cache->fci_name, name,
                sizeof(cache->fci_name));
                if (&f_next->fce_list == head)
                        break;
-                LASSERT(c_range->lsr_start <= n_range->lsr_start);
+		if (c_range->lsr_flags != n_range->lsr_flags)
+			continue;
+
+		LASSERTF(c_range->lsr_start <= n_range->lsr_start,
+			 "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
+			 PRANGE(c_range), PRANGE(n_range));
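		/*
		 * Sketch, not part of the patch: with OST FIDs the cache can
		 * hold ranges of more than one lsr_flags class (e.g.
		 * LU_SEQ_RANGE_MDT and LU_SEQ_RANGE_OST), and the list is
		 * start-ordered only within one class, e.g.:
		 *
		 *	[0x400-0x500]:0:mdt  [0x400-0x480]:3:ost  [0x500-0x600]:1:mdt
		 *
		 * so neighbours with mismatched flags are skipped before the
		 * ordering assert.
		 */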
                /* check merge possibility with next range */
                if (c_range->lsr_end == n_range->lsr_start) {
                /* check if current range overlaps with next range. */
                if (n_range->lsr_start < c_range->lsr_end) {
-
                        if (c_range->lsr_index == n_range->lsr_index) {
                                n_range->lsr_start = c_range->lsr_start;
                                n_range->lsr_end = max(c_range->lsr_end,
                                                       n_range->lsr_end);
-
                                fld_cache_entry_delete(cache, f_curr);
                        } else {
                                if (n_range->lsr_end <= c_range->lsr_end) {
                }
                /* kill duplicates */
-                if (c_range->lsr_start == n_range->lsr_start &&
-                    c_range->lsr_end == n_range->lsr_end)
-                        fld_cache_entry_delete(cache, f_curr);
+		if (c_range->lsr_start == n_range->lsr_start &&
+		    c_range->lsr_end == n_range->lsr_end)
+			fld_cache_entry_delete(cache, f_curr);
        }
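	/*
	 * Worked example for the merge/overlap handling above (hypothetical
	 * sequence numbers): with f_curr = [100-200] and f_next = [150-300]
	 * on the same lsr_index, f_next grows to [100-300] and f_curr is
	 * deleted; on different indices the ranges are trimmed instead of
	 * merged, and exact duplicates drop the current entry.
	 */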
        EXIT;
 */
void fld_cache_flush(struct fld_cache *cache)
{
-        ENTRY;
-        cfs_spin_lock(&cache->fci_lock);
-        cache->fci_cache_size = 0;
-        fld_cache_shrink(cache);
-        cfs_spin_unlock(&cache->fci_lock);
-        EXIT;
+	ENTRY;
+	spin_lock(&cache->fci_lock);
+	cache->fci_cache_size = 0;
+	fld_cache_shrink(cache);
+	spin_unlock(&cache->fci_lock);
+	EXIT;
}
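/*
 * Why zeroing fci_cache_size flushes everything: fld_cache_shrink() is
 * assumed to evict LRU-tail entries while the entry count exceeds the
 * configured size, roughly (a sketch, not the verbatim helper):
 *
 *	while (cache->fci_cache_count > cache->fci_cache_size) {
 *		flde = cfs_list_entry(cache->fci_lru.prev,
 *				      struct fld_cache_entry, fce_lru);
 *		fld_cache_entry_delete(cache, flde);
 *	}
 *
 * so with fci_cache_size == 0 every entry is evicted under fci_lock.
 */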
/**
        cfs_list_t *prev = NULL;
        const seqno_t new_start = range->lsr_start;
        const seqno_t new_end = range->lsr_end;
+	__u32 new_flags = range->lsr_flags;
        ENTRY;
        LASSERT(range_is_sane(range));
         * So we don't need to search new entry before starting insertion loop.
         */
-        cfs_spin_lock(&cache->fci_lock);
+	spin_lock(&cache->fci_lock);
        fld_cache_shrink(cache);
        head = &cache->fci_entries_head;
        cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
                /* add list if next is end of list */
-                if (new_end < f_curr->fce_range.lsr_start)
+		if (new_end < f_curr->fce_range.lsr_start ||
+		   (new_end == f_curr->fce_range.lsr_start &&
+		    new_flags != f_curr->fce_range.lsr_flags))
                        break;
                prev = &f_curr->fce_list;
                /* check if this range is to left of new range. */
-                if (new_start < f_curr->fce_range.lsr_end) {
+		if (new_start < f_curr->fce_range.lsr_end &&
+		    new_flags == f_curr->fce_range.lsr_flags) {
                        fld_cache_overlap_handle(cache, f_curr, f_new);
                        goto out;
                }
        if (prev == NULL)
                prev = head;
+	CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
        /* Add new entry to cache and lru list. */
        fld_cache_entry_add(cache, f_new, prev);
out:
-        cfs_spin_unlock(&cache->fci_lock);
-        EXIT;
+	spin_unlock(&cache->fci_lock);
+	EXIT;
}
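/*
 * Invariant kept by the insertion loop above: per lsr_flags class the
 * list is sorted by lsr_start and non-overlapping. Insertion walks to
 * the first entry that starts past new_end (or at new_end but with
 * different flags), remembering the last entry passed as prev, then
 * splices the new entry in after prev. Hypothetical trace for
 * inserting [200-300]:
 *
 *	before:	[0-100]  [100-200]	(prev ends as [100-200])
 *	after:	[0-100]  [100-200]  [200-300]
 */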
/**
 * lookup \a seq sequence for range in fld cache.
 */
int fld_cache_lookup(struct fld_cache *cache,
-                     const seqno_t seq, struct lu_seq_range *range)
+		     const seqno_t seq, struct lu_seq_range *range)
{
-        struct fld_cache_entry *flde;
-        cfs_list_t *head;
-        ENTRY;
-
-        cfs_spin_lock(&cache->fci_lock);
+	struct fld_cache_entry *flde;
+	cfs_list_t *head;
+	ENTRY;
+	spin_lock(&cache->fci_lock);
        head = &cache->fci_entries_head;
        cache->fci_stat.fst_count++;
                        /* update position of this entry in lru list. */
                        cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                        cache->fci_stat.fst_cache++;
-                        cfs_spin_unlock(&cache->fci_lock);
-                        RETURN(0);
-                }
-        }
-        cfs_spin_unlock(&cache->fci_lock);
-        RETURN(-ENOENT);
+			spin_unlock(&cache->fci_lock);
+			RETURN(0);
+		}
+	}
+	spin_unlock(&cache->fci_lock);
+	RETURN(-ENOENT);
}
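/*
 * Assumed caller pattern (a sketch based on fld_client_lookup(), not
 * part of this patch): try the cache first, fall back to an RPC on
 * -ENOENT, and seed the cache with the answer.
 *
 *	if (fld_cache_lookup(fld->lcf_cache, seq, &res) == 0)
 *		RETURN(0);
 *	rc = fld_client_rpc(exp, &res, FLD_LOOKUP);
 *	if (rc == 0)
 *		fld_cache_insert(fld->lcf_cache, &res);
 */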