* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
CFS_INIT_LIST_HEAD(&cache->fci_lru);
cache->fci_cache_count = 0;
- spin_lock_init(&cache->fci_lock);
+ cfs_spin_lock_init(&cache->fci_lock);
strncpy(cache->fci_name, name,
        sizeof(cache->fci_name) - 1);
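/*
 * A minimal sketch of how the cfs_* wrappers used throughout this patch
 * are assumed to map onto native Linux primitives. On Linux the libcfs
 * portability layer is essentially a thin alias; other platforms supply
 * their own implementations. The exact definitions live in the libcfs
 * headers and may differ from this illustration.
 */
#if 0   /* illustrative only */
typedef struct list_head cfs_list_t;
#define cfs_spin_lock_init(lock)        spin_lock_init(lock)
#define cfs_spin_lock(lock)             spin_lock(lock)
#define cfs_spin_unlock(lock)           spin_unlock(lock)
#define cfs_list_del(entry)             list_del(entry)
#define cfs_list_add(entry, head)       list_add(entry, head)
#endif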
static inline void fld_cache_entry_delete(struct fld_cache *cache,
struct fld_cache_entry *node)
{
- list_del(&node->fce_list);
- list_del(&node->fce_lru);
+ cfs_list_del(&node->fce_list);
+ cfs_list_del(&node->fce_lru);
cache->fci_cache_count--;
OBD_FREE_PTR(node);
}
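/*
 * Hedged usage note: fld_cache_entry_delete() unlinks the node from both
 * the range list and the LRU list and then frees it, so the caller is
 * assumed to hold cache->fci_lock (as the callers in this file do) and
 * must not touch the node afterwards.
 */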
struct fld_cache_entry *f_next;
struct lu_seq_range *c_range;
struct lu_seq_range *n_range;
- struct list_head *head = &cache->fci_entries_head;
+ cfs_list_t *head = &cache->fci_entries_head;
ENTRY;
restart_fixup:
- list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+ cfs_list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
c_range = &f_curr->fce_range;
n_range = &f_next->fce_range;
/* check merge possibility with next range */
if (c_range->lsr_end == n_range->lsr_start) {
- if (c_range->lsr_mdt != n_range->lsr_mdt)
+ if (c_range->lsr_index != n_range->lsr_index)
continue;
n_range->lsr_start = c_range->lsr_start;
fld_cache_entry_delete(cache, f_curr);
/* check if current range overlaps with next range. */
if (n_range->lsr_start < c_range->lsr_end) {
- if (c_range->lsr_mdt == n_range->lsr_mdt) {
+ if (c_range->lsr_index == n_range->lsr_index) {
n_range->lsr_start = c_range->lsr_start;
n_range->lsr_end = max(c_range->lsr_end,
n_range->lsr_end);
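/*
 * Worked example of the fixup rules above, with illustrative values.
 * Adjacent case: f_curr = [0x100, 0x200) and f_next = [0x200, 0x300),
 * both on lsr_index 1; the ends touch and the indices match, so f_next
 * absorbs f_curr and becomes [0x100, 0x300) while f_curr is deleted.
 * Had the indices differed, both entries would have been kept as-is.
 * Overlap case: the same absorption happens via min/max of the bounds
 * when the ranges intersect and the indices match.
 */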
*/
static inline void fld_cache_entry_add(struct fld_cache *cache,
struct fld_cache_entry *f_new,
- struct list_head *pos)
+ cfs_list_t *pos)
{
- list_add(&f_new->fce_list, pos);
- list_add(&f_new->fce_lru, &cache->fci_lru);
+ cfs_list_add(&f_new->fce_list, pos);
+ cfs_list_add(&f_new->fce_lru, &cache->fci_lru);
cache->fci_cache_count++;
fld_fix_new_list(cache);
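/*
 * Hedged note: cfs_list_add() inserts at the head of the list, so the
 * head of cache->fci_lru is assumed to hold the most recently used
 * entries while fld_cache_shrink() below reclaims from the tail.
 */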
static int fld_cache_shrink(struct fld_cache *cache)
{
struct fld_cache_entry *flde;
- struct list_head *curr;
+ cfs_list_t *curr;
int num = 0;
ENTRY;
while (cache->fci_cache_count + cache->fci_threshold >
cache->fci_cache_size && curr != &cache->fci_lru) {
- flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+ flde = cfs_list_entry(curr, struct fld_cache_entry, fce_lru);
curr = curr->prev;
fld_cache_entry_delete(cache, flde);
num++;
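/*
 * The initialization of curr is elided from the hunk above; it is
 * presumably set to cache->fci_lru.prev (the LRU tail) before the loop,
 * so the walk proceeds tail-to-head via curr->prev and deletes the
 * least recently used entries until the cache drops back under
 * fci_cache_size - fci_threshold or the list head is reached.
 */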
{
ENTRY;
- spin_lock(&cache->fci_lock);
+ cfs_spin_lock(&cache->fci_lock);
cache->fci_cache_size = 0;
fld_cache_shrink(cache);
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
EXIT;
}
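/*
 * A minimal sketch of the flush idiom above: forcing fci_cache_size to
 * zero makes the shrink condition
 *         fci_cache_count + fci_threshold > fci_cache_size
 * true for every remaining entry, so fld_cache_shrink() drains the whole
 * cache under fci_lock. Whether the original size is restored afterwards
 * is not visible in this hunk.
 */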
/* fldt */
fldt->fce_range.lsr_start = new_end;
fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
- fldt->fce_range.lsr_mdt = f_curr->fce_range.lsr_mdt;
+ fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
/* f_curr */
f_curr->fce_range.lsr_end = new_start;
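/*
 * Worked example of the three-way split above, with illustrative values.
 * Suppose f_curr = [0x100, 0x400) on lsr_index 2 and the new range is
 * [0x200, 0x300) on lsr_index 5. Afterwards:
 *
 *         f_curr: [0x100, 0x200), index 2   (lsr_end cut to new_start)
 *         f_new:  [0x200, 0x300), index 5   (presumably inserted between)
 *         fldt:   [0x300, 0x400), index 2   (the tail of the old f_curr)
 */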
const struct lu_seq_range *range = &f_new->fce_range;
const seqno_t new_start = range->lsr_start;
const seqno_t new_end = range->lsr_end;
- const mdsno_t mdt = range->lsr_mdt;
+ const mdsno_t mdt = range->lsr_index;
        /* This is the overlap case; it only checks for overlap with the
         * previous range. The fixup pass handles overlap with the next range. */
- if (f_curr->fce_range.lsr_mdt == mdt) {
+ if (f_curr->fce_range.lsr_index == mdt) {
f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
new_start);
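/*
 * Worked example of the same-index overlap rule above: if f_curr is
 * [0x100, 0x300) and the new range is [0x080, 0x200) with a matching
 * lsr_index, lsr_start is pulled back to min(0x100, 0x080) = 0x080;
 * the symmetric max() on lsr_end is elided from this hunk, so the two
 * ranges presumably collapse into their union [0x080, 0x300).
 */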
struct fld_cache_entry *f_new;
struct fld_cache_entry *f_curr;
struct fld_cache_entry *n;
- struct list_head *head;
- struct list_head *prev = NULL;
+ cfs_list_t *head;
+ cfs_list_t *prev = NULL;
const seqno_t new_start = range->lsr_start;
const seqno_t new_end = range->lsr_end;
ENTRY;
 * So we don't need to search for the new entry before starting the
 * insertion loop.
*/
- spin_lock(&cache->fci_lock);
+ cfs_spin_lock(&cache->fci_lock);
fld_cache_shrink(cache);
head = &cache->fci_entries_head;
- list_for_each_entry_safe(f_curr, n, head, fce_list) {
+ cfs_list_for_each_entry_safe(f_curr, n, head, fce_list) {
        /* stop scanning: the new entry belongs before f_curr */
if (new_end < f_curr->fce_range.lsr_start)
break;
/* Add new entry to cache and lru list. */
fld_cache_entry_add(cache, f_new, prev);
out:
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
EXIT;
}
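/*
 * A minimal usage sketch for the insert path above, assuming from
 * context that the enclosing function is fld_cache_insert(cache, range)
 * and that the elided lines allocate f_new from the passed-in range:
 */
#if 0   /* illustrative only */
        struct lu_seq_range range = {
                .lsr_start = 0x400,
                .lsr_end   = 0x500,
                .lsr_index = 3,
        };
        fld_cache_insert(cache, &range);
#endif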
const seqno_t seq, struct lu_seq_range *range)
{
struct fld_cache_entry *flde;
- struct list_head *head;
+ cfs_list_t *head;
ENTRY;
- spin_lock(&cache->fci_lock);
+ cfs_spin_lock(&cache->fci_lock);
head = &cache->fci_entries_head;
cache->fci_stat.fst_count++;
- list_for_each_entry(flde, head, fce_list) {
+ cfs_list_for_each_entry(flde, head, fce_list) {
if (flde->fce_range.lsr_start > seq)
break;
*range = flde->fce_range;
/* update position of this entry in lru list. */
- list_move(&flde->fce_lru, &cache->fci_lru);
+ cfs_list_move(&flde->fce_lru, &cache->fci_lru);
cache->fci_stat.fst_cache++;
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
RETURN(0);
}
}
- spin_unlock(&cache->fci_lock);
+ cfs_spin_unlock(&cache->fci_lock);
RETURN(-ENOENT);
}
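/*
 * A minimal usage sketch for the lookup path above, assuming from
 * context that the enclosing function is fld_cache_lookup(); the helper
 * names are hypothetical:
 */
#if 0   /* illustrative only */
        struct lu_seq_range found;
        int rc = fld_cache_lookup(cache, seq, &found);

        if (rc == 0)
                /* hit: seq lies in [found.lsr_start, found.lsr_end) */
                use_index(found.lsr_index);     /* hypothetical helper */
        else
                /* rc == -ENOENT: miss, fall back to the FLD server */
                handle_miss(seq);               /* hypothetical helper */
#endif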