struct ll_sa_entry {
/* link into sai->sai_entries */
- cfs_list_t se_link;
+ struct list_head se_link;
/* link into sai->sai_entries_{received,stated} */
- cfs_list_t se_list;
+ struct list_head se_list;
/* link into sai hash table locally */
- cfs_list_t se_hash;
+ struct list_head se_hash;
/* entry reference count */
- cfs_atomic_t se_refcount;
+ atomic_t se_refcount;
/* entry index in the sai */
__u64 se_index;
/* low layer ldlm lock handle */
static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
{
- return cfs_list_empty(&entry->se_hash);
+ return list_empty(&entry->se_hash);
}
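This detachment test only works because entries are always removed with list_del_init(), which re-points the node at itself; plain list_del() poisons the pointers and makes the test meaningless. A minimal sketch of the invariant, using an invented node type:

	#include <linux/list.h>

	struct node {
		struct list_head link;
	};

	static bool node_detached(struct node *n)
	{
		/* True after INIT_LIST_HEAD() or list_del_init();
		 * never use this after a plain list_del(). */
		return list_empty(&n->link);
	}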
/*
int i = ll_sa_entry_hash(entry->se_qstr.hash);
spin_lock(&sai->sai_cache_lock[i]);
- cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
+ list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
spin_unlock(&sai->sai_cache_lock[i]);
}
int i = ll_sa_entry_hash(entry->se_qstr.hash);
spin_lock(&sai->sai_cache_lock[i]);
- cfs_list_del_init(&entry->se_hash);
+ list_del_init(&entry->se_hash);
spin_unlock(&sai->sai_cache_lock[i]);
}
static inline struct ll_sa_entry *
sa_first_received_entry(struct ll_statahead_info *sai)
{
- return cfs_list_entry(sai->sai_entries_received.next,
- struct ll_sa_entry, se_list);
+ return list_entry(sai->sai_entries_received.next,
+ struct ll_sa_entry, se_list);
}
static inline struct ll_inode_info *
agl_first_entry(struct ll_statahead_info *sai)
{
- return cfs_list_entry(sai->sai_entries_agl.next,
- struct ll_inode_info, lli_agl_list);
+ return list_entry(sai->sai_entries_agl.next,
+ struct ll_inode_info, lli_agl_list);
}
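Both accessors rely on list_entry(), which is container_of(): given a pointer to the embedded list_head, subtract the member's offset to recover the enclosing structure. Roughly, the first accessor is equivalent to:

	struct ll_sa_entry *first =
		(struct ll_sa_entry *)((char *)sai->sai_entries_received.next -
				       offsetof(struct ll_sa_entry, se_list));

Note that neither helper checks for an empty list; callers must test sa_received_empty() or agl_list_empty() first, since on an empty list .next points back at the head itself.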
static inline int sa_sent_full(struct ll_statahead_info *sai)
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
- return cfs_list_empty(&sai->sai_entries_received);
+ return list_empty(&sai->sai_entries_received);
}
static inline int agl_list_empty(struct ll_statahead_info *sai)
{
- return cfs_list_empty(&sai->sai_entries_agl);
+ return list_empty(&sai->sai_entries_agl);
}
/**
lli = ll_i2info(sai->sai_inode);
spin_lock(&lli->lli_sa_lock);
- cfs_list_add_tail(&entry->se_link, &sai->sai_entries);
- CFS_INIT_LIST_HEAD(&entry->se_list);
+ list_add_tail(&entry->se_link, &sai->sai_entries);
+ INIT_LIST_HEAD(&entry->se_list);
ll_sa_entry_enhash(sai, entry);
spin_unlock(&lli->lli_sa_lock);
struct ll_sa_entry *entry;
int i = ll_sa_entry_hash(qstr->hash);
- cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
+ list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
if (entry->se_qstr.hash == qstr->hash &&
entry->se_qstr.len == qstr->len &&
memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
{
struct ll_sa_entry *entry;
- cfs_list_for_each_entry(entry, &sai->sai_entries, se_link) {
+ list_for_each_entry(entry, &sai->sai_entries, se_link) {
if (entry->se_index == index) {
LASSERT(atomic_read(&entry->se_refcount) > 0);
atomic_inc(&entry->se_refcount);
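The reference is taken while the lock protecting sai_entries is still held (the LASSERT documents that a found entry must already be pinned by the list itself), so the entry cannot be freed between lookup and return. A sketch of that get-under-lock shape, with a hypothetical helper name:

	/* Hypothetical helper: caller holds lli_sa_lock. */
	static struct ll_sa_entry *
	sa_entry_get_by_index(struct ll_statahead_info *sai, __u64 index)
	{
		struct ll_sa_entry *entry;

		list_for_each_entry(entry, &sai->sai_entries, se_link) {
			if (entry->se_index == index) {
				atomic_inc(&entry->se_refcount); /* pin under lock */
				return entry;
			}
		}
		return NULL;
	}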
entry->se_qstr.len, entry->se_qstr.name, entry,
entry->se_index);
- LASSERT(cfs_list_empty(&entry->se_link));
- LASSERT(cfs_list_empty(&entry->se_list));
+ LASSERT(list_empty(&entry->se_link));
+ LASSERT(list_empty(&entry->se_list));
LASSERT(ll_sa_entry_unhashed(entry));
ll_sa_entry_cleanup(sai, entry);
struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
LASSERT(!ll_sa_entry_unhashed(entry));
- LASSERT(!cfs_list_empty(&entry->se_link));
+ LASSERT(!list_empty(&entry->se_link));
ll_sa_entry_unhash(sai, entry);
spin_lock(&lli->lli_sa_lock);
entry->se_stat = SA_ENTRY_DEST;
- cfs_list_del_init(&entry->se_link);
- if (likely(!cfs_list_empty(&entry->se_list)))
- cfs_list_del_init(&entry->se_list);
+ list_del_init(&entry->se_link);
+ if (likely(!list_empty(&entry->se_list)))
+ list_del_init(&entry->se_list);
spin_unlock(&lli->lli_sa_lock);
ll_sa_entry_put(sai, entry);
do_sa_entry_fini(sai, entry);
/* Drop old entries; only the 'scanner' process does this, so no locking is needed */
- cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
+ list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
if (!is_omitted_entry(sai, pos->se_index))
break;
do_sa_entry_fini(sai, pos);
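The _safe iterator saves each successor before the body runs, so do_sa_entry_fini() can unlink and free pos without invalidating the walk; the plain list_for_each_entry would dereference freed memory. The same shape in isolation, assuming the body deletes the current node:

	struct ll_sa_entry *pos, *next;

	list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
		list_del_init(&pos->se_link);	/* ok: 'next' already saved */
		/* ... free or recycle 'pos' here ... */
	}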
struct ll_sa_entry *entry, se_stat_t stat)
{
struct ll_sa_entry *se;
- cfs_list_t *pos = &sai->sai_entries_stated;
+ struct list_head *pos = &sai->sai_entries_stated;
- if (!cfs_list_empty(&entry->se_list))
- cfs_list_del_init(&entry->se_list);
+ if (!list_empty(&entry->se_list))
+ list_del_init(&entry->se_list);
- cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
if (se->se_index < entry->se_index) {
pos = &se->se_list;
break;
}
}
- cfs_list_add(&entry->se_list, pos);
+ list_add(&entry->se_list, pos);
entry->se_stat = stat;
}
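The loop keeps sai_entries_stated ordered by se_index: scanning in reverse finds the last entry with a smaller index, and list_add() inserts immediately after it (or at the head, i.e. the front of the list, if none is smaller). The same insertion step in isolation, with an invented item type:

	struct item {
		__u64 index;
		struct list_head list;
	};

	static void insert_sorted(struct item *it, struct list_head *head)
	{
		struct list_head *pos = head;	/* default: front of the list */
		struct item *cur;

		list_for_each_entry_reverse(cur, head, list) {
			if (cur->index < it->index) {
				pos = &cur->list;	/* insert right after cur */
				break;
			}
		}
		list_add(&it->list, pos);	/* list_add() inserts after pos */
	}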
child->lli_agl_index = index;
spin_unlock(&child->lli_agl_lock);
- LASSERT(cfs_list_empty(&child->lli_agl_list));
+ LASSERT(list_empty(&child->lli_agl_list));
igrab(inode);
spin_lock(&parent->lli_agl_lock);
if (agl_list_empty(sai))
added = 1;
- cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+ list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
spin_unlock(&parent->lli_agl_lock);
} else {
spin_unlock(&child->lli_agl_lock);
init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&sai->sai_entries);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
+ INIT_LIST_HEAD(&sai->sai_entries);
+ INIT_LIST_HEAD(&sai->sai_entries_received);
+ INIT_LIST_HEAD(&sai->sai_entries_stated);
+ INIT_LIST_HEAD(&sai->sai_entries_agl);
for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
- CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
+ INIT_LIST_HEAD(&sai->sai_cache[i]);
spin_lock_init(&sai->sai_cache_lock[i]);
}
atomic_set(&sai->sai_cache_count, 0);
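INIT_LIST_HEAD() points both links of the head back at itself, which is precisely the state list_empty() tests for; every head, including each cache bucket above, must reach this state before its first list_add(). In miniature:

	struct list_head head;

	INIT_LIST_HEAD(&head);	/* head.next == head.prev == &head */
	/* list_empty(&head) is now true; the head is ready for use. */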
PFID(&lli->lli_fid),
sai->sai_sent, sai->sai_replied);
- cfs_list_for_each_entry_safe(entry, next,
- &sai->sai_entries, se_link)
+ list_for_each_entry_safe(entry, next,
+ &sai->sai_entries, se_link)
do_sa_entry_fini(sai, entry);
LASSERT(list_empty(&sai->sai_entries));
int rc;
ENTRY;
- LASSERT(cfs_list_empty(&lli->lli_agl_list));
+ LASSERT(list_empty(&lli->lli_agl_list));
/* AGL may fall behind statahead by one entry */
if (is_omitted_entry(sai, index + 1)) {
}
entry = sa_first_received_entry(sai);
atomic_inc(&entry->se_refcount);
- cfs_list_del_init(&entry->se_list);
+ list_del_init(&entry->se_list);
spin_unlock(&lli->lli_sa_lock);
LASSERT(entry->se_handle != 0);
entry->se_handle = it->d.lustre.it_lock_handle;
ll_intent_drop_lock(it);
wakeup = sa_received_empty(sai);
- cfs_list_add_tail(&entry->se_list,
-                   &sai->sai_entries_received);
+ list_add_tail(&entry->se_list,
+               &sai->sai_entries_received);
}
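The producer snapshots emptiness before the add and wakes the statahead thread only on the empty-to-non-empty edge, so a burst of replies costs one wakeup rather than one per entry. A hedged sketch of the pattern (the waitqueue parameter is invented; the real code signals its own thread control waitqueue):

	static void push_received(struct ll_statahead_info *sai,
				  struct ll_sa_entry *entry,
				  wait_queue_head_t *waitq)	/* hypothetical */
	{
		struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
		int wakeup;

		spin_lock(&lli->lli_sa_lock);
		wakeup = sa_received_empty(sai);	/* empty before adding? */
		list_add_tail(&entry->se_list, &sai->sai_entries_received);
		spin_unlock(&lli->lli_sa_lock);

		if (wakeup)
			wake_up(waitq);	/* one wakeup per empty->non-empty edge */
	}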
sai->sai_replied++;
* so check whether the list is empty again. */
if (!agl_list_empty(sai)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode, sai);
} else {
sai->sai_agl_valid = 0;
while (!agl_list_empty(sai)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
clli->lli_agl_index = 0;
iput(&clli->lli_vfs_inode);
spin_lock(&plli->lli_agl_lock);
while (!agl_list_empty(sai)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode,
sai);
while (!agl_list_empty(sai) &&
thread_is_running(thread)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode, sai);
spin_lock(&plli->lli_agl_lock);
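All of these drain loops share one shape: pop the first entry under lli_agl_lock, drop the lock before the heavyweight call (ll_agl_trigger() takes locks of its own and may sleep, so it must not run under a spinlock), then retake the lock to re-test the condition. Distilled:

	spin_lock(&plli->lli_agl_lock);
	while (!agl_list_empty(sai)) {
		clli = agl_first_entry(sai);
		list_del_init(&clli->lli_agl_list);
		spin_unlock(&plli->lli_agl_lock);	/* drop before blocking work */

		ll_agl_trigger(&clli->lli_vfs_inode, sai);

		spin_lock(&plli->lli_agl_lock);		/* retake to re-test */
	}
	spin_unlock(&plli->lli_agl_lock);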
if (sai) {
thread = &sai->sai_thread;
if (unlikely(thread_is_stopped(thread) &&
- cfs_list_empty(&sai->sai_entries_stated))) {
+ list_empty(&sai->sai_entries_stated))) {
/* to release resource */
ll_stop_statahead(dir, lli->lli_opendir_key);
RETURN(-EAGAIN);