/* dynamic ioctl number support routines */
static struct llioc_ctl_data {
struct rw_semaphore ioc_sem;
- cfs_list_t ioc_head;
+ struct list_head ioc_head;
} llioc = {
- __RWSEM_INITIALIZER(llioc.ioc_sem),
- CFS_LIST_HEAD_INIT(llioc.ioc_head)
+ __RWSEM_INITIALIZER(llioc.ioc_sem),
+ LIST_HEAD_INIT(llioc.ioc_head)
};
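
The converted initializer above keeps the positional form; with designated initializers the same static setup reads more explicitly. A minimal sketch of that spelling (illustrative type and variable names, not part of the patch):

#include <linux/list.h>
#include <linux/rwsem.h>

static struct ioc_ctl_demo {
	struct rw_semaphore	sem;
	struct list_head	head;
} ioc_demo = {
	.sem	= __RWSEM_INITIALIZER(ioc_demo.sem),
	.head	= LIST_HEAD_INIT(ioc_demo.head),
};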
struct llioc_data {
- cfs_list_t iocd_list;
+ struct list_head iocd_list;
unsigned int iocd_size;
llioc_callback_t iocd_cb;
unsigned int iocd_count;
memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
down_write(&llioc.ioc_sem);
- cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+ list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
up_write(&llioc.ioc_sem);
RETURN(in_data);
return;
down_write(&llioc.ioc_sem);
- cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+ list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
if (tmp == magic) {
unsigned int size = tmp->iocd_size;
- cfs_list_del(&tmp->iocd_list);
+ list_del(&tmp->iocd_list);
up_write(&llioc.ioc_sem);
OBD_FREE(tmp, size);
int rc = -EINVAL, i;
down_read(&llioc.ioc_sem);
- cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+ list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
if (cmd != data->iocd_cmd[i])
continue;
*/
/* capas for OSS writeback and those that failed to renew */
-static CFS_LIST_HEAD(ll_idle_capas);
+static struct list_head ll_idle_capas = LIST_HEAD_INIT(ll_idle_capas);
static struct ptlrpc_thread ll_capa_thread;
-static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
+static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
/* llite capa renewal timer */
struct timer_list ll_capa_timer;
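
For file-scope lists the two spellings are interchangeable: LIST_HEAD(name) expands to exactly the LIST_HEAD_INIT() form the hunk above introduces. A quick sketch (names illustrative):

#include <linux/list.h>

static LIST_HEAD(idle_capas_a);			/* macro form */
static struct list_head idle_capas_b =
	LIST_HEAD_INIT(idle_capas_b);		/* expanded form */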
* expired capa, return 1.
*/
spin_lock(&capa_lock);
- if (!cfs_list_empty(ll_capa_list)) {
- ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
+ if (!list_empty(ll_capa_list)) {
+ ocapa = list_entry(ll_capa_list->next, struct obd_capa,
c_list);
expired = capa_is_to_expire(ocapa);
if (!expired)
update_capa_timer(ocapa, capa_renewal_time(ocapa));
- } else if (!cfs_list_empty(&ll_idle_capas)) {
- ocapa = cfs_list_entry(ll_idle_capas.next, struct obd_capa,
+ } else if (!list_empty(&ll_idle_capas)) {
+ ocapa = list_entry(ll_idle_capas.next, struct obd_capa,
c_list);
expired = capa_is_expired(ocapa);
if (!expired)
return expired;
}
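
Both branches above use the classic first-element idiom, list_entry(head->next, ...); where available, list_first_entry() spells it more directly. Sketch of the equivalent call for the first branch:

/* same as list_entry(ll_capa_list->next, struct obd_capa, c_list) */
ocapa = list_first_entry(ll_capa_list, struct obd_capa, c_list);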
-static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
+static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
{
struct obd_capa *tmp;
- cfs_list_t *before = NULL;
+ struct list_head *before = NULL;
/* TODO: client capa is sorted by expiry, this could be optimized */
- cfs_list_for_each_entry_reverse(tmp, head, c_list) {
+ list_for_each_entry_reverse(tmp, head, c_list) {
if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
before = &tmp->c_list;
break;
}
LASSERT(&ocapa->c_list != before);
- cfs_list_add(&ocapa->c_list, before ?: head);
+ list_add(&ocapa->c_list, before ?: head);
}
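
sort_add_capa() is a reverse-scan sorted insert: walk backwards until an entry with an earlier-or-equal expiry is found, then link the new node right after it, falling back to the head when the new node sorts first (list_add() inserts after its second argument). The statahead hunks below reuse the same shape. A self-contained sketch with an illustrative node type:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head	link;
	u64			key;	/* sort key, e.g. an expiry time */
};

static void sorted_insert(struct item *new, struct list_head *head)
{
	struct item *tmp;
	struct list_head *before = NULL;

	/* list is kept ascending by key; scan from the tail */
	list_for_each_entry_reverse(tmp, head, link) {
		if (new->key >= tmp->key) {
			before = &tmp->link;
			break;
		}
	}
	list_add(&new->link, before ?: head);
}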
static inline int obd_capa_open_count(struct obd_capa *oc)
LASSERT(lli->lli_mds_capa == ocapa);
lli->lli_mds_capa = NULL;
} else if (capa_for_oss(&ocapa->c_capa)) {
- cfs_list_del_init(&ocapa->u.cli.lli_list);
+ list_del_init(&ocapa->u.cli.lli_list);
}
DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
- cfs_list_del_init(&ocapa->c_list);
+ list_del_init(&ocapa->c_list);
capa_count[CAPA_SITE_CLIENT]--;
/* release the ref when alloc */
capa_put(ocapa);
next = NULL;
spin_lock(&capa_lock);
- cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
+ list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
__u64 ibits;
LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
break;
}
- cfs_list_del_init(&ocapa->c_list);
+ list_del_init(&ocapa->c_list);
/* for MDS capability, only renew those which belong to
* dir, or its inode is opened, or client holds LOOKUP
if (next)
update_capa_timer(next, capa_renewal_time(next));
- cfs_list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
+ list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
c_list) {
if (!capa_is_expired(ocapa)) {
if (!next)
opc == CAPA_OPC_OSS_TRUNC);
spin_lock(&capa_lock);
- cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+ list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
if (capa_is_expired(ocapa))
continue;
if ((opc & CAPA_OPC_OSS_WRITE) &&
struct obd_capa *ocapa;
/* inside capa_lock */
- cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+ list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
if ((capa_opc(&ocapa->c_capa) & opc) != opc)
continue;
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_capa *tmp;
- cfs_list_t *next = NULL;
+ struct list_head *next = NULL;
/* capa is sorted in lli_oss_capas so lookup can always find the
* latest one */
- cfs_list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
+ list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
next = &tmp->u.cli.lli_list;
break;
}
}
LASSERT(&ocapa->u.cli.lli_list != next);
- cfs_list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
+ list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
}
static struct obd_capa *do_add_oss_capa(struct inode *inode,
old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
if (!old) {
ocapa->u.cli.inode = inode;
- CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+ INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
capa_count[CAPA_SITE_CLIENT]++;
DEBUG_CAPA(D_SEC, capa, "add OSS");
}
}
- cfs_list_del_init(&ocapa->c_list);
+ list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, &ll_idle_capas);
spin_unlock(&capa_lock);
DEBUG_CAPA(D_SEC, capa, "renew");
EXIT;
retry:
- cfs_list_del_init(&ocapa->c_list);
+ list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, ll_capa_list);
update_capa_timer(ocapa, capa_renewal_time(ocapa));
spin_unlock(&capa_lock);
if (ocapa)
ll_delete_capa(ocapa);
- cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+ list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
u.cli.lli_list)
ll_delete_capa(ocapa);
spin_unlock(&capa_lock);
ENTRY;
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
- cfs_list_add(&page->cpg_pending_linkage,
+ if (page != NULL && list_empty(&page->cpg_pending_linkage))
+ list_add(&page->cpg_pending_linkage,
&club->cob_pending_list);
spin_unlock(&lli->lli_lock);
EXIT;
ENTRY;
spin_lock(&lli->lli_lock);
- if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
- cfs_list_del_init(&page->cpg_pending_linkage);
+ if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+ list_del_init(&page->cpg_pending_linkage);
rc = 1;
}
spin_unlock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- cfs_list_empty(&club->cob_pending_list)) {
+ list_empty(&club->cob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
/* DONE_WRITING is allowed and inode has no dirty page. */
spin_lock(&lcq->lcq_lock);
- LASSERT(cfs_list_empty(&lli->lli_close_list));
+ LASSERT(list_empty(&lli->lli_close_list));
CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
PFID(ll_inode2fid(inode)));
- cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+ list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
/* Avoid a concurrent insertion into the close thread queue:
* an inode is already in the close thread, open(), write(),
ENTRY;
spin_lock(&lli->lli_lock);
- if (!(cfs_list_empty(&club->cob_pending_list))) {
+ if (!(list_empty(&club->cob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och != NULL);
LASSERT(lli->lli_pending_och == NULL);
}
}
- LASSERT(cfs_list_empty(&club->cob_pending_list));
+ LASSERT(list_empty(&club->cob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
return -ENOMEM;
spin_lock_init(&lcq->lcq_lock);
- CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+ INIT_LIST_HEAD(&lcq->lcq_head);
init_waitqueue_head(&lcq->lcq_waitq);
init_completion(&lcq->lcq_comp);
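
The queue initialized here is the usual producer/consumer handoff: producers append under lcq_lock and wake lcq_waitq (see the list_add_tail() hunk above). A minimal sketch of the consumer side, assuming the close thread pops one inode at a time (the thread body itself is not in this hunk):

static struct ll_inode_info *lcq_dequeue(struct ll_close_queue *lcq)
{
	struct ll_inode_info *lli = NULL;

	spin_lock(&lcq->lcq_lock);
	if (!list_empty(&lcq->lcq_head)) {
		lli = list_entry(lcq->lcq_head.next,
				 struct ll_inode_info, lli_close_list);
		list_del_init(&lli->lli_close_list);
	}
	spin_unlock(&lcq->lcq_lock);
	return lli;
}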
/* llite setxid/access permission for user on remote client */
struct ll_remote_perm {
- cfs_hlist_node_t lrp_list;
+ struct hlist_node lrp_list;
uid_t lrp_uid;
gid_t lrp_gid;
uid_t lrp_fsuid;
spinlock_t lli_lock;
struct posix_acl *lli_posix_acl;
- cfs_hlist_head_t *lli_remote_perms;
- struct mutex lli_rmtperm_mutex;
+ struct hlist_head *lli_remote_perms;
+ struct mutex lli_rmtperm_mutex;
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
struct rw_semaphore f_glimpse_sem;
cfs_time_t f_glimpse_time;
- cfs_list_t f_agl_list;
+ struct list_head f_agl_list;
__u64 f_agl_index;
/* for writepage() only to communicate to fsync */
#define RCE_HASHES 32
struct rmtacl_ctl_entry {
- cfs_list_t rce_list;
+ struct list_head rce_list;
pid_t rce_key; /* hash key */
int rce_ops; /* acl operation type */
};
struct rmtacl_ctl_table {
spinlock_t rct_lock;
- cfs_list_t rct_entries[RCE_HASHES];
+ struct list_head rct_entries[RCE_HASHES];
};
#define EE_HASHES 32
struct eacl_entry {
- cfs_list_t ee_list;
+ struct list_head ee_list;
pid_t ee_key; /* hash key */
struct lu_fid ee_fid;
int ee_type; /* ACL type for ACCESS or DEFAULT */
struct eacl_table {
spinlock_t et_lock;
- cfs_list_t et_entries[EE_HASHES];
+ struct list_head et_entries[EE_HASHES];
};
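
Both tables follow the same fixed-bucket pattern: an array of list_heads indexed by a trivial hash of the pid, all buckets initialized up front (the rct_init()/et_init() hunks below do exactly this). A generic sketch with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

#define N_BUCKETS 32			/* RCE_HASHES/EE_HASHES analogue */

struct bucket_table {
	spinlock_t		lock;
	struct list_head	buckets[N_BUCKETS];
};

static void bucket_table_init(struct bucket_table *t)
{
	int i;

	spin_lock_init(&t->lock);
	for (i = 0; i < N_BUCKETS; i++)
		INIT_LIST_HEAD(&t->buckets[i]);
}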
struct ll_sb_info {
- cfs_list_t ll_list;
+ struct list_head ll_list;
/* this protects pglist and ra_info. It isn't safe to
* grab from interrupt contexts */
spinlock_t ll_lock;
int ll_flags;
unsigned int ll_umounting:1,
ll_xattr_cache_enabled:1;
- cfs_list_t ll_conn_chain; /* per-conn chain of SBs */
+ /* per-conn chain of SBs */
+ struct list_head ll_conn_chain;
struct lustre_client_ocd ll_lco;
- cfs_list_t ll_orphan_dentry_list; /*please don't ask -p*/
+ /*please don't ask -p*/
+ struct list_head ll_orphan_dentry_list;
struct ll_close_queue *ll_lcq;
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
pgoff_t lrr_start;
pgoff_t lrr_count;
struct task_struct *lrr_reader;
- cfs_list_t lrr_linkage;
+ struct list_head lrr_linkage;
};
/*
* progress against this file descriptor. Used by read-ahead code,
* protected by ->ras_lock.
*/
- cfs_list_t ras_read_beads;
+ struct list_head ras_read_beads;
/*
* The following 3 items are used for detecting the stride I/O
* mode.
/* llite/llite_close.c */
struct ll_close_queue {
spinlock_t lcq_lock;
- cfs_list_t lcq_head;
+ struct list_head lcq_head;
wait_queue_head_t lcq_waitq;
struct completion lcq_comp;
atomic_t lcq_stop;
extern struct kmem_cache *ll_remote_perm_cachep;
extern struct kmem_cache *ll_rmtperm_hash_cachep;
-void free_rmtperm_hash(cfs_hlist_head_t *hash);
+void free_rmtperm_hash(struct hlist_head *hash);
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
struct kmem_cache *ll_file_data_slab;
-static LIST_HEAD(ll_super_blocks);
+static struct list_head ll_super_blocks = LIST_HEAD_INIT(ll_super_blocks);
static DEFINE_SPINLOCK(ll_sb_lock);
#ifndef log2
sbi->ll_cache.ccc_lru_max = lru_page_max;
atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
- CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
+ INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
SBI_DEFAULT_READAHEAD_WHOLE_MAX;
- CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
- CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
+ INIT_LIST_HEAD(&sbi->ll_conn_chain);
+ INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
ll_generate_random_uuid(uuid);
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
spin_lock(&ll_sb_lock);
- cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
+ list_add_tail(&sbi->ll_list, &ll_super_blocks);
spin_unlock(&ll_sb_lock);
sbi->ll_flags |= LL_SBI_VERBOSE;
if (sbi != NULL) {
spin_lock(&ll_sb_lock);
- cfs_list_del(&sbi->ll_list);
+ list_del(&sbi->ll_list);
spin_unlock(&ll_sb_lock);
OBD_FREE(sbi, sizeof(*sbi));
}
cl_sb_fini(sb);
- cfs_list_del(&sbi->ll_conn_chain);
+ list_del(&sbi->ll_conn_chain);
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
obd_disconnect(sbi->ll_dt_exp);
mutex_init(&lli->lli_write_mutex);
init_rwsem(&lli->lli_glimpse_sem);
lli->lli_glimpse_time = 0;
- CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
+ INIT_LIST_HEAD(&lli->lli_agl_list);
lli->lli_agl_index = 0;
lli->lli_async_rc = 0;
}
if (!rce)
return NULL;
- CFS_INIT_LIST_HEAD(&rce->rce_list);
+ INIT_LIST_HEAD(&rce->rce_list);
rce->rce_key = key;
rce->rce_ops = ops;
static void rce_free(struct rmtacl_ctl_entry *rce)
{
- if (!cfs_list_empty(&rce->rce_list))
- cfs_list_del(&rce->rce_list);
+ if (!list_empty(&rce->rce_list))
+ list_del(&rce->rce_list);
OBD_FREE_PTR(rce);
}
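
The list_empty() guard in rce_free() is only valid because entries start out self-linked (INIT_LIST_HEAD() in rce_alloc()); once plain list_del() has run, the node holds poison pointers and must not be re-tested, which is fine here only because it is freed immediately. A tiny sketch of the state transitions:

#include <linux/list.h>

static LIST_HEAD(demo_head);

static void membership_demo(void)
{
	struct list_head node;

	INIT_LIST_HEAD(&node);		/* self-linked: list_empty() true  */
	list_add(&node, &demo_head);	/* linked in:   list_empty() false */
	list_del_init(&node);		/* self-linked again, safe to re-test */
}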
pid_t key)
{
struct rmtacl_ctl_entry *rce;
- cfs_list_t *head = &rct->rct_entries[rce_hashfunc(key)];
+ struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
- cfs_list_for_each_entry(rce, head, rce_list)
+ list_for_each_entry(rce, head, rce_list)
if (rce->rce_key == key)
return rce;
"[key: %d] [ops: %d]\n", (int)key, ops);
rce_free(e);
}
- cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+ list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
spin_unlock(&rct->rct_lock);
return 0;
spin_lock_init(&rct->rct_lock);
for (i = 0; i < RCE_HASHES; i++)
- CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
+ INIT_LIST_HEAD(&rct->rct_entries[i]);
}
void rct_fini(struct rmtacl_ctl_table *rct)
spin_lock(&rct->rct_lock);
for (i = 0; i < RCE_HASHES; i++)
- while (!cfs_list_empty(&rct->rct_entries[i])) {
- rce = cfs_list_entry(rct->rct_entries[i].next,
+ while (!list_empty(&rct->rct_entries[i])) {
+ rce = list_entry(rct->rct_entries[i].next,
struct rmtacl_ctl_entry, rce_list);
rce_free(rce);
}
if (!ee)
return NULL;
- CFS_INIT_LIST_HEAD(&ee->ee_list);
+ INIT_LIST_HEAD(&ee->ee_list);
ee->ee_key = key;
ee->ee_fid = *fid;
ee->ee_type = type;
void ee_free(struct eacl_entry *ee)
{
- if (!cfs_list_empty(&ee->ee_list))
- cfs_list_del(&ee->ee_list);
+ if (!list_empty(&ee->ee_list))
+ list_del(&ee->ee_list);
if (ee->ee_acl)
lustre_ext_acl_xattr_free(ee->ee_acl);
struct lu_fid *fid, int type)
{
struct eacl_entry *ee;
- cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
+ struct list_head *head = &et->et_entries[ee_hashfunc(key)];
LASSERT(fid != NULL);
- cfs_list_for_each_entry(ee, head, ee_list)
+ list_for_each_entry(ee, head, ee_list)
if (ee->ee_key == key) {
if (lu_fid_eq(&ee->ee_fid, fid) &&
ee->ee_type == type) {
- cfs_list_del_init(&ee->ee_list);
+ list_del_init(&ee->ee_list);
return ee;
}
}
void et_search_free(struct eacl_table *et, pid_t key)
{
struct eacl_entry *ee, *next;
- cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
+ struct list_head *head = &et->et_entries[ee_hashfunc(key)];
spin_lock(&et->et_lock);
- cfs_list_for_each_entry_safe(ee, next, head, ee_list)
+ list_for_each_entry_safe(ee, next, head, ee_list)
if (ee->ee_key == key)
ee_free(ee);
(int)key, PFID(fid), type);
ee_free(e);
}
- cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+ list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
spin_unlock(&et->et_lock);
return 0;
spin_lock_init(&et->et_lock);
for (i = 0; i < EE_HASHES; i++)
- CFS_INIT_LIST_HEAD(&et->et_entries[i]);
+ INIT_LIST_HEAD(&et->et_entries[i]);
}
void et_fini(struct eacl_table *et)
spin_lock(&et->et_lock);
for (i = 0; i < EE_HASHES; i++)
- while (!cfs_list_empty(&et->et_entries[i])) {
- ee = cfs_list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
+ while (!list_empty(&et->et_entries[i])) {
+ ee = list_entry(et->et_entries[i].next,
+ struct eacl_entry, ee_list);
ee_free(ee);
}
spin_unlock(&et->et_lock);
OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
if (lrp)
- CFS_INIT_HLIST_NODE(&lrp->lrp_list);
+ INIT_HLIST_NODE(&lrp->lrp_list);
return lrp;
}
if (!lrp)
return;
- if (!cfs_hlist_unhashed(&lrp->lrp_list))
- cfs_hlist_del(&lrp->lrp_list);
+ if (!hlist_unhashed(&lrp->lrp_list))
+ hlist_del(&lrp->lrp_list);
OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
}
static struct hlist_head *alloc_rmtperm_hash(void)
{
- cfs_hlist_head_t *hash;
+ struct hlist_head *hash;
int i;
OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
return NULL;
for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- CFS_INIT_HLIST_HEAD(hash + i);
+ INIT_HLIST_HEAD(hash + i);
return hash;
}
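
alloc_rmtperm_hash() sets up REMOTE_PERM_HASHSIZE hlist buckets; the lookup side (do_check_remote_perm(), below) walks one bucket with the four-argument hlist_for_each_entry() this pre-3.9 tree targets. A sketch of that walk, assuming the uid is reduced to a bucket index by masking:

static struct ll_remote_perm *
rmtperm_lookup(struct hlist_head *hash, uid_t uid)
{
	struct ll_remote_perm *lrp;
	struct hlist_node *node;
	struct hlist_head *head = hash + (uid & (REMOTE_PERM_HASHSIZE - 1));

	hlist_for_each_entry(lrp, node, head, lrp_list)
		if (lrp->lrp_uid == uid)
			return lrp;
	return NULL;
}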
-void free_rmtperm_hash(cfs_hlist_head_t *hash)
+void free_rmtperm_hash(struct hlist_head *hash)
{
int i;
struct ll_remote_perm *lrp;
- cfs_hlist_node_t *node, *next;
+ struct hlist_node *node, *next;
if (!hash)
return;
* MDT when client get remote permission. */
static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
{
- cfs_hlist_head_t *head;
+ struct hlist_head *head;
struct ll_remote_perm *lrp;
- cfs_hlist_node_t *node;
+ struct hlist_node *node;
int found = 0, rc;
ENTRY;
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_remote_perm *lrp = NULL, *tmp = NULL;
- cfs_hlist_head_t *head, *perm_hash = NULL;
- cfs_hlist_node_t *node;
+ struct hlist_head *head, *perm_hash = NULL;
+ struct hlist_node *node;
ENTRY;
LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
lrp->lrp_gid = perm->rp_gid;
lrp->lrp_fsuid = perm->rp_fsuid;
lrp->lrp_fsgid = perm->rp_fsgid;
- cfs_hlist_add_head(&lrp->lrp_list, head);
+ hlist_add_head(&lrp->lrp_list, head);
}
lli->lli_rmtperm_time = cfs_time_current();
spin_unlock(&lli->lli_lock);
ptlrpc_req_finished(req);
RETURN(rc);
}
-
-#if 0 /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
- * because it will fail sanity test 48.
- */
-void ll_free_remote_perms(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- cfs_hlist_head_t *hash = lli->lli_remote_perms;
- struct ll_remote_perm *lrp;
- cfs_hlist_node_t *node, *next;
- int i;
-
- LASSERT(hash);
-
- spin_lock(&lli->lli_lock);
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
- lrp_list)
- free_ll_remote_perm(lrp);
- }
-
- spin_unlock(&lli->lli_lock);
-}
-#endif
ras->ras_consecutive_requests++;
rar->lrr_reader = current;
- cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ list_add(&rar->lrr_linkage, &ras->ras_read_beads);
spin_unlock(&ras->ras_lock);
}
ras = ll_ras_get(f);
spin_lock(&ras->ras_lock);
- cfs_list_del_init(&rar->lrr_linkage);
+ list_del_init(&rar->lrr_linkage);
spin_unlock(&ras->ras_lock);
}
{
struct ll_ra_read *scan;
- cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
if (scan->lrr_reader == current)
return scan;
}
spin_lock_init(&ras->ras_lock);
ras_reset(inode, ras, 0);
ras->ras_requests = 0;
- CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
+ INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
struct ll_sa_entry {
/* link into sai->sai_entries */
- cfs_list_t se_link;
+ struct list_head se_link;
/* link into sai->sai_entries_{received,stated} */
- cfs_list_t se_list;
+ struct list_head se_list;
/* link into sai hash table locally */
- cfs_list_t se_hash;
+ struct list_head se_hash;
/* entry reference count */
- atomic_t se_refcount;
+ atomic_t se_refcount;
/* entry index in the sai */
__u64 se_index;
/* low layer ldlm lock handle */
static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
{
- return cfs_list_empty(&entry->se_hash);
+ return list_empty(&entry->se_hash);
}
/*
int i = ll_sa_entry_hash(entry->se_qstr.hash);
spin_lock(&sai->sai_cache_lock[i]);
- cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
+ list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
spin_unlock(&sai->sai_cache_lock[i]);
}
int i = ll_sa_entry_hash(entry->se_qstr.hash);
spin_lock(&sai->sai_cache_lock[i]);
- cfs_list_del_init(&entry->se_hash);
+ list_del_init(&entry->se_hash);
spin_unlock(&sai->sai_cache_lock[i]);
}
static inline struct ll_sa_entry *
sa_first_received_entry(struct ll_statahead_info *sai)
{
- return cfs_list_entry(sai->sai_entries_received.next,
- struct ll_sa_entry, se_list);
+ return list_entry(sai->sai_entries_received.next,
+ struct ll_sa_entry, se_list);
}
static inline struct ll_inode_info *
agl_first_entry(struct ll_statahead_info *sai)
{
- return cfs_list_entry(sai->sai_entries_agl.next,
- struct ll_inode_info, lli_agl_list);
+ return list_entry(sai->sai_entries_agl.next,
+ struct ll_inode_info, lli_agl_list);
}
static inline int sa_sent_full(struct ll_statahead_info *sai)
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
- return cfs_list_empty(&sai->sai_entries_received);
+ return list_empty(&sai->sai_entries_received);
}
static inline int agl_list_empty(struct ll_statahead_info *sai)
{
- return cfs_list_empty(&sai->sai_entries_agl);
+ return list_empty(&sai->sai_entries_agl);
}
/**
lli = ll_i2info(sai->sai_inode);
spin_lock(&lli->lli_sa_lock);
- cfs_list_add_tail(&entry->se_link, &sai->sai_entries);
- CFS_INIT_LIST_HEAD(&entry->se_list);
+ list_add_tail(&entry->se_link, &sai->sai_entries);
+ INIT_LIST_HEAD(&entry->se_list);
ll_sa_entry_enhash(sai, entry);
spin_unlock(&lli->lli_sa_lock);
struct ll_sa_entry *entry;
int i = ll_sa_entry_hash(qstr->hash);
- cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
+ list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
if (entry->se_qstr.hash == qstr->hash &&
entry->se_qstr.len == qstr->len &&
memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
{
struct ll_sa_entry *entry;
- cfs_list_for_each_entry(entry, &sai->sai_entries, se_link) {
+ list_for_each_entry(entry, &sai->sai_entries, se_link) {
if (entry->se_index == index) {
LASSERT(atomic_read(&entry->se_refcount) > 0);
atomic_inc(&entry->se_refcount);
entry->se_qstr.len, entry->se_qstr.name, entry,
entry->se_index);
- LASSERT(cfs_list_empty(&entry->se_link));
- LASSERT(cfs_list_empty(&entry->se_list));
+ LASSERT(list_empty(&entry->se_link));
+ LASSERT(list_empty(&entry->se_list));
LASSERT(ll_sa_entry_unhashed(entry));
ll_sa_entry_cleanup(sai, entry);
struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
LASSERT(!ll_sa_entry_unhashed(entry));
- LASSERT(!cfs_list_empty(&entry->se_link));
+ LASSERT(!list_empty(&entry->se_link));
ll_sa_entry_unhash(sai, entry);
spin_lock(&lli->lli_sa_lock);
entry->se_stat = SA_ENTRY_DEST;
- cfs_list_del_init(&entry->se_link);
- if (likely(!cfs_list_empty(&entry->se_list)))
- cfs_list_del_init(&entry->se_list);
+ list_del_init(&entry->se_link);
+ if (likely(!list_empty(&entry->se_list)))
+ list_del_init(&entry->se_list);
spin_unlock(&lli->lli_sa_lock);
ll_sa_entry_put(sai, entry);
do_sa_entry_fini(sai, entry);
/* drop old entry, only 'scanner' process does this, no need to lock */
- cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
+ list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
if (!is_omitted_entry(sai, pos->se_index))
break;
do_sa_entry_fini(sai, pos);
struct ll_sa_entry *entry, se_stat_t stat)
{
struct ll_sa_entry *se;
- cfs_list_t *pos = &sai->sai_entries_stated;
+ struct list_head *pos = &sai->sai_entries_stated;
- if (!cfs_list_empty(&entry->se_list))
- cfs_list_del_init(&entry->se_list);
+ if (!list_empty(&entry->se_list))
+ list_del_init(&entry->se_list);
- cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
if (se->se_index < entry->se_index) {
pos = &se->se_list;
break;
}
}
- cfs_list_add(&entry->se_list, pos);
+ list_add(&entry->se_list, pos);
entry->se_stat = stat;
}
child->lli_agl_index = index;
spin_unlock(&child->lli_agl_lock);
- LASSERT(cfs_list_empty(&child->lli_agl_list));
+ LASSERT(list_empty(&child->lli_agl_list));
igrab(inode);
spin_lock(&parent->lli_agl_lock);
if (agl_list_empty(sai))
added = 1;
- cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+ list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
spin_unlock(&parent->lli_agl_lock);
} else {
spin_unlock(&child->lli_agl_lock);
init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&sai->sai_entries);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
+ INIT_LIST_HEAD(&sai->sai_entries);
+ INIT_LIST_HEAD(&sai->sai_entries_received);
+ INIT_LIST_HEAD(&sai->sai_entries_stated);
+ INIT_LIST_HEAD(&sai->sai_entries_agl);
for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
- CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
+ INIT_LIST_HEAD(&sai->sai_cache[i]);
spin_lock_init(&sai->sai_cache_lock[i]);
}
atomic_set(&sai->sai_cache_count, 0);
PFID(&lli->lli_fid),
sai->sai_sent, sai->sai_replied);
- cfs_list_for_each_entry_safe(entry, next,
- &sai->sai_entries, se_link)
+ list_for_each_entry_safe(entry, next,
+ &sai->sai_entries, se_link)
do_sa_entry_fini(sai, entry);
LASSERT(list_empty(&sai->sai_entries));
int rc;
ENTRY;
- LASSERT(cfs_list_empty(&lli->lli_agl_list));
+ LASSERT(list_empty(&lli->lli_agl_list));
/* AGL may fall behind statahead by one entry */
if (is_omitted_entry(sai, index + 1)) {
}
entry = sa_first_received_entry(sai);
atomic_inc(&entry->se_refcount);
- cfs_list_del_init(&entry->se_list);
+ list_del_init(&entry->se_list);
spin_unlock(&lli->lli_sa_lock);
LASSERT(entry->se_handle != 0);
entry->se_handle = it->d.lustre.it_lock_handle;
ll_intent_drop_lock(it);
wakeup = sa_received_empty(sai);
- cfs_list_add_tail(&entry->se_list,
+ list_add_tail(&entry->se_list,
&sai->sai_entries_received);
}
sai->sai_replied++;
* so check whether list empty again. */
if (!agl_list_empty(sai)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode, sai);
} else {
sai->sai_agl_valid = 0;
while (!agl_list_empty(sai)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
clli->lli_agl_index = 0;
iput(&clli->lli_vfs_inode);
spin_lock(&plli->lli_agl_lock);
while (!agl_list_empty(sai)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode,
sai);
while (!agl_list_empty(sai) &&
thread_is_running(thread)) {
clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
+ list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode, sai);
spin_lock(&plli->lli_agl_lock);
if (sai) {
thread = &sai->sai_thread;
if (unlikely(thread_is_stopped(thread) &&
- cfs_list_empty(&sai->sai_entries_stated))) {
+ list_empty(&sai->sai_entries_stated))) {
/* to release resource */
ll_stop_statahead(dir, lli->lli_opendir_key);
RETURN(-EAGAIN);
ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
REMOTE_PERM_HASHSIZE *
- sizeof(cfs_list_t),
+ sizeof(struct list_head),
0, 0, NULL);
if (ll_rmtperm_hash_cachep == NULL) {
kmem_cache_destroy(ll_remote_perm_cachep);
}
static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
struct vvp_pgcache_id *id = data;
struct lu_object_header *hdr = cfs_hash_object(hs, hnode);
cpg->cpg_page = vmpage;
page_cache_get(vmpage);
- CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
/* in cache, decref in vvp_page_delete */
atomic_inc(&page->cp_ref);
LASSERT(lli != NULL);
- CFS_INIT_LIST_HEAD(&lli->lli_xattrs);
+ INIT_LIST_HEAD(&lli->lli_xattrs);
lli->lli_flags |= LLIF_XATTR_CACHE;
}
* A linkage into per sub-lock list of all corresponding top-locks,
* hanging off lovsub_lock::lss_parents.
*/
- cfs_list_t lll_list;
+ struct list_head lll_list;
};
/**
* List of top-locks that have given sub-lock as their part. Protected
* by cl_lock::cll_guard mutex.
*/
- cfs_list_t lss_parents;
+ struct list_head lss_parents;
/**
* Top-lock that initiated current operation on this sub-lock. This is
* only set during top-to-bottom lock operations like enqueue, and is
* Linkage into a list (hanging off lov_io::lis_active) of all
* sub-io's active for the current IO iteration.
*/
- cfs_list_t sub_linkage;
+ struct list_head sub_linkage;
/**
* true, iff cl_io_init() was successfully executed against
* lov_io_sub::sub_io.
/**
* List of active sub-io's.
*/
- cfs_list_t lis_active;
+ struct list_head lis_active;
};
struct lov_session {
OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, GFP_NOFS);
if (info != NULL)
- CFS_INIT_LIST_HEAD(&info->lti_closure.clc_list);
+ INIT_LIST_HEAD(&info->lti_closure.clc_list);
else
info = ERR_PTR(-ENOMEM);
return info;
struct lu_context_key *key, void *data)
{
struct lov_thread_info *info = data;
- LINVRNT(cfs_list_empty(&info->lti_closure.clc_list));
+ LINVRNT(list_empty(&info->lti_closure.clc_list));
OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
}
char pool_name[LOV_MAXPOOLNAME + 1];
struct ost_pool pool_obds;
atomic_t pool_refcount;
- cfs_hlist_node_t pool_hash; /* access by poolname */
+ struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list; /* serial access */
struct proc_dir_entry *pool_proc_entry;
struct obd_device *pool_lobd; /* owner */
struct lov_request {
struct obd_info rq_oi;
struct lov_request_set *rq_rqset;
- cfs_list_t rq_link;
+ struct list_head rq_link;
int rq_idx; /* index in lov->tgts array */
int rq_stripe; /* stripe number */
int rq_complete;
atomic_t set_success;
atomic_t set_finish_checked;
struct llog_cookie *set_cookies;
- cfs_list_t set_list;
+ struct list_head set_list;
wait_queue_head_t set_waitq;
};
rc = PTR_ERR(sub);
if (!rc)
- cfs_list_add_tail(&sub->sub_linkage, &lio->lis_active);
+ list_add_tail(&sub->sub_linkage, &lio->lis_active);
else
break;
}
int rc = 0;
ENTRY;
- cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
lov_sub_enter(sub);
rc = iofunc(sub->sub_env, sub->sub_io);
lov_sub_exit(sub);
ENTRY;
rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
LASSERT(rc == 0);
- while (!cfs_list_empty(&lio->lis_active))
- cfs_list_del_init(lio->lis_active.next);
+ while (!list_empty(&lio->lis_active))
+ list_del_init(lio->lis_active.next);
EXIT;
}
ENTRY;
*written = 0;
- cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
struct cl_io *subio = sub->sub_io;
lov_sub_enter(sub);
lck->lls_sub[idx].sub_lock = lsl;
lck->lls_nr_filled++;
LASSERT(lck->lls_nr_filled <= lck->lls_nr);
- cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
+ list_add_tail(&link->lll_list, &lsl->lss_parents);
link->lll_idx = idx;
link->lll_super = lck;
cl_lock_get(parent);
int result = 0;
ENTRY;
- LASSERT(cfs_list_empty(&closure->clc_list));
+ LASSERT(list_empty(&closure->clc_list));
sublock = lls->sub_lock;
child = sublock->lss_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
ENTRY;
- cfs_list_del_init(&link->lll_list);
+ list_del_init(&link->lll_list);
LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
/* yank this sub-lock from parent's array */
lck->lls_sub[link->lll_idx].sub_lock = NULL;
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
ENTRY;
- cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
if (scan->lll_super == lck)
RETURN(scan);
}
struct cl_lock_closure *closure;
closure = &lov_env_info(env)->lti_closure;
- LASSERT(cfs_list_empty(&closure->clc_list));
+ LASSERT(list_empty(&closure->clc_list));
cl_lock_closure_init(env, closure, parent, 1);
return closure;
}
if (!tgt || !tgt->ltd_reap)
continue;
- cfs_list_add(&tgt->ltd_kill, &kill);
+ list_add(&tgt->ltd_kill, &kill);
/* XXX - right now there is a dependency on ld_tgt_count
* being the maximum tgt index for computing the
* mds_max_easize. So we can't shrink it. */
}
mutex_unlock(&lov->lov_lock);
- cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
- cfs_list_del(&tgt->ltd_kill);
+ list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
+ list_del(&tgt->ltd_kill);
/* Disconnect */
__lov_del_obd(obd, tgt);
}
CFS_HASH_MAX_THETA,
&pool_hash_operations,
CFS_HASH_DEFAULT);
- CFS_INIT_LIST_HEAD(&lov->lov_pool_list);
+ INIT_LIST_HEAD(&lov->lov_pool_list);
lov->lov_pool_count = 0;
rc = lov_ost_pool_init(&lov->lov_packed, 0);
if (rc)
static int lov_cleanup(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
- cfs_list_t *pos, *tmp;
+ struct list_head *pos, *tmp;
struct pool_desc *pool;
ENTRY;
- cfs_list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
- pool = cfs_list_entry(pos, struct pool_desc, pool_list);
+ list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
+ pool = list_entry(pos, struct pool_desc, pool_list);
/* free pool structs */
CDEBUG(D_INFO, "delete pool %p\n", pool);
/* In the function below, .hs_keycmp resolves to
struct lov_request_set *set;
struct obd_info oinfo;
struct lov_request *req;
- cfs_list_t *pos;
+ struct list_head *pos;
struct lov_obd *lov;
int rc = 0, err = 0;
ENTRY;
if (rc)
GOTO(out, rc);
- cfs_list_for_each (pos, &set->set_list) {
- req = cfs_list_entry(pos, struct lov_request, rq_link);
+ list_for_each(pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
if (oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
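
The pos/list_entry() pairs kept throughout these lov hunks could be collapsed with list_for_each_entry(), as the llite hunks above already do; a sketch of this loop in that form:

struct lov_request *req;

list_for_each_entry(req, &set->set_list, rq_link) {
	if (oa->o_valid & OBD_MD_FLCOOKIE)
		oti->oti_logcookies = set->set_cookies + req->rq_stripe;
	/* ... */
}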
{
struct lov_request_set *lovset;
struct lov_obd *lov;
- cfs_list_t *pos;
+ struct list_head *pos;
struct lov_request *req;
int rc = 0, err;
ENTRY;
POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
- cfs_list_for_each(pos, &lovset->set_list) {
- req = cfs_list_entry(pos, struct lov_request, rq_link);
+ list_for_each(pos, &lovset->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
"%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
}
}
- if (!cfs_list_empty(&rqset->set_requests)) {
+ if (!list_empty(&rqset->set_requests)) {
LASSERT(rc == 0);
LASSERT (rqset->set_interpret == NULL);
rqset->set_interpret = lov_getattr_interpret;
{
struct lov_request_set *set;
struct lov_request *req;
- cfs_list_t *pos;
+ struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
ENTRY;
oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
- cfs_list_for_each(pos, &set->set_list) {
- req = cfs_list_entry(pos, struct lov_request, rq_link);
+ list_for_each(pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
}
/* If we are not waiting for responses on async requests, return. */
- if (rc || !rqset || cfs_list_empty(&rqset->set_requests)) {
+ if (rc || !rqset || list_empty(&rqset->set_requests)) {
int err;
if (rc)
atomic_set(&set->set_completes, 0);
struct obd_device *obd = class_exp2obd(exp);
struct lov_request_set *set;
struct lov_request *req;
- cfs_list_t *pos;
+ struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
ENTRY;
if (rc)
RETURN(rc);
- cfs_list_for_each (pos, &set->set_list) {
- req = cfs_list_entry(pos, struct lov_request, rq_link);
+ list_for_each(pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, max_age, rqset);
if (rc)
break;
}
- if (rc || cfs_list_empty(&rqset->set_requests)) {
+ if (rc || list_empty(&rqset->set_requests)) {
int err;
if (rc)
atomic_set(&set->set_completes, 0);
RETURN(PTR_ERR(sub));
subobj = lovsub2cl(r0->lo_sub[stripe]);
- cfs_list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
- co_lu.lo_linkage) {
+ list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
if (o->co_ops->coo_page_init != NULL) {
rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
cl_index(subobj, suboff));
return (result % mask);
}
-static void *pool_key(cfs_hlist_node_t *hnode)
+static void *pool_key(struct hlist_node *hnode)
{
struct pool_desc *pool;
- pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
return (pool->pool_name);
}
-static int pool_hashkey_keycmp(const void *key, cfs_hlist_node_t *compared_hnode)
+static int
+pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
{
char *pool_name;
struct pool_desc *pool;
pool_name = (char *)key;
- pool = cfs_hlist_entry(compared_hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
}
-static void *pool_hashobject(cfs_hlist_node_t *hnode)
+static void *pool_hashobject(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ return hlist_entry(hnode, struct pool_desc, pool_hash);
}
-static void pool_hashrefcount_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct pool_desc *pool;
- pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
lov_pool_getref(pool);
}
static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
struct pool_desc *pool;
- pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
lov_pool_putref_locked(pool);
}
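
hlist_entry() is container_of() by another name, so each callback above simply recovers the pool_desc from its embedded pool_hash node. Equivalent spelling of pool_hashobject() (hypothetical name, for illustration only):

static void *pool_hashobject_alt(struct hlist_node *hnode)
{
	return container_of(hnode, struct pool_desc, pool_hash);
}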
if (rc)
GOTO(out_err, rc);
- CFS_INIT_HLIST_NODE(&new_pool->pool_hash);
+ INIT_HLIST_NODE(&new_pool->pool_hash);
#ifdef LPROCFS
/* we need this assert since seq_file is not implemented for liblustre */
#endif
spin_lock(&obd->obd_dev_lock);
- cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+ list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
lov->lov_pool_count++;
spin_unlock(&obd->obd_dev_lock);
out_err:
spin_lock(&obd->obd_dev_lock);
- cfs_list_del_init(&new_pool->pool_list);
+ list_del_init(&new_pool->pool_list);
lov->lov_pool_count--;
spin_unlock(&obd->obd_dev_lock);
lprocfs_remove(&new_pool->pool_proc_entry);
}
spin_lock(&obd->obd_dev_lock);
- cfs_list_del_init(&pool->pool_list);
+ list_del_init(&pool->pool_list);
lov->lov_pool_count--;
spin_unlock(&obd->obd_dev_lock);
atomic_set(&set->set_success, 0);
atomic_set(&set->set_finish_checked, 0);
set->set_cookies = 0;
- CFS_INIT_LIST_HEAD(&set->set_list);
+ INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
}
void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
{
- cfs_list_add_tail(&req->rq_link, &set->set_list);
+ list_add_tail(&req->rq_link, &set->set_list);
set->set_count++;
req->rq_rqset = set;
}
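
lov_set_add_req() pairs the tail insert with a running count; a symmetric removal would undo both. Sketch of an assumed counterpart (not part of this patch):

static void lov_set_del_req(struct lov_request *req,
			    struct lov_request_set *set)
{
	list_del_init(&req->rq_link);
	set->set_count--;
	req->rq_rqset = NULL;
}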
static int common_attr_done(struct lov_request_set *set)
{
- cfs_list_t *pos;
+ struct list_head *pos;
struct lov_request *req;
struct obdo *tmp_oa;
int rc = 0, attrset = 0;
if (tmp_oa == NULL)
GOTO(out, rc = -ENOMEM);
- cfs_list_for_each (pos, &set->set_list) {
- req = cfs_list_entry(pos, struct lov_request, rq_link);
+ list_for_each(pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
if (!req->rq_complete || req->rq_rc)
continue;
ENTRY;
lsl = cl2lovsub_lock(slice);
- LASSERT(cfs_list_empty(&lsl->lss_parents));
+ LASSERT(list_empty(&lsl->lss_parents));
OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
EXIT;
}
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
ENTRY;
- cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
struct lov_lock *lov = scan->lll_super;
struct cl_lock *parent = lov->lls_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- if (!cfs_list_empty(&lock->lss_parents)) {
+ if (!list_empty(&lock->lss_parents)) {
/*
* It is not clear whether all parents have to be asked and
* their estimations summed, or it is enough to ask one. For
LASSERT(cl_lock_mode_match(d->cld_mode,
s->cls_lock->cll_descr.cld_mode));
- cfs_list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+ list_for_each_entry(scan, &lock->lss_parents, lll_list) {
int rc;
lov = scan->lll_super;
sub = cl2lovsub_lock(slice);
result = 0;
- cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
parent = scan->lll_super->lls_cl.cls_lock;
result = cl_lock_closure_build(env, parent, closure);
if (result != 0)
struct lov_lock_link *temp;
restart = 0;
- cfs_list_for_each_entry_safe(scan, temp,
+ list_for_each_entry_safe(scan, temp,
&sub->lss_parents, lll_list) {
lov = scan->lll_super;
lovsub_parent_lock(env, lov);
struct lov_lock *lov;
struct lov_lock_link *scan;
- cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
lov = scan->lll_super;
(*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
if (lov != NULL)
ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, GFP_NOFS);
if (lsk != NULL) {
- CFS_INIT_LIST_HEAD(&lsk->lss_parents);
+ INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
result = 0;
} else
struct cl_object_header eo_hdr;
struct echo_device *eo_dev;
- cfs_list_t eo_obj_chain;
+ struct list_head eo_obj_chain;
struct lov_stripe_md *eo_lsm;
- atomic_t eo_npages;
+ atomic_t eo_npages;
int eo_deleted;
};
};
struct echo_lock {
- struct cl_lock_slice el_cl;
- cfs_list_t el_chain;
- struct echo_object *el_object;
- __u64 el_cookie;
- atomic_t el_refcount;
+ struct cl_lock_slice el_cl;
+ struct list_head el_chain;
+ struct echo_object *el_object;
+ __u64 el_cookie;
+ atomic_t el_refcount;
};
static int echo_client_setup(const struct lu_env *env,
{
struct echo_lock *ecl = cl2echo_lock(slice);
- LASSERT(cfs_list_empty(&ecl->el_chain));
+ LASSERT(list_empty(&ecl->el_chain));
OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
}
{
struct echo_lock *ecl = cl2echo_lock(slice);
- LASSERT(cfs_list_empty(&ecl->el_chain));
+ LASSERT(list_empty(&ecl->el_chain));
}
static int echo_lock_fits_into(const struct lu_env *env,
if (el != NULL) {
cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
el->el_object = cl2echo_obj(obj);
- CFS_INIT_LIST_HEAD(&el->el_chain);
+ INIT_LIST_HEAD(&el->el_chain);
atomic_set(&el->el_refcount, 0);
}
RETURN(el == NULL ? -ENOMEM : 0);
cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
spin_lock(&ec->ec_lock);
- cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
spin_unlock(&ec->ec_lock);
RETURN(0);
LASSERT(atomic_read(&eco->eo_npages) == 0);
spin_lock(&ec->ec_lock);
- cfs_list_del_init(&eco->eo_obj_chain);
+ list_del_init(&eco->eo_obj_chain);
spin_unlock(&ec->ec_lock);
lu_object_fini(obj);
ls = next->ld_site;
spin_lock(&ls->ls_ld_lock);
- cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+ list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
found = 1;
break;
* accessed in parallel.
*/
spin_lock(&ec->ec_lock);
- cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
eco->eo_deleted = 1;
spin_unlock(&ec->ec_lock);
/* Wait for the last reference to be dropped. */
spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_objects)) {
+ while (!list_empty(&ec->ec_objects)) {
spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
}
spin_unlock(&ec->ec_lock);
- LASSERT(cfs_list_empty(&ec->ec_locks));
+ LASSERT(list_empty(&ec->ec_locks));
CDEBUG(D_INFO, "No object exists, exiting...\n");
if (rc == 0) {
el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
spin_lock(&ec->ec_lock);
- if (cfs_list_empty(&el->el_chain)) {
- cfs_list_add(&el->el_chain, &ec->ec_locks);
+ if (list_empty(&el->el_chain)) {
+ list_add(&el->el_chain, &ec->ec_locks);
el->el_cookie = ++ec->ec_unique;
}
atomic_inc(&el->el_refcount);
{
struct echo_client_obd *ec = ed->ed_ec;
struct echo_lock *ecl = NULL;
- cfs_list_t *el;
+ struct list_head *el;
int found = 0, still_used = 0;
ENTRY;
LASSERT(ec != NULL);
spin_lock(&ec->ec_lock);
- cfs_list_for_each (el, &ec->ec_locks) {
- ecl = cfs_list_entry (el, struct echo_lock, el_chain);
+ list_for_each(el, &ec->ec_locks) {
+ ecl = list_entry(el, struct echo_lock, el_chain);
CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
found = (ecl->el_cookie == cookie);
if (found) {
if (atomic_dec_and_test(&ecl->el_refcount))
- cfs_list_del_init(&ecl->el_chain);
+ list_del_init(&ecl->el_chain);
else
still_used = 1;
break;
}
spin_lock_init(&ec->ec_lock);
- CFS_INIT_LIST_HEAD (&ec->ec_objects);
- CFS_INIT_LIST_HEAD (&ec->ec_locks);
+ INIT_LIST_HEAD(&ec->ec_objects);
+ INIT_LIST_HEAD(&ec->ec_locks);
ec->ec_unique = 0;
ec->ec_nstripes = 0;
if (rc == 0) {
/* Turn off pinger because it connects to tgt obd directly. */
spin_lock(&tgt->obd_dev_lock);
- cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ list_del_init(&ec->ec_exp->exp_obd_chain_timed);
spin_unlock(&tgt->obd_dev_lock);
}
RETURN(0);
}
- if (!cfs_list_empty(&obddev->obd_exports)) {
+ if (!list_empty(&obddev->obd_exports)) {
CERROR("still has clients!\n");
RETURN(-EBUSY);
}
if (dt_update == NULL)
return;
- cfs_list_del(&dt_update->dur_list);
+ list_del(&dt_update->dur_list);
if (dt_update->dur_req != NULL)
OBD_FREE_LARGE(dt_update->dur_req, dt_update->dur_req_len);
struct lu_env env;
struct ptlrpc_request *req;
__u32 start_epoch;
- cfs_list_t client_list;
+ struct list_head client_list;
int rc;
if (tgt->lut_obd->obd_stopping)
* with resend requests. Move final list to separate one for processing
*/
spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
- cfs_list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
+ list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
/**
* go through list of exports participated in recovery and
* set new epoch for them
*/
- cfs_list_for_each_entry(req, &client_list, rq_list) {
+ list_for_each_entry(req, &client_list, rq_list) {
LASSERT(!req->rq_export->exp_delayed);
if (!req->rq_export->exp_vbr_failed)
tgt_client_epoch_update(&env, req->rq_export);
}
/** return list back at once */
spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
- cfs_list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
+ list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
/** update server epoch */
tgt_server_data_update(&env, tgt, 1);
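
The epoch update above uses the splice-out/process/splice-back pattern: hold the lock only long enough to move the whole queue to a private head, work on it unlocked, then return it in one step. A generic sketch (handle_one() is a hypothetical stand-in for the per-request work):

static void handle_one(struct ptlrpc_request *req);	/* hypothetical */

static void drain_and_restore(spinlock_t *lock, struct list_head *shared)
{
	LIST_HEAD(work);
	struct ptlrpc_request *req;

	spin_lock(lock);
	list_splice_init(shared, &work);	/* take the whole list */
	spin_unlock(lock);

	list_for_each_entry(req, &work, rq_list)
		handle_one(req);

	spin_lock(lock);
	list_splice_init(&work, shared);	/* put it back in one step */
	spin_unlock(lock);
}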