/* init mgs_export_data for fsc */
spin_lock_init(&data->med_lock);
- CFS_INIT_LIST_HEAD(&data->med_clients);
+ INIT_LIST_HEAD(&data->med_clients);
spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
*/
struct mgs_nidtbl;
struct mgs_nidtbl_target {
- cfs_list_t mnt_list;
- struct mgs_nidtbl *mnt_fs;
- u64 mnt_version;
- int mnt_type; /* OST or MDT */
- cfs_time_t mnt_last_active;
- struct mgs_target_info mnt_mti;
+ struct list_head mnt_list;
+ struct mgs_nidtbl *mnt_fs;
+ u64 mnt_version;
+ int mnt_type; /* OST or MDT */
+ cfs_time_t mnt_last_active;
+ struct mgs_target_info mnt_mti;
};
enum {
* maintain fs client nodes of mgs.
*/
struct mgs_fsc {
- struct fs_db *mfc_fsdb;
+ struct fs_db *mfc_fsdb;
/**
* Where the fs client comes from.
*/
- struct obd_export *mfc_export;
+ struct obd_export *mfc_export;
/**
* list of fs clients from the same export,
* protected by mgs_export_data->med_lock
*/
- cfs_list_t mfc_export_list;
+ struct list_head mfc_export_list;
/**
* list of fs clients in the same fsdb, protected by fsdb->fsdb_mutex
*/
- cfs_list_t mfc_fsdb_list;
- unsigned mfc_ir_capable:1;
+ struct list_head mfc_fsdb_list;
+ unsigned mfc_ir_capable:1;
};
struct mgs_nidtbl {
- struct fs_db *mn_fsdb;
- struct file *mn_version_file;
- struct mutex mn_lock;
- u64 mn_version;
- int mn_nr_targets;
- cfs_list_t mn_targets;
+ struct fs_db *mn_fsdb;
+ struct file *mn_version_file;
+ struct mutex mn_lock;
+ u64 mn_version;
+ int mn_nr_targets;
+ struct list_head mn_targets;
};
struct mgs_tgt_srpc_conf {
#define FSDB_REVOKING_PARAMS (6) /* DLM lock is being revoked */
struct fs_db {
- char fsdb_name[9];
- cfs_list_t fsdb_list; /* list of databases */
+ char fsdb_name[9];
+ struct list_head fsdb_list; /* list of databases */
struct mutex fsdb_mutex;
- void *fsdb_ost_index_map; /* bitmap of used indicies */
- void *fsdb_mdt_index_map; /* bitmap of used indicies */
- int fsdb_mdt_count;
- char *fsdb_clilov; /* COMPAT_146 client lov name */
- char *fsdb_clilmv;
- unsigned long fsdb_flags;
- __u32 fsdb_gen;
+ void *fsdb_ost_index_map; /* bitmap of used indicies */
+ void *fsdb_mdt_index_map; /* bitmap of used indicies */
+ int fsdb_mdt_count;
+ char *fsdb_clilov; /* COMPAT_146 client lov name */
+ char *fsdb_clilmv;
+ unsigned long fsdb_flags;
+ __u32 fsdb_gen;
/* in-memory copy of the srpc rules, guarded by fsdb_lock */
struct sptlrpc_rule_set fsdb_srpc_gen;
struct mgs_tgt_srpc_conf *fsdb_srpc_tgt;
/* list of fs clients, mgs_fsc. protected by mgs_mutex */
- cfs_list_t fsdb_clients;
+ struct list_head fsdb_clients;
int fsdb_nonir_clients;
int fsdb_ir_state;
/* async thread to notify clients */
struct mgs_device *fsdb_mgs;
- wait_queue_head_t fsdb_notify_waitq;
- struct completion fsdb_notify_comp;
- cfs_time_t fsdb_notify_start;
- atomic_t fsdb_notify_phase;
+ wait_queue_head_t fsdb_notify_waitq;
+ struct completion fsdb_notify_comp;
+ cfs_time_t fsdb_notify_start;
+ atomic_t fsdb_notify_phase;
volatile unsigned int fsdb_notify_async:1,
fsdb_notify_stop:1;
/* statistic data */
struct obd_export *mgs_bottom_exp;
struct dt_object *mgs_configs_dir;
struct dt_object *mgs_nidtbl_dir;
- cfs_list_t mgs_fs_db_list;
+ struct list_head mgs_fs_db_list;
spinlock_t mgs_lock; /* covers mgs_fs_db_list */
cfs_proc_dir_entry_t *mgs_proc_live;
cfs_proc_dir_entry_t *mgs_proc_osd;
struct fs_db *mgs_find_fsdb(struct mgs_device *mgs, char *fsname)
{
struct fs_db *fsdb;
- cfs_list_t *tmp;
+ struct list_head *tmp;
- cfs_list_for_each(tmp, &mgs->mgs_fs_db_list) {
- fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
+ list_for_each(tmp, &mgs->mgs_fs_db_list) {
+ fsdb = list_entry(tmp, struct fs_db, fsdb_list);
if (strcmp(fsdb->fsdb_name, fsname) == 0)
return fsdb;
}
lproc_mgs_add_live(mgs, fsdb);
}
- cfs_list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
+ list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
RETURN(fsdb);
err:
static void mgs_free_fsdb(struct mgs_device *mgs, struct fs_db *fsdb)
{
- /* wait for anyone with the sem */
+ /* wait for anyone with the sem */
mutex_lock(&fsdb->fsdb_mutex);
lproc_mgs_del_live(mgs, fsdb);
- cfs_list_del(&fsdb->fsdb_list);
+ list_del(&fsdb->fsdb_list);
/* deinitialize fsr */
mgs_ir_fini_fs(mgs, fsdb);
int mgs_init_fsdb_list(struct mgs_device *mgs)
{
- CFS_INIT_LIST_HEAD(&mgs->mgs_fs_db_list);
+ INIT_LIST_HEAD(&mgs->mgs_fs_db_list);
return 0;
}
int mgs_cleanup_fsdb_list(struct mgs_device *mgs)
{
- struct fs_db *fsdb;
- cfs_list_t *tmp, *tmp2;
+ struct fs_db *fsdb;
+ struct list_head *tmp, *tmp2;
+
mutex_lock(&mgs->mgs_mutex);
- cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
- fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
+ list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
+ fsdb = list_entry(tmp, struct fs_db, fsdb_list);
mgs_free_fsdb(mgs, fsdb);
- }
+ }
mutex_unlock(&mgs->mgs_mutex);
- return 0;
+ return 0;
}
int mgs_find_or_make_fsdb(const struct lu_env *env,
static int nidtbl_is_sane(struct mgs_nidtbl *tbl)
{
- struct mgs_nidtbl_target *tgt;
- int version = 0;
+ struct mgs_nidtbl_target *tgt;
+ int version = 0;
LASSERT(mutex_is_locked(&tbl->mn_lock));
- cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
- if (!tgt->mnt_version)
- continue;
+ list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
+ if (!tgt->mnt_version)
+ continue;
- if (version >= tgt->mnt_version)
- return 0;
+ if (version >= tgt->mnt_version)
+ return 0;
- version = tgt->mnt_version;
- }
- return 1;
+ version = tgt->mnt_version;
+ }
+ return 1;
}
/**
* otherwise, it's for clients, then llog entries for both OSTs and
* MDTs will be returned.
*/
- cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
+ list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
int entry_len = sizeof(*entry);
if (tgt->mnt_version < version)
type &= ~LDD_F_SV_TYPE_MGS;
LASSERT(type != 0);
- tbl = &fsdb->fsdb_nidtbl;
+ tbl = &fsdb->fsdb_nidtbl;
mutex_lock(&tbl->mn_lock);
- cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
- struct mgs_target_info *info = &tgt->mnt_mti;
+ list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
+ struct mgs_target_info *info = &tgt->mnt_mti;
+
if (type == tgt->mnt_type &&
mti->mti_stripe_index == info->mti_stripe_index) {
found = true;
if (tgt == NULL)
GOTO(out, rc = -ENOMEM);
- CFS_INIT_LIST_HEAD(&tgt->mnt_list);
- tgt->mnt_fs = tbl;
- tgt->mnt_version = 0; /* 0 means invalid */
- tgt->mnt_type = type;
+ INIT_LIST_HEAD(&tgt->mnt_list);
+ tgt->mnt_fs = tbl;
+ tgt->mnt_version = 0; /* 0 means invalid */
+ tgt->mnt_type = type;
- ++tbl->mn_nr_targets;
- }
+ ++tbl->mn_nr_targets;
+ }
- tgt->mnt_version = ++tbl->mn_version;
- tgt->mnt_mti = *mti;
+ tgt->mnt_version = ++tbl->mn_version;
+ tgt->mnt_mti = *mti;
- cfs_list_move_tail(&tgt->mnt_list, &tbl->mn_targets);
+ list_move_tail(&tgt->mnt_list, &tbl->mn_targets);
rc = nidtbl_update_version(env, fsdb->fsdb_mgs, tbl);
- EXIT;
+ EXIT;
out:
mutex_unlock(&tbl->mn_lock);
static void mgs_nidtbl_fini_fs(struct fs_db *fsdb)
{
struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
- CFS_LIST_HEAD(head);
+ struct list_head head = LIST_HEAD_INIT(head);
mutex_lock(&tbl->mn_lock);
tbl->mn_nr_targets = 0;
- cfs_list_splice_init(&tbl->mn_targets, &head);
+ list_splice_init(&tbl->mn_targets, &head);
mutex_unlock(&tbl->mn_lock);
- while (!cfs_list_empty(&head)) {
+ while (!list_empty(&head)) {
struct mgs_nidtbl_target *tgt;
tgt = list_entry(head.next, struct mgs_nidtbl_target, mnt_list);
- cfs_list_del(&tgt->mnt_list);
+ list_del(&tgt->mnt_list);
OBD_FREE_PTR(tgt);
}
}
struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
int rc;
- CFS_INIT_LIST_HEAD(&tbl->mn_targets);
+ INIT_LIST_HEAD(&tbl->mn_targets);
mutex_init(&tbl->mn_lock);
tbl->mn_nr_targets = 0;
tbl->mn_fsdb = fsdb;
mgs->mgs_start_time + ir_timeout))
fsdb->fsdb_ir_state = IR_STARTUP;
fsdb->fsdb_nonir_clients = 0;
- CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
+ INIT_LIST_HEAD(&fsdb->fsdb_clients);
/* start notify thread */
fsdb->fsdb_mgs = mgs;
mgs_nidtbl_fini_fs(fsdb);
- LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
+ LASSERT(list_empty(&fsdb->fsdb_clients));
fsdb->fsdb_notify_stop = 1;
wake_up(&fsdb->fsdb_notify_waitq);
if (new_fsc == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&new_fsc->mfc_export_list);
- CFS_INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
+ INIT_LIST_HEAD(&new_fsc->mfc_export_list);
+ INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
new_fsc->mfc_fsdb = fsdb;
new_fsc->mfc_export = class_export_get(exp);
new_fsc->mfc_ir_capable = !!(exp_connect_flags(exp) &
/* tend to find it in export list because this list is shorter. */
spin_lock(&data->med_lock);
- cfs_list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
+ list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
found = true;
break;
new_fsc = NULL;
/* add it into export list. */
- cfs_list_add(&fsc->mfc_export_list, &data->med_clients);
+ list_add(&fsc->mfc_export_list, &data->med_clients);
/* add into fsdb list. */
- cfs_list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
+ list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
if (!fsc->mfc_ir_capable) {
++fsdb->fsdb_nonir_clients;
if (fsdb->fsdb_ir_state == IR_FULL)
{
struct mgs_export_data *data = &exp->u.eu_mgs_data;
struct mgs_fsc *fsc, *tmp;
- CFS_LIST_HEAD(head);
+ struct list_head head = LIST_HEAD_INIT(head);
spin_lock(&data->med_lock);
- cfs_list_splice_init(&data->med_clients, &head);
+ list_splice_init(&data->med_clients, &head);
spin_unlock(&data->med_lock);
- cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
+ list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
struct fs_db *fsdb = fsc->mfc_fsdb;
LASSERT(fsc->mfc_export == exp);
mutex_lock(&fsdb->fsdb_mutex);
- cfs_list_del_init(&fsc->mfc_fsdb_list);
+ list_del_init(&fsc->mfc_fsdb_list);
if (fsc->mfc_ir_capable == 0) {
--fsdb->fsdb_nonir_clients;
LASSERT(fsdb->fsdb_ir_state != IR_FULL);
fsdb->fsdb_ir_state = IR_FULL;
}
mutex_unlock(&fsdb->fsdb_mutex);
- cfs_list_del_init(&fsc->mfc_export_list);
+ list_del_init(&fsc->mfc_export_list);
class_export_put(fsc->mfc_export);
OBD_FREE_PTR(fsc);
}
{
struct mgs_fsc *fsc, *tmp;
- cfs_list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
+ list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
mfc_fsdb_list) {
struct mgs_export_data *data = &fsc->mfc_export->u.eu_mgs_data;
LASSERT(fsdb == fsc->mfc_fsdb);
- cfs_list_del_init(&fsc->mfc_fsdb_list);
+ list_del_init(&fsc->mfc_fsdb_list);
spin_lock(&data->med_lock);
- cfs_list_del_init(&fsc->mfc_export_list);
+ list_del_init(&fsc->mfc_export_list);
spin_unlock(&data->med_lock);
class_export_put(fsc->mfc_export);
OBD_FREE_PTR(fsc);
dev->od_ost_map->om_subdir_count = rc;
rc = 0;
- CFS_INIT_LIST_HEAD(&dev->od_ost_map->om_seq_list);
+ INIT_LIST_HEAD(&dev->od_ost_map->om_seq_list);
rwlock_init(&dev->od_ost_map->om_seq_list_lock);
mutex_init(&dev->od_ost_map->om_dir_init_mutex);
{
int j;
- cfs_list_del_init(&osd_seq->oos_seq_list);
+ list_del_init(&osd_seq->oos_seq_list);
if (osd_seq->oos_dirs) {
for (j = 0; j < osd_seq->oos_subdir_count; j++) {
return;
write_lock(&map->om_seq_list_lock);
- cfs_list_for_each_entry_safe(osd_seq, tmp,
- &map->om_seq_list,
- oos_seq_list) {
+ list_for_each_entry_safe(osd_seq, tmp, &map->om_seq_list,
+ oos_seq_list) {
osd_seq_free(map, osd_seq);
}
write_unlock(&map->om_seq_list_lock);
{
struct osd_obj_seq *osd_seq;
- cfs_list_for_each_entry(osd_seq, &map->om_seq_list, oos_seq_list) {
+ list_for_each_entry(osd_seq, &map->om_seq_list, oos_seq_list) {
if (osd_seq->oos_seq == seq)
return osd_seq;
}
if (osd_seq == NULL)
GOTO(cleanup, rc = -ENOMEM);
- CFS_INIT_LIST_HEAD(&osd_seq->oos_seq_list);
+ INIT_LIST_HEAD(&osd_seq->oos_seq_list);
osd_seq->oos_seq = seq;
/* Init subdir count to be 32, but each seq can have
* different subdir count */
GOTO(cleanup, rc);
write_lock(&map->om_seq_list_lock);
- cfs_list_add(&osd_seq->oos_seq_list, &map->om_seq_list);
+ list_add(&osd_seq->oos_seq_list, &map->om_seq_list);
write_unlock(&map->om_seq_list_lock);
cleanup:
}
id = &info->oti_id;
- if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ if (!list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
if (result == 0)
dt_txn_hook_commit(th);
/* call per-transaction callbacks if any */
- cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+ list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
"commit callback entry: magic=%x name='%s'\n",
dcb->dcb_magic, dcb->dcb_name);
- cfs_list_del_init(&dcb->dcb_linkage);
+ list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, th, dcb, error);
}
atomic_set(&th->th_refc, 1);
th->th_alloc_size = sizeof(*oh);
oti->oti_dev = osd_dt_dev(d);
- CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
+ INIT_LIST_HEAD(&oh->ot_dcb_list);
osd_th_alloced(oh);
memset(oti->oti_declare_ops, 0,
LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
LASSERT(&dcb->dcb_func != NULL);
- cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+ list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
return 0;
}
/* self-repair LMA by default */
o->od_lma_self_repair = 1;
- CFS_INIT_LIST_HEAD(&o->od_ios_list);
+ INIT_LIST_HEAD(&o->od_ios_list);
/* setup scrub, including OI files initialization */
rc = osd_scrub_setup(env, o);
if (rc < 0)
*
* No locking. Callers synchronize.
*/
-static CFS_LIST_HEAD(iam_formats);
+static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
void iam_format_register(struct iam_format *fmt)
{
- cfs_list_add(&fmt->if_linkage, &iam_formats);
+ list_add(&fmt->if_linkage, &iam_formats);
}
EXPORT_SYMBOL(iam_format_register);
}
result = -ENOENT;
- cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
+ list_for_each_entry(fmt, &iam_formats, if_linkage) {
result = fmt->if_guess(c);
if (result == 0)
break;
/*
* Linkage into global list of container formats.
*/
- cfs_list_t if_linkage;
+ struct list_head if_linkage;
};
void iam_format_register(struct iam_format *fmt);
struct dentry *oos_root; /* O/<seq> */
struct dentry **oos_dirs; /* O/<seq>/d0-dXX */
obd_seq oos_seq; /* seq number */
- cfs_list_t oos_seq_list; /* list to seq_list */
+ struct list_head oos_seq_list; /* list to seq_list */
};
struct osd_obj_map {
struct dentry *om_root; /* dentry for /O */
rwlock_t om_seq_list_lock; /* lock for seq_list */
- cfs_list_t om_seq_list; /* list head for seq */
+ struct list_head om_seq_list; /* list head for seq */
int om_subdir_count;
struct mutex om_dir_init_mutex;
};
struct osd_mdobj {
struct dentry *om_root; /* AGENT/<index> */
- obd_seq om_index; /* mdt index */
- cfs_list_t om_list; /* list to omm_list */
+ obd_seq om_index; /* mdt index */
+ struct list_head om_list; /* list to omm_list */
};
struct osd_mdobj_map {
struct osd_inconsistent_item {
/* link into osd_scrub::os_inconsistent_items,
* protected by osd_scrub::os_lock. */
- cfs_list_t oii_list;
+ struct list_head oii_list;
/* The right FID <=> ino#/gen mapping. */
struct osd_idmap_cache oii_cache;
unsigned long od_capa_timeout;
__u32 od_capa_alg;
struct lustre_capa_key *od_capa_keys;
- cfs_hlist_head_t *od_capa_hash;
+ struct hlist_head *od_capa_hash;
- cfs_proc_dir_entry_t *od_proc_entry;
+ struct proc_dir_entry *od_proc_entry;
struct lprocfs_stats *od_stats;
spinlock_t od_osfs_lock;
struct mutex od_otable_mutex;
struct osd_otable_it *od_otable_it;
struct osd_scrub od_scrub;
- cfs_list_t od_ios_list;
+ struct list_head od_ios_list;
/* service name associated with the osd device */
char od_svname[MAX_OBD_NAME];
struct thandle ot_super;
handle_t *ot_handle;
struct ldiskfs_journal_cb_entry ot_jcb;
- cfs_list_t ot_dcb_list;
+ struct list_head ot_dcb_list;
/* Link to the device, for debugging. */
struct lu_ref_link ot_dev_link;
unsigned short ot_credits;
};
struct osd_quota_leaf {
- cfs_list_t oql_link;
+ struct list_head oql_link;
uint oql_blk;
};
/** the record index in the leaf/index block */
uint oiq_index[LUSTRE_DQTREEDEPTH + 1];
/** list of already processed leaf blocks */
- cfs_list_t oiq_list;
+ struct list_head oiq_list;
};
#define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512)
memset(it, 0, sizeof(*it));
lu_object_get(lo);
it->oiq_obj = obj;
- CFS_INIT_LIST_HEAD(&it->oiq_list);
+ INIT_LIST_HEAD(&it->oiq_list);
/* LUSTRE_DQTREEOFF is the initial offset where the tree can be found */
it->oiq_blk[0] = LUSTRE_DQTREEOFF;
lu_object_put(env, &it->oiq_obj->oo_dt.do_lu);
- cfs_list_for_each_entry_safe(leaf, tmp, &it->oiq_list, oql_link) {
- cfs_list_del_init(&leaf->oql_link);
+ list_for_each_entry_safe(leaf, tmp, &it->oiq_list, oql_link) {
+ list_del_init(&leaf->oql_link);
OBD_FREE_PTR(leaf);
}
EXIT;
OBD_ALLOC_PTR(leaf);
if (leaf == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&leaf->oql_link);
+ INIT_LIST_HEAD(&leaf->oql_link);
leaf->oql_blk = it->oiq_blk[depth];
- cfs_list_add_tail(&leaf->oql_link, &it->oiq_list);
+ list_add_tail(&leaf->oql_link, &it->oiq_list);
RETURN(0);
}
ENTRY;
/* check if the leaf block has been processed before */
- cfs_list_for_each_entry(leaf, &it->oiq_list, oql_link) {
+ list_for_each_entry(leaf, &it->oiq_list, oql_link) {
if (leaf->oql_blk == blk)
RETURN(1);
}
GOTO(out, rc = val);
if (scrub->os_in_prior)
- oii = cfs_list_entry(oic, struct osd_inconsistent_item,
- oii_cache);
+ oii = list_entry(oic, struct osd_inconsistent_item,
+ oii_cache);
if (lid->oii_ino < sf->sf_pos_latest_start && oii == NULL)
GOTO(out, rc = 0);
iput(inode);
if (oii != NULL) {
- LASSERT(!cfs_list_empty(&oii->oii_list));
+ LASSERT(!list_empty(&oii->oii_list));
spin_lock(&scrub->os_lock);
- cfs_list_del_init(&oii->oii_list);
+ list_del_init(&oii->oii_list);
spin_unlock(&scrub->os_lock);
OBD_FREE_PTR(oii);
}
lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
- !cfs_list_empty(&scrub->os_inconsistent_items) ||
+ !list_empty(&scrub->os_inconsistent_items) ||
!thread_is_running(thread),
&lwi);
}
if (unlikely(!thread_is_running(thread)))
return SCRUB_NEXT_EXIT;
- if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ if (!list_empty(&scrub->os_inconsistent_items)) {
struct osd_inconsistent_item *oii;
- oii = cfs_list_entry(scrub->os_inconsistent_items.next,
- struct osd_inconsistent_item, oii_list);
+ oii = list_entry(scrub->os_inconsistent_items.next,
+ struct osd_inconsistent_item, oii_list);
*oic = &oii->oii_cache;
scrub->os_in_prior = 1;
return 0;
{
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
- !cfs_list_empty(&scrub->os_inconsistent_items) ||
+ !list_empty(&scrub->os_inconsistent_items) ||
it->ooi_waiting || !thread_is_running(&scrub->os_thread))
scrub->os_waiting = 0;
else
rc, scrub->os_pos_current);
out:
- while (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ while (!list_empty(&scrub->os_inconsistent_items)) {
struct osd_inconsistent_item *oii;
- oii = cfs_list_entry(scrub->os_inconsistent_items.next,
+ oii = list_entry(scrub->os_inconsistent_items.next,
struct osd_inconsistent_item, oii_list);
- cfs_list_del_init(&oii->oii_list);
+ list_del_init(&oii->oii_list);
OBD_FREE_PTR(oii);
}
lu_env_fini(&env);
};
struct osd_ios_item {
- cfs_list_t oii_list;
+ struct list_head oii_list;
struct dentry *oii_dentry;
scandir_t oii_scandir;
filldir_t oii_filldir;
if (item == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&item->oii_list);
+ INIT_LIST_HEAD(&item->oii_list);
item->oii_dentry = dget(dentry);
item->oii_scandir = scandir;
item->oii_filldir = filldir;
- cfs_list_add_tail(&item->oii_list, &dev->od_ios_list);
+ list_add_tail(&item->oii_list, &dev->od_ios_list);
RETURN(0);
}
if (rc != 0)
break;
- if (cfs_list_empty(&dev->od_ios_list))
+ if (list_empty(&dev->od_ios_list))
break;
- item = cfs_list_entry(dev->od_ios_list.next,
- struct osd_ios_item, oii_list);
- cfs_list_del_init(&item->oii_list);
+ item = list_entry(dev->od_ios_list.next,
+ struct osd_ios_item, oii_list);
+ list_del_init(&item->oii_list);
LASSERT(item->oii_scandir != NULL);
scandir = item->oii_scandir;
dentry = item->oii_dentry;
}
- while (!cfs_list_empty(&dev->od_ios_list)) {
- item = cfs_list_entry(dev->od_ios_list.next,
- struct osd_ios_item, oii_list);
- cfs_list_del_init(&item->oii_list);
+ while (!list_empty(&dev->od_ios_list)) {
+ item = list_entry(dev->od_ios_list.next,
+ struct osd_ios_item, oii_list);
+ list_del_init(&item->oii_list);
dput(item->oii_dentry);
OBD_FREE_PTR(item);
}
init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
- CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
+ INIT_LIST_HEAD(&scrub->os_inconsistent_items);
push_ctxt(&saved, ctxt);
filp = filp_open(osd_scrub_name, O_RDWR | O_CREAT, 0644);
if (unlikely(oii == NULL))
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&oii->oii_list);
+ INIT_LIST_HEAD(&oii->oii_list);
oii->oii_cache = *oic;
oii->oii_insert = insert;
RETURN(-EAGAIN);
}
- if (cfs_list_empty(&scrub->os_inconsistent_items))
+ if (list_empty(&scrub->os_inconsistent_items))
wakeup = 1;
- cfs_list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
+ list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
spin_unlock(&scrub->os_lock);
if (wakeup != 0)
ENTRY;
spin_lock(&scrub->os_lock);
- cfs_list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
+ list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
*id = oii->oii_cache.oic_lid;
spin_unlock(&scrub->os_lock);
struct lvfs_run_ctxt os_ctxt;
struct ptlrpc_thread os_thread;
struct osd_idmap_cache os_oic;
- cfs_list_t os_inconsistent_items;
+ struct list_head os_inconsistent_items;
/* write lock for scrub prep/update/post/checkpoint,
* read lock for scrub dump. */
/* callback is register once per diskfs -- that's the whole point */
struct dt_txn_callback otr_tx_cb;
/* single node can run many clusters */
- cfs_list_t otr_wakeup_list;
- cfs_list_t otr_list;
+ struct list_head otr_wakeup_list;
+ struct list_head otr_list;
/* underlying shared device */
struct dt_device *otr_dev;
/* how many users of this tracker */
struct ptlrpc_thread opd_syn_thread;
wait_queue_head_t opd_syn_waitq;
/* list of remotely committed rpc */
- cfs_list_t opd_syn_committed_there;
+ struct list_head opd_syn_committed_there;
/* number of changes being under sync */
int opd_syn_sync_in_progress;
/* number of RPCs in flight - flow control */
/* last processed (taken from llog) id */
unsigned long opd_syn_last_processed_id;
struct osp_id_tracker *opd_syn_tracker;
- cfs_list_t opd_syn_ontrack;
+ struct list_head opd_syn_ontrack;
/*
* statfs related fields: OSP maintains it on its own
return 1;
/* has remotely committed? */
- if (!cfs_list_empty(&d->opd_syn_committed_there))
+ if (!list_empty(&d->opd_syn_committed_there))
return 1;
return 0;
LASSERT(d);
LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- LASSERT(cfs_list_empty(&req->rq_exp_list));
+ LASSERT(list_empty(&req->rq_exp_list));
ptlrpc_request_addref(req);
spin_lock(&d->opd_syn_lock);
- cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
/* XXX: some batching wouldn't hurt */
* but object doesn't exist anymore - cancell llog record
*/
LASSERT(req->rq_transno == 0);
- LASSERT(cfs_list_empty(&req->rq_exp_list));
+ LASSERT(list_empty(&req->rq_exp_list));
ptlrpc_request_addref(req);
spin_lock(&d->opd_syn_lock);
- cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
wake_up(&d->opd_syn_waitq);
body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
body->oa.o_lcookie.lgc_index = h->lrh_index;
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
req->rq_svc_thread = (void *) OSP_JOB_MAGIC;
req->rq_interpret_reply = osp_sync_interpret;
struct ptlrpc_request *req, *tmp;
struct llog_ctxt *ctxt;
struct llog_handle *llh;
- cfs_list_t list;
+ struct list_head list;
int rc, done = 0;
ENTRY;
- if (cfs_list_empty(&d->opd_syn_committed_there))
+ if (list_empty(&d->opd_syn_committed_there))
return;
/*
llh = ctxt->loc_handle;
LASSERT(llh);
- CFS_INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&list);
spin_lock(&d->opd_syn_lock);
- cfs_list_splice(&d->opd_syn_committed_there, &list);
- CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+ list_splice(&d->opd_syn_committed_there, &list);
+ INIT_LIST_HEAD(&d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
- cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
+ list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
struct llog_cookie *lcookie = NULL;
LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- cfs_list_del_init(&req->rq_exp_list);
+ list_del_init(&req->rq_exp_list);
if (d->opd_connect_mdt) {
struct object_update_request *ureq;
l_wait_event(d->opd_syn_waitq,
!osp_sync_running(d) ||
osp_sync_can_process_new(d, rec) ||
- !cfs_list_empty(&d->opd_syn_committed_there),
+ !list_empty(&d->opd_syn_committed_there),
&lwi);
} while (1);
}
LASSERTF(count < 10, "%s: %d %d %sempty\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
d->opd_syn_rpc_in_flight,
- cfs_list_empty(&d->opd_syn_committed_there) ? "" :"!");
+ list_empty(&d->opd_syn_committed_there) ? "" : "!");
}
"%s: %d %d %sempty\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
d->opd_syn_rpc_in_flight,
- cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");
+ list_empty(&d->opd_syn_committed_there) ? "" : "!");
thread->t_flags = SVC_STOPPED;
spin_lock_init(&d->opd_syn_lock);
init_waitqueue_head(&d->opd_syn_waitq);
init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+ INIT_LIST_HEAD(&d->opd_syn_committed_there);
task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",
d->opd_index, d->opd_group);
}
static DEFINE_MUTEX(osp_id_tracker_sem);
-static CFS_LIST_HEAD(osp_id_tracker_list);
+static struct list_head osp_id_tracker_list =
+ LIST_HEAD_INIT(osp_id_tracker_list);
static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
{
tr->otr_committed_id, txn->oti_current_id);
tr->otr_committed_id = txn->oti_current_id;
- cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
- opd_syn_ontrack) {
+ list_for_each_entry(d, &tr->otr_wakeup_list,
+ opd_syn_ontrack) {
d->opd_syn_last_committed_id = tr->otr_committed_id;
wake_up(&d->opd_syn_waitq);
}
LASSERT(d);
LASSERT(d->opd_storage);
LASSERT(d->opd_syn_tracker == NULL);
- CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
+ INIT_LIST_HEAD(&d->opd_syn_ontrack);
mutex_lock(&osp_id_tracker_sem);
- cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
+ list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
if (tr->otr_dev == d->opd_storage) {
LASSERT(atomic_read(&tr->otr_refcount));
atomic_inc(&tr->otr_refcount);
tr->otr_next_id = 1;
tr->otr_committed_id = 0;
atomic_set(&tr->otr_refcount, 1);
- CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
- cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
+ INIT_LIST_HEAD(&tr->otr_wakeup_list);
+ list_add(&tr->otr_list, &osp_id_tracker_list);
tr->otr_tx_cb.dtc_txn_commit =
osp_sync_tracker_commit_cb;
tr->otr_tx_cb.dtc_cookie = tr;
mutex_lock(&osp_id_tracker_sem);
if (atomic_dec_and_test(&tr->otr_refcount)) {
dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
- LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
- cfs_list_del(&tr->otr_list);
+ LASSERT(list_empty(&tr->otr_wakeup_list));
+ list_del(&tr->otr_list);
OBD_FREE_PTR(tr);
d->opd_syn_tracker = NULL;
}
id = tr->otr_next_id++;
if (id > d->opd_syn_last_used_id)
d->opd_syn_last_used_id = id;
- if (cfs_list_empty(&d->opd_syn_ontrack))
- cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
+ if (list_empty(&d->opd_syn_ontrack))
+ list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
spin_unlock(&tr->otr_lock);
CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
tr = d->opd_syn_tracker;
LASSERT(tr);
- if (cfs_list_empty(&d->opd_syn_ontrack))
+ if (list_empty(&d->opd_syn_ontrack))
return;
spin_lock(&tr->otr_lock);
- cfs_list_del_init(&d->opd_syn_ontrack);
+ list_del_init(&d->opd_syn_ontrack);
spin_unlock(&tr->otr_lock);
}
return cfs_hash_u64_hash(*((__u64 *)key), mask);
}
-static void *lqe64_hash_key(cfs_hlist_node_t *hnode)
+static void *lqe64_hash_key(struct hlist_node *hnode)
{
struct lquota_entry *lqe;
- lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
return &lqe->lqe_id.qid_uid;
}
-static int lqe64_hash_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int lqe64_hash_keycmp(const void *key, struct hlist_node *hnode)
{
struct lquota_entry *lqe;
- lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
return (lqe->lqe_id.qid_uid == *((__u64*)key));
}
-static void *lqe_hash_object(cfs_hlist_node_t *hnode)
+static void *lqe_hash_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ return hlist_entry(hnode, struct lquota_entry, lqe_hash);
}
-static void lqe_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct lquota_entry *lqe;
- lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
lqe_getref(lqe);
}
-static void lqe_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct lquota_entry *lqe;
- lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
lqe_putref(lqe);
}
-static void lqe_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
CERROR("Should not have any item left!\n");
}
};
static int lqe_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
struct lqe_iter_data *d = (struct lqe_iter_data *)data;
struct lquota_entry *lqe;
- lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
LASSERT(atomic_read(&lqe->lqe_ref) > 0);
/* Only one reference held by hash table, and nobody else can
atomic_set(&new->lqe_ref, 1); /* hold 1 for caller */
new->lqe_id = *qid;
new->lqe_site = site;
- CFS_INIT_LIST_HEAD(&new->lqe_link);
+ INIT_LIST_HEAD(&new->lqe_link);
/* quota settings need to be updated from disk, that's why
* lqe->lqe_uptodate isn't set yet */
* A lquota_entry structure belong to a single lquota_site */
struct lquota_entry {
/* link to site hash table */
- cfs_hlist_node_t lqe_hash;
+ struct hlist_node lqe_hash;
/* quota identifier associated with this entry */
union lquota_id lqe_id;
/* linked to list of lqes which:
* - need quota space adjustment on slave
* - need glimpse to be sent on master */
- cfs_list_t lqe_link;
+ struct list_head lqe_link;
/* current quota settings/usage of this ID */
__u64 lqe_granted; /* granted limit, inodes or kbytes */
/* set up and start rebalance thread */
thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
init_waitqueue_head(&qmt->qmt_reba_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
+ INIT_LIST_HEAD(&qmt->qmt_reba_list);
spin_lock_init(&qmt->qmt_reba_lock);
rc = qmt_start_reba_thread(qmt);
if (rc) {
cfs_hash_t *qmt_pool_hash;
/* List of pools managed by this master target */
- cfs_list_t qmt_pool_list;
+ struct list_head qmt_pool_list;
/* procfs root directory for this qmt */
cfs_proc_dir_entry_t *qmt_proc;
struct ptlrpc_thread qmt_reba_thread;
/* list of lqe entry which need space rebalancing */
- cfs_list_t qmt_reba_list;
+ struct list_head qmt_reba_list;
/* lock protecting rebalancing list */
spinlock_t qmt_reba_lock;
*/
struct qmt_pool_info {
/* link to qmt's pool hash */
- cfs_hlist_node_t qpi_hash;
+ struct hlist_node qpi_hash;
/* chained list of all pools managed by the same qmt */
- cfs_list_t qpi_linkage;
+ struct list_head qpi_linkage;
/* Pool key composed of pool_id | (pool_type << 16)
* Only pool ID 0 is supported for now and the pool type is either
struct ldlm_resource *res, union ldlm_gl_desc *desc,
qmt_glimpse_cb_t cb, void *arg)
{
- cfs_list_t *tmp, *pos;
- CFS_LIST_HEAD(gl_list);
+ struct list_head *tmp, *pos;
+ LIST_HEAD(gl_list);
int rc = 0;
ENTRY;
lock_res(res);
/* scan list of granted locks */
- cfs_list_for_each(pos, &res->lr_granted) {
+ list_for_each(pos, &res->lr_granted) {
struct ldlm_glimpse_work *work;
struct ldlm_lock *lock;
struct obd_uuid *uuid;
- lock = cfs_list_entry(pos, struct ldlm_lock, l_res_link);
+ lock = list_entry(pos, struct ldlm_lock, l_res_link);
LASSERT(lock->l_export);
uuid = &lock->l_export->exp_client_uuid;
continue;
}
- cfs_list_add_tail(&work->gl_list, &gl_list);
+ list_add_tail(&work->gl_list, &gl_list);
work->gl_lock = LDLM_LOCK_GET(lock);
work->gl_flags = 0;
work->gl_desc = desc;
}
unlock_res(res);
- if (cfs_list_empty(&gl_list)) {
+ if (list_empty(&gl_list)) {
CDEBUG(D_QUOTA, "%s: nobody to notify\n", qmt->qmt_svname);
RETURN(0);
}
/* issue glimpse callbacks to all connected slaves */
rc = ldlm_glimpse_locks(res, &gl_list);
- cfs_list_for_each_safe(pos, tmp, &gl_list) {
+ list_for_each_safe(pos, tmp, &gl_list) {
struct ldlm_glimpse_work *work;
- work = cfs_list_entry(pos, struct ldlm_glimpse_work, gl_list);
+ work = list_entry(pos, struct ldlm_glimpse_work, gl_list);
- cfs_list_del(&work->gl_list);
+ list_del(&work->gl_list);
CERROR("%s: failed to notify %s of new quota settings\n",
qmt->qmt_svname,
obd_uuid2str(&work->gl_lock->l_export->exp_client_uuid));
lqe_getref(lqe);
spin_lock(&qmt->qmt_reba_lock);
- if (!qmt->qmt_stopping && cfs_list_empty(&lqe->lqe_link)) {
- cfs_list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
+ if (!qmt->qmt_stopping && list_empty(&lqe->lqe_link)) {
+ list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
added = true;
}
spin_unlock(&qmt->qmt_reba_lock);
while (1) {
l_wait_event(thread->t_ctl_waitq,
- !cfs_list_empty(&qmt->qmt_reba_list) ||
+ !list_empty(&qmt->qmt_reba_list) ||
!thread_is_running(thread), &lwi);
spin_lock(&qmt->qmt_reba_lock);
- cfs_list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
- lqe_link) {
- cfs_list_del_init(&lqe->lqe_link);
+ list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
+ lqe_link) {
+ list_del_init(&lqe->lqe_link);
spin_unlock(&qmt->qmt_reba_lock);
if (thread_is_running(thread))
l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
&lwi);
}
- LASSERT(cfs_list_empty(&qmt->qmt_reba_list));
+ LASSERT(list_empty(&qmt->qmt_reba_list));
}
return cfs_hash_u32_hash(*((__u32 *)key), mask);
}
-static void *qpi_hash_key(cfs_hlist_node_t *hnode)
+static void *qpi_hash_key(struct hlist_node *hnode)
{
struct qmt_pool_info *pool;
- pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+ pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
return &pool->qpi_key;
}
-static int qpi_hash_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int qpi_hash_keycmp(const void *key, struct hlist_node *hnode)
{
struct qmt_pool_info *pool;
- pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+ pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
return pool->qpi_key == *((__u32 *)key);
}
-static void *qpi_hash_object(cfs_hlist_node_t *hnode)
+static void *qpi_hash_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+ return hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
}
-static void qpi_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void qpi_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct qmt_pool_info *pool;
- pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+ pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
qpi_getref(pool);
}
-static void qpi_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void qpi_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct qmt_pool_info *pool;
- pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+ pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
qpi_putref_locked(pool);
}
-static void qpi_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void qpi_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
CERROR("Should not have any item left!\n");
}
OBD_ALLOC_PTR(pool);
if (pool == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&pool->qpi_linkage);
+ INIT_LIST_HEAD(&pool->qpi_linkage);
/* assign key used by hash functions */
pool->qpi_key = pool_id + (pool_type << 16);
}
/* add to qmt pool list */
- cfs_list_add_tail(&pool->qpi_linkage, &qmt->qmt_pool_list);
+ list_add_tail(&pool->qpi_linkage, &qmt->qmt_pool_list);
EXIT;
out:
if (rc)
pool->qpi_qmt = NULL;
}
- LASSERT(cfs_list_empty(&pool->qpi_linkage));
+ LASSERT(list_empty(&pool->qpi_linkage));
OBD_FREE_PTR(pool);
}
void qmt_pool_fini(const struct lu_env *env, struct qmt_device *qmt)
{
struct qmt_pool_info *pool;
- cfs_list_t *pos, *n;
+ struct list_head *pos, *n;
ENTRY;
if (qmt->qmt_pool_hash == NULL)
RETURN_EXIT;
/* parse list of pool and destroy each element */
- cfs_list_for_each_safe(pos, n, &qmt->qmt_pool_list) {
- pool = cfs_list_entry(pos, struct qmt_pool_info,
- qpi_linkage);
+ list_for_each_safe(pos, n, &qmt->qmt_pool_list) {
+ pool = list_entry(pos, struct qmt_pool_info,
+ qpi_linkage);
/* remove from hash */
cfs_hash_del(qmt->qmt_pool_hash, &pool->qpi_key,
&pool->qpi_hash);
/* remove from list */
- cfs_list_del_init(&pool->qpi_linkage);
+ list_del_init(&pool->qpi_linkage);
/* release extra reference taken in qmt_pool_alloc */
qpi_putref(env, pool);
}
- LASSERT(cfs_list_empty(&qmt->qmt_pool_list));
+ LASSERT(list_empty(&qmt->qmt_pool_list));
cfs_hash_putref(qmt->qmt_pool_hash);
qmt->qmt_pool_hash = NULL;
}
/* initialize pool list */
- CFS_INIT_LIST_HEAD(&qmt->qmt_pool_list);
+ INIT_LIST_HEAD(&qmt->qmt_pool_list);
/* Instantiate pool master for the default data and metadata pool (both
* have pool ID equals to 0).
struct qmt_pool_info *pool;
struct dt_device *dev = NULL;
dt_obj_version_t version;
- cfs_list_t *pos;
+ struct list_head *pos;
int rc = 0, qtype;
ENTRY;
/* iterate over each pool in the hash and allocate a quota site for each
* one. This involves creating a global index file on disk */
- cfs_list_for_each(pos, &qmt->qmt_pool_list) {
+ list_for_each(pos, &qmt->qmt_pool_list) {
struct dt_object *obj;
int pool_type, pool_id;
struct lquota_entry *lqe;
- pool = cfs_list_entry(pos, struct qmt_pool_info,
- qpi_linkage);
+ pool = list_entry(pos, struct qmt_pool_info,
+ qpi_linkage);
pool_id = pool->qpi_key & 0x0000ffff;
pool_type = pool->qpi_key >> 16;
#include "qsd_internal.h"
-static CFS_LIST_HEAD(qfs_list);
+static LIST_HEAD(qfs_list);
/* protect the qfs_list */
static DEFINE_SPINLOCK(qfs_list_lock);
LASSERT(qfs->qfs_ref > 0);
qfs->qfs_ref--;
if (qfs->qfs_ref == 0) {
- LASSERT(cfs_list_empty(&qfs->qfs_qsd_list));
- cfs_list_del(&qfs->qfs_link);
+ LASSERT(list_empty(&qfs->qfs_qsd_list));
+ list_del(&qfs->qfs_link);
OBD_FREE_PTR(qfs);
}
spin_unlock(&qfs_list_lock);
RETURN(NULL);
mutex_init(&new->qfs_mutex);
- CFS_INIT_LIST_HEAD(&new->qfs_qsd_list);
+ INIT_LIST_HEAD(&new->qfs_qsd_list);
strcpy(new->qfs_name, name);
new->qfs_ref = 1;
}
/* search in the fsinfo list */
spin_lock(&qfs_list_lock);
- cfs_list_for_each_entry(qfs, &qfs_list, qfs_link) {
+ list_for_each_entry(qfs, &qfs_list, qfs_link) {
if (!strcmp(qfs->qfs_name, name)) {
qfs->qfs_ref++;
goto out;
if (new) {
/* not found, but we were asked to create a new one */
- cfs_list_add_tail(&new->qfs_link, &qfs_list);
+ list_add_tail(&new->qfs_link, &qfs_list);
qfs = new;
new = NULL;
}
struct qsd_instance *qsd;
struct qsd_qtype_info *qqi;
- cfs_list_for_each_entry(qsd, &qfs->qfs_qsd_list, qsd_link) {
+ list_for_each_entry(qsd, &qfs->qfs_qsd_list, qsd_link) {
bool skip = false;
int type;
struct qsd_fsinfo *qsd_fsinfo;
/* link into qfs_qsd_list of qfs_fsinfo */
- cfs_list_t qsd_link;
+ struct list_head qsd_link;
/* list of lqe entry which might need quota space adjustment */
- cfs_list_t qsd_adjust_list;
+ struct list_head qsd_adjust_list;
/* lock protecting adjust list */
spinlock_t qsd_adjust_lock;
struct ptlrpc_thread qsd_upd_thread;
/* list of update tasks */
- cfs_list_t qsd_upd_list;
+ struct list_head qsd_upd_list;
/* r/w spinlock protecting:
* - the state flags
struct lprocfs_stats *qqi_stats;
/* deferred update for the global index copy */
- cfs_list_t qqi_deferred_glb;
+ struct list_head qqi_deferred_glb;
/* deferred update for the slave index copy */
- cfs_list_t qqi_deferred_slv;
+ struct list_head qqi_deferred_slv;
/* Various flags representing the current state of the slave for this
* quota type. */
unsigned int qfs_enabled[LQUOTA_NR_RES];
/* list of all qsd_instance for this fs */
- cfs_list_t qfs_qsd_list;
+ struct list_head qfs_qsd_list;
struct mutex qfs_mutex;
/* link to the global quota fsinfo list. */
- cfs_list_t qfs_link;
+ struct list_head qfs_link;
/* reference count */
int qfs_ref;
/* udpate record for slave & global index copy */
struct qsd_upd_rec {
- cfs_list_t qur_link; /* link into qsd_upd_list */
+ struct list_head qur_link; /* link into qsd_upd_list */
union lquota_id qur_qid;
union lquota_rec qur_rec;
struct qsd_qtype_info *qur_qqi;
qsd->qsd_type_array[qtype] = NULL;
/* all deferred work lists should be empty */
- LASSERT(cfs_list_empty(&qqi->qqi_deferred_glb));
- LASSERT(cfs_list_empty(&qqi->qqi_deferred_slv));
+ LASSERT(list_empty(&qqi->qqi_deferred_glb));
+ LASSERT(list_empty(&qqi->qqi_deferred_slv));
/* shutdown lquota site */
if (qqi->qqi_site != NULL && !IS_ERR(qqi->qqi_site)) {
qqi->qqi_reint = false;
init_waitqueue_head(&qqi->qqi_reint_thread.t_ctl_waitq);
thread_set_flags(&qqi->qqi_reint_thread, SVC_STOPPED);
- CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
- CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_slv);
+ INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
+ INIT_LIST_HEAD(&qqi->qqi_deferred_slv);
/* open accounting object */
LASSERT(qqi->qqi_acct_obj == NULL);
if (qsd->qsd_fsinfo != NULL) {
mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
/* remove from the list of fsinfo */
- cfs_list_del_init(&qsd->qsd_link);
+ list_del_init(&qsd->qsd_link);
mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);
qsd_put_fsinfo(qsd->qsd_fsinfo);
qsd->qsd_fsinfo = NULL;
/* generic initializations */
rwlock_init(&qsd->qsd_lock);
- CFS_INIT_LIST_HEAD(&qsd->qsd_link);
+ INIT_LIST_HEAD(&qsd->qsd_link);
thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
init_waitqueue_head(&qsd->qsd_upd_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
+ INIT_LIST_HEAD(&qsd->qsd_upd_list);
spin_lock_init(&qsd->qsd_adjust_lock);
- CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
+ INIT_LIST_HEAD(&qsd->qsd_adjust_list);
qsd->qsd_prepared = false;
qsd->qsd_started = false;
}
static int qsd_entry_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
struct lquota_entry *lqe;
int *pending = (int *)data;
- lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+ lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
LASSERT(atomic_read(&lqe->lqe_ref) > 0);
lqe_read_lock(lqe);
/* any pending quota adjust? */
spin_lock(&qsd->qsd_adjust_lock);
- cfs_list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
+ list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
if (lqe2qqi(lqe) == qqi) {
- cfs_list_del_init(&lqe->lqe_link);
+ list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
}
/* any pending updates? */
read_lock(&qsd->qsd_lock);
- cfs_list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
+ list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
if (upd->qur_qqi == qqi) {
read_unlock(&qsd->qsd_lock);
CDEBUG(D_QUOTA, "%s: pending %s updates for type:%d.\n",
}
/* fill it */
- CFS_INIT_LIST_HEAD(&upd->qur_link);
+ INIT_LIST_HEAD(&upd->qur_link);
upd->qur_qqi = qqi;
upd->qur_lqe = lqe;
if (lqe)
}
/* must hold the qsd_lock */
-static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
+static void qsd_add_deferred(struct qsd_instance *qsd, struct list_head *list,
struct qsd_upd_rec *upd)
{
struct qsd_upd_rec *tmp, *n;
}
/* Sort the updates in ascending order */
- cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
+ list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
/* There could be some legacy records which have duplicated
* version. Imagine following scenario: slave received global
LASSERT(tmp->qur_lqe);
LQUOTA_ERROR(tmp->qur_lqe, "Found a conflict record "
"with ver:"LPU64"", tmp->qur_ver);
- cfs_list_del_init(&tmp->qur_link);
+ list_del_init(&tmp->qur_link);
qsd_upd_free(tmp);
} else if (upd->qur_ver < tmp->qur_ver) {
continue;
} else {
- cfs_list_add_tail(&upd->qur_link, &tmp->qur_link);
+ list_add_tail(&upd->qur_link, &tmp->qur_link);
return;
}
}
- cfs_list_add(&upd->qur_link, list);
+ list_add(&upd->qur_link, list);
}
/* must hold the qsd_lock */
-static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
- __u64 ver)
+static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi,
+ struct list_head *list, __u64 ver)
{
struct qsd_upd_rec *upd, *tmp;
ENTRY;
/* Get the first update record in the list, which has the smallest
* version, discard all records with versions smaller than the current
* one */
- cfs_list_for_each_entry_safe(upd, tmp, list, qur_link) {
+ list_for_each_entry_safe(upd, tmp, list, qur_link) {
if (upd->qur_ver <= ver) {
/* drop this update */
- cfs_list_del_init(&upd->qur_link);
+ list_del_init(&upd->qur_link);
CDEBUG(D_QUOTA, "%s: skipping deferred update ver:"
LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
}
/* No remaining deferred update */
- if (cfs_list_empty(list))
+ if (list_empty(list))
RETURN_EXIT;
CDEBUG(D_QUOTA, "%s: found deferred update record. "
*/
void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
{
- cfs_list_t *list;
- __u64 *idx_ver;
+ struct list_head *list;
+ __u64 *idx_ver;
idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
list = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
/* Out of order update (the one with smaller version hasn't
* reached slave or hasn't been flushed to disk yet), or
* the reintegration is in progress. Defer the update. */
- cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
- &qqi->qqi_deferred_slv;
+ struct list_head *list = global ? &qqi->qqi_deferred_glb :
+ &qqi->qqi_deferred_slv;
qsd_add_deferred(qsd, list, upd);
}
/* the lqe is being queued for the per-ID lock cancel, we should
* cancel the lock cancel and re-add it for quota adjust */
- if (!cfs_list_empty(&lqe->lqe_link) &&
+ if (!list_empty(&lqe->lqe_link) &&
lqe->lqe_adjust_time == 0) {
- cfs_list_del_init(&lqe->lqe_link);
+ list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
- if (cfs_list_empty(&lqe->lqe_link)) {
+ if (list_empty(&lqe->lqe_link)) {
if (cancel)
lqe->lqe_adjust_time = 0;
else
cfs_time_current_64();
/* lqe reference transfered to list */
if (defer)
- cfs_list_add_tail(&lqe->lqe_link,
+ list_add_tail(&lqe->lqe_link,
&qsd->qsd_adjust_list);
else
- cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
+ list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
added = true;
}
spin_unlock(&qsd->qsd_adjust_lock);
/* return true if there is pending writeback records or the pending
* adjust requests */
-static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
+static bool qsd_job_pending(struct qsd_instance *qsd, struct list_head *upd,
bool *uptodate)
{
bool job_pending = false;
int qtype;
- LASSERT(cfs_list_empty(upd));
+ LASSERT(list_empty(upd));
*uptodate = true;
spin_lock(&qsd->qsd_adjust_lock);
- if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
+ if (!list_empty(&qsd->qsd_adjust_list)) {
struct lquota_entry *lqe;
- lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
+ lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
cfs_time_current_64()))
spin_unlock(&qsd->qsd_adjust_lock);
write_lock(&qsd->qsd_lock);
- if (!cfs_list_empty(&qsd->qsd_upd_list)) {
- cfs_list_splice_init(&qsd->qsd_upd_list, upd);
+ if (!list_empty(&qsd->qsd_upd_list)) {
+ list_splice_init(&qsd->qsd_upd_list, upd);
job_pending = true;
}
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
struct l_wait_info lwi;
- cfs_list_t queue;
+ struct list_head queue;
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
int qtype, rc = 0;
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&queue);
+ INIT_LIST_HEAD(&queue);
lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
while (1) {
l_wait_event(thread->t_ctl_waitq,
qsd_job_pending(qsd, &queue, &uptodate) ||
!thread_is_running(thread), &lwi);
- cfs_list_for_each_entry_safe(upd, n, &queue, qur_link) {
- cfs_list_del_init(&upd->qur_link);
+ list_for_each_entry_safe(upd, n, &queue, qur_link) {
+ list_del_init(&upd->qur_link);
qsd_process_upd(env, upd);
qsd_upd_free(upd);
}
spin_lock(&qsd->qsd_adjust_lock);
cur_time = cfs_time_current_64();
- cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
- lqe_link) {
+ list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
+ lqe_link) {
/* deferred items are sorted by time */
if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
cur_time))
break;
- cfs_list_del_init(&lqe->lqe_link);
+ list_del_init(&lqe->lqe_link);
spin_unlock(&qsd->qsd_adjust_lock);
if (thread_is_running(thread) && uptodate) {
continue;
write_lock(&qsd->qsd_lock);
- cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
- qur_link) {
+ list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
+ qur_link) {
CWARN("%s: Free global deferred upd: ID:"LPU64", "
"ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
upd->qur_qid.qid_uid, upd->qur_ver,
list_del_init(&upd->qur_link);
qsd_upd_free(upd);
}
- cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
- qur_link) {
+ list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
+ qur_link) {
CWARN("%s: Free slave deferred upd: ID:"LPU64", "
"ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
upd->qur_qid.qid_uid, upd->qur_ver,
struct lquota_entry *lqe;
spin_lock(&qsd->qsd_adjust_lock);
- while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
- lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
- struct lquota_entry, lqe_link);
- cfs_list_del_init(&lqe->lqe_link);
+ while (!list_empty(&qsd->qsd_adjust_list)) {
+ lqe = list_entry(qsd->qsd_adjust_list.next,
+ struct lquota_entry, lqe_link);
+ list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
spin_unlock(&qsd->qsd_adjust_lock);
static struct it_node {
struct interval_node node;
- cfs_list_t list;
+ struct list_head list;
int hit, valid;
} *it_array;
static int it_count;
/* list */
contended_count = 0;
gettimeofday(&start, NULL);
- cfs_list_for_each_entry(n, &header, list) {
+ list_for_each_entry(n, &header, list) {
if (extent_overlapped(&ext, &n->node.in_extent)) {
count = LOOP_COUNT;
while (count--);
__F(&n->node.in_extent));
interval_erase(&n->node, &root);
n->valid = 0;
- cfs_list_del_init(&n->list);
+ list_del_init(&n->list);
} else {
__u64 low, high;
low = (random() % max_count) & ALIGN_MASK;
dprintf("Adding a node "__S"\n",
__F(&n->node.in_extent));
n->valid = 1;
- cfs_list_add(&n->list, &header);
+ list_add(&n->list, &header);
}
}
n->hit = 0;
n->valid = 1;
if (i == 0)
- cfs_list_add_tail(&n->list, &header);
+ list_add_tail(&n->list, &header);
else
- cfs_list_add_tail(&n->list, &it_array[rand()%i].list);
+ list_add_tail(&n->list, &it_array[rand()%i].list);
}
return root;
bool dry_run;
struct obd_group_info {
- __u64 grp_last_id;
- __u64 grp_seq;
- cfs_list_t grp_list;
+ __u64 grp_last_id;
+ __u64 grp_seq;
+ struct list_head grp_list;
};
-cfs_list_t grp_info_list;
+struct list_head grp_info_list;
-static void grp_info_list_destroy(cfs_list_t *list)
+static void grp_info_list_destroy(struct list_head *list)
{
struct obd_group_info *grp, *tmp;
- cfs_list_for_each_entry_safe(grp, tmp, list, grp_list) {
- cfs_list_del_init(&grp->grp_list);
+ list_for_each_entry_safe(grp, tmp, list, grp_list) {
+ list_del_init(&grp->grp_list);
free(grp);
}
}
return le64_to_cpu(last_id);
}
-struct obd_group_info *find_or_create_grp(cfs_list_t *list, __u64 seq,
+struct obd_group_info *find_or_create_grp(struct list_head *list, __u64 seq,
const char *mount)
{
struct obd_group_info *grp;
- cfs_list_t *entry;
+ struct list_head *entry;
char tmp_path[PATH_MAX];
char seq_name[32];
int retval;
__u64 tmp_last_id;
- cfs_list_for_each(entry, list) {
- grp = (struct obd_group_info *)cfs_list_entry(entry,
+ list_for_each(entry, list) {
+ grp = list_entry(entry,
struct obd_group_info,
grp_list);
if (grp->grp_seq == seq)
grp->grp_last_id = tmp_last_id;
grp->grp_seq = seq;
- cfs_list_add(&grp->grp_list, list);
+ list_add(&grp->grp_list, list);
return grp;
}