struct ost_pool pool_obds; /* pool members */
atomic_t pool_refcount;
struct lod_qos_rr pool_rr;
- cfs_hlist_node_t pool_hash; /* access by poolname */
+ struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list;
struct proc_dir_entry *pool_proc_entry;
struct obd_device *pool_lobd; /* owner */
struct lod_qos lod_qos; /* qos info per lod */
/* OST pool data */
- struct ost_pool lod_pool_info; /* all OSTs in a packed array */
- int lod_pool_count;
- cfs_hash_t *lod_pools_hash_body; /* used for key access */
- cfs_list_t lod_pool_list; /* used for sequential access */
- cfs_proc_dir_entry_t *lod_pool_proc_entry;
+ struct ost_pool lod_pool_info; /* all OSTs in a packed array */
+ int lod_pool_count;
+ cfs_hash_t *lod_pools_hash_body; /* used for key access */
+ struct list_head lod_pool_list; /* used for sequential access */
+ struct proc_dir_entry *lod_pool_proc_entry;
enum lustre_sec_part lod_sp_me;
- cfs_proc_dir_entry_t *lod_symlink;
+ struct proc_dir_entry *lod_symlink;
};
#define lod_osts lod_ost_descs.ltd_tgts
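The hunks above swap the libcfs wrapper types (cfs_list_t, cfs_hlist_node_t, cfs_proc_dir_entry_t) for the native kernel types; the cfs_hash_t handles are not touched here. For reference, a minimal sketch of the mapping, using a hypothetical struct pool_demo rather than the real pool_desc:

	#include <linux/list.h>

	struct pool_demo {
		struct hlist_node pd_hash;	/* was cfs_hlist_node_t */
		struct list_head  pd_list;	/* was cfs_list_t */
	};

	static void pool_demo_init(struct pool_demo *pd)
	{
		INIT_HLIST_NODE(&pd->pd_hash);	/* was CFS_INIT_HLIST_NODE() */
		INIT_LIST_HEAD(&pd->pd_list);	/* was CFS_INIT_LIST_HEAD() */
	}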
ltd->ltd_refcount--;
if (ltd->ltd_refcount == 0 && ltd->ltd_death_row) {
struct lod_tgt_desc *tgt_desc, *tmp;
+ struct list_head kill;
unsigned int idx;
- CFS_LIST_HEAD(kill);
CDEBUG(D_CONFIG, "destroying %d ltd desc\n",
ltd->ltd_death_row);
+ INIT_LIST_HEAD(&kill);
+
cfs_foreach_bit(ltd->ltd_tgt_bitmap, idx) {
tgt_desc = LTD_TGT(ltd, idx);
LASSERT(tgt_desc);
if (!tgt_desc->ltd_reap)
continue;
- cfs_list_add(&tgt_desc->ltd_kill, &kill);
+ list_add(&tgt_desc->ltd_kill, &kill);
LTD_TGT(ltd, idx) = NULL;
/* FIXME: only support OST pool for now */
if (ltd == &lod->lod_ost_descs) {
mutex_unlock(&ltd->ltd_mutex);
up_read(&ltd->ltd_rw_sem);
- cfs_list_for_each_entry_safe(tgt_desc, tmp, &kill, ltd_kill) {
+ list_for_each_entry_safe(tgt_desc, tmp, &kill, ltd_kill) {
int rc;
- cfs_list_del(&tgt_desc->ltd_kill);
+ list_del(&tgt_desc->ltd_kill);
if (ltd == &lod->lod_ost_descs) {
/* remove from QoS structures */
rc = qos_del_tgt(lod, tgt_desc);
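CFS_LIST_HEAD(kill) used to declare and initialize the on-stack head in one statement; the replacement splits it into a declaration plus a later INIT_LIST_HEAD() call. The native LIST_HEAD() macro would keep the one-statement form. A sketch of both spellings, assuming nothing beyond <linux/list.h>:

	#include <linux/list.h>

	static void stack_head_demo(void)
	{
		LIST_HEAD(kill);	/* declare and initialize in one step */
		struct list_head kill2;	/* two-step form used by the patch */

		INIT_LIST_HEAD(&kill2);
		/* both heads are now empty, ready for list_add() */
	}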
lod->lod_sp_me = LUSTRE_SP_CLI;
/* Set up allocation policy (QoS and RR) */
- CFS_INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list);
+ INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list);
init_rwsem(&lod->lod_qos.lq_rw_sem);
lod->lod_qos.lq_dirty = 1;
lod->lod_qos.lq_rr.lqr_dirty = 1;
if (lod->lod_pools_hash_body == NULL)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&lod->lod_pool_list);
+ INIT_LIST_HEAD(&lod->lod_pool_list);
lod->lod_pool_count = 0;
rc = lod_ost_pool_init(&lod->lod_pool_info, 0);
if (rc)
int lod_pools_fini(struct lod_device *lod)
{
struct obd_device *obd = lod2obd(lod);
- cfs_list_t *pos, *tmp;
- struct pool_desc *pool;
+ struct pool_desc *pool, *tmp;
ENTRY;
- cfs_list_for_each_safe(pos, tmp, &lod->lod_pool_list) {
- pool = cfs_list_entry(pos, struct pool_desc, pool_list);
+ list_for_each_entry_safe(pool, tmp, &lod->lod_pool_list, pool_list) {
/* free pool structs */
CDEBUG(D_INFO, "delete pool %p\n", pool);
/* In the function below, .hs_keycmp resolves to
{
CDEBUG(D_INFO, "pool %p\n", pool);
if (atomic_dec_and_test(&pool->pool_refcount)) {
- LASSERT(cfs_hlist_unhashed(&pool->pool_hash));
- LASSERT(cfs_list_empty(&pool->pool_list));
+ LASSERT(hlist_unhashed(&pool->pool_hash));
+ LASSERT(list_empty(&pool->pool_list));
LASSERT(pool->pool_proc_entry == NULL);
lod_ost_pool_free(&(pool->pool_rr.lqr_pool));
lod_ost_pool_free(&(pool->pool_obds));
*
* \retval char array referencing the pool name (no refcount)
*/
-static void *pool_key(cfs_hlist_node_t *hnode)
+static void *pool_key(struct hlist_node *hnode)
{
struct pool_desc *pool;
- pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
return pool->pool_name;
}
* \retval 1 if \a key is the same as the key of \a compared
* \retval 0 if \a key is different from the key of \a compared
*/
-static int pool_hashkey_keycmp(const void *key, cfs_hlist_node_t *compared)
+static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared)
{
return !strncmp(key, pool_key(compared), LOV_MAXPOOLNAME);
}
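Note the inverted sense relative to strncmp(): strncmp() returns 0 on a match, so the negation yields 1 exactly when the pool names are equal, which is the convention the .hs_keycmp hook uses here (nonzero means "same key"). A standalone illustration, with a stand-in constant instead of the real LOV_MAXPOOLNAME:

	#include <string.h>

	#define DEMO_MAXPOOLNAME 15	/* stand-in for LOV_MAXPOOLNAME */

	/* returns 1 when the two names match, 0 otherwise */
	static int demo_keycmp(const void *key, const char *name)
	{
		return !strncmp(key, name, DEMO_MAXPOOLNAME);
	}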
*
* \retval struct pool_desc for the specified \a hnode
*/
-static void *pool_hashobject(cfs_hlist_node_t *hnode)
+static void *pool_hashobject(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ return hlist_entry(hnode, struct pool_desc, pool_hash);
}
-static void pool_hashrefcount_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
struct pool_desc *pool;
- pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
pool_getref(pool);
}
static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
struct pool_desc *pool;
- pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
pool_putref_locked(pool);
}
#endif
spin_lock(&obd->obd_dev_lock);
- cfs_list_add_tail(&new_pool->pool_list, &lod->lod_pool_list);
+ list_add_tail(&new_pool->pool_list, &lod->lod_pool_list);
lod->lod_pool_count++;
spin_unlock(&obd->obd_dev_lock);
out_err:
spin_lock(&obd->obd_dev_lock);
- cfs_list_del_init(&new_pool->pool_list);
+ list_del_init(&new_pool->pool_list);
lod->lod_pool_count--;
spin_unlock(&obd->obd_dev_lock);
}
spin_lock(&obd->obd_dev_lock);
- cfs_list_del_init(&pool->pool_list);
+ list_del_init(&pool->pool_list);
lod->lod_pool_count--;
spin_unlock(&obd->obd_dev_lock);
struct lod_qos_oss *oss = NULL, *temposs;
struct obd_export *exp = ost_desc->ltd_exp;
int rc = 0, found = 0;
- cfs_list_t *list;
+ struct list_head *list;
ENTRY;
down_write(&lod->lod_qos.lq_rw_sem);
* but there is no official API to access information like this
* with OSD API.
*/
- cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
+ list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
if (obd_uuid_equals(&oss->lqo_uuid,
&exp->exp_connection->c_remote_uuid)) {
found++;
sizeof(oss->lqo_uuid));
} else {
/* Assume we have to move this one */
- cfs_list_del(&oss->lqo_oss_list);
+ list_del(&oss->lqo_oss_list);
}
oss->lqo_ost_count++;
/* Add sorted by # of OSTs. Find the first entry that we're
bigger than... */
list = &lod->lod_qos.lq_oss_list;
- cfs_list_for_each_entry(temposs, list, lqo_oss_list) {
+ list_for_each_entry(temposs, list, lqo_oss_list) {
if (oss->lqo_ost_count > temposs->lqo_ost_count)
break;
}
/* ...and add before it. If we're the first or smallest, temposs
points to the list head, and we add to the end. */
- cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
+ list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
lod->lod_qos.lq_dirty = 1;
lod->lod_qos.lq_rr.lqr_dirty = 1;
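The sorted insert above leans on a property of list_for_each_entry(): when the loop runs to completion without a break, the cursor's embedded list_head aliases the list head itself, so list_add_tail() on it appends at the very end; after a break it inserts just before the first OSS with fewer OSTs. A self-contained sketch with a hypothetical struct oss_demo:

	#include <linux/list.h>

	struct oss_demo {
		struct list_head od_link;
		unsigned int	 od_count;
	};

	/* insert @item keeping @head sorted by descending od_count */
	static void oss_insert_sorted(struct list_head *head,
				      struct oss_demo *item)
	{
		struct oss_demo *pos;

		list_for_each_entry(pos, head, od_link) {
			if (item->od_count > pos->od_count)
				break;
		}
		/* on normal loop exit &pos->od_link == head, so this
		 * degenerates to list_add_tail(&item->od_link, head) */
		list_add_tail(&item->od_link, &pos->od_link);
	}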
if (oss->lqo_ost_count == 0) {
CDEBUG(D_QOS, "removing OSS %s\n",
obd_uuid2str(&oss->lqo_uuid));
- cfs_list_del(&oss->lqo_oss_list);
+ list_del(&oss->lqo_oss_list);
ost_desc->ltd_qos.ltq_oss = NULL;
OBD_FREE_PTR(oss);
}
GOTO(out, rc = -EAGAIN);
/* find bavail on each OSS */
- cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list)
+ list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list)
oss->lqo_bavail = 0;
lod->lod_qos.lq_active_oss_count = 0;
}
/* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
- cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
+ list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
temp = oss->lqo_bavail >> 1;
do_div(temp, oss->lqo_ost_count * num_active);
oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
lod->lod_qos.lq_active_oss_count;
/* Decrease all OSS penalties */
- cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
+ list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
oss->lqo_penalty = 0;
else
/* Place all the OSTs from 1 OSS at the same time. */
placed = 0;
- cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
+ list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) {
int j = 0;
for (i = 0; i < lqr->lqr_pool.op_count; i++) {
init_rwsem(&cdt->cdt_request_lock);
mutex_init(&cdt->cdt_restore_lock);
- CFS_INIT_LIST_HEAD(&cdt->cdt_requests);
- CFS_INIT_LIST_HEAD(&cdt->cdt_agents);
- CFS_INIT_LIST_HEAD(&cdt->cdt_restore_hdl);
+ INIT_LIST_HEAD(&cdt->cdt_requests);
+ INIT_LIST_HEAD(&cdt->cdt_agents);
+ INIT_LIST_HEAD(&cdt->cdt_restore_hdl);
rc = lu_env_init(&cdt->cdt_env, LCT_MD_THREAD);
if (rc < 0)
static int mdt_export_cleanup(struct obd_export *exp)
{
- struct mdt_export_data *med = &exp->exp_mdt_data;
- struct obd_device *obd = exp->exp_obd;
- struct mdt_device *mdt;
- struct mdt_thread_info *info;
- struct lu_env env;
- CFS_LIST_HEAD(closing_list);
- struct mdt_file_data *mfd, *n;
- int rc = 0;
- ENTRY;
+ struct list_head closing_list;
+ struct mdt_export_data *med = &exp->exp_mdt_data;
+ struct obd_device *obd = exp->exp_obd;
+ struct mdt_device *mdt;
+ struct mdt_thread_info *info;
+ struct lu_env env;
+ struct mdt_file_data *mfd, *n;
+ int rc = 0;
+ ENTRY;
+ INIT_LIST_HEAD(&closing_list);
spin_lock(&med->med_open_lock);
- while (!cfs_list_empty(&med->med_open_head)) {
- cfs_list_t *tmp = med->med_open_head.next;
- mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
+ while (!list_empty(&med->med_open_head)) {
+ struct list_head *tmp = med->med_open_head.next;
+ mfd = list_entry(tmp, struct mdt_file_data, mfd_list);
/* Remove mfd handle so it can't be found again.
* We are consuming the mfd_list reference here. */
class_handle_unhash(&mfd->mfd_handle);
- cfs_list_move_tail(&mfd->mfd_list, &closing_list);
+ list_move_tail(&mfd->mfd_list, &closing_list);
}
spin_unlock(&med->med_open_lock);
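med_open_lock is a spinlock, while closing an mfd can block, so the loop above only unhashes each entry and moves it onto the private closing_list; the blocking close happens after the lock is dropped. The shape of that idiom, sketched with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct file_demo {
		struct list_head fd_link;
	};

	static void close_one(struct file_demo *fd);	/* assumed; may sleep */

	static void close_all(spinlock_t *lock, struct list_head *open_head)
	{
		LIST_HEAD(closing);
		struct file_demo *fd, *next;

		spin_lock(lock);
		while (!list_empty(open_head)) {
			fd = list_entry(open_head->next,
					struct file_demo, fd_link);
			list_move_tail(&fd->fd_link, &closing);
		}
		spin_unlock(lock);

		/* blocking work is now safe: the entries are private */
		list_for_each_entry_safe(fd, next, &closing, fd_link) {
			list_del_init(&fd->fd_link);
			close_one(fd);
		}
	}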
mdt = mdt_dev(obd->obd_lu_dev);
info->mti_mdt = mdt;
info->mti_exp = exp;
- if (!cfs_list_empty(&closing_list)) {
- struct md_attr *ma = &info->mti_attr;
+ if (!list_empty(&closing_list)) {
+ struct md_attr *ma = &info->mti_attr;
- /* Close any open files (which may also cause orphan unlinking). */
- cfs_list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
- cfs_list_del_init(&mfd->mfd_list);
+ /* Close any open files (which may also cause orphan
+ * unlinking). */
+ list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
+ list_del_init(&mfd->mfd_list);
ma->ma_need = ma->ma_valid = 0;
/* This file is being closed due to an eviction, it
/* FIXME: Can we avoid using these two interfaces? */
static int mdt_init_export(struct obd_export *exp)
{
- struct mdt_export_data *med = &exp->exp_mdt_data;
- int rc;
- ENTRY;
+ struct mdt_export_data *med = &exp->exp_mdt_data;
+ int rc;
+ ENTRY;
- CFS_INIT_LIST_HEAD(&med->med_open_head);
+ INIT_LIST_HEAD(&med->med_open_head);
spin_lock_init(&med->med_open_lock);
mutex_init(&med->med_idmap_mutex);
med->med_idmap = NULL;
&exp->exp_client_uuid)))
RETURN(0);
- ldlm_destroy_export(exp);
- tgt_client_free(exp);
+ ldlm_destroy_export(exp);
+ tgt_client_free(exp);
- LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
- LASSERT(cfs_list_empty(&exp->exp_mdt_data.med_open_head));
+ LASSERT(list_empty(&exp->exp_outstanding_replies));
+ LASSERT(list_empty(&exp->exp_mdt_data.med_open_head));
- RETURN(0);
+ RETURN(0);
}
/** The maximum depth that fid2path() will search.
/* file data for open files on MDS */
struct mdt_file_data {
- struct portals_handle mfd_handle; /* must be first */
- __u64 mfd_mode; /* open mode provided by client */
- cfs_list_t mfd_list; /* protected by med_open_lock */
- __u64 mfd_xid; /* xid of the open request */
- struct lustre_handle mfd_old_handle; /* old handle in replay case */
- struct mdt_object *mfd_object; /* point to opened object */
+ /** portals handle must be first */
+ struct portals_handle mfd_handle;
+ /** open mode provided by client */
+ __u64 mfd_mode;
+ /** protected by med_open_lock */
+ struct list_head mfd_list;
+ /** xid of the open request */
+ __u64 mfd_xid;
+ /** old handle in replay case */
+ struct lustre_handle mfd_old_handle;
+ /** point to opened object */
+ struct mdt_object *mfd_object;
};
#define CDT_NONBLOCKING_RESTORE (1ULL << 0)
};
struct cdt_agent_req {
- cfs_list_t car_request_list; /**< to chain all the req. */
+ struct list_head car_request_list; /**< to chain all the req. */
atomic_t car_refcount; /**< reference counter */
__u64 car_compound_id; /**< compound id */
__u64 car_flags; /**< request original flags */
extern struct kmem_cache *mdt_hsm_car_kmem;
struct hsm_agent {
- cfs_list_t ha_list; /**< to chain the agents */
+ struct list_head ha_list; /**< to chain the agents */
struct obd_uuid ha_uuid; /**< agent uuid */
__u32 *ha_archive_id; /**< archive id */
int ha_archive_cnt; /**< number of archive entries
};
struct cdt_restore_handle {
- cfs_list_t crh_list; /**< to chain the handle */
- struct lu_fid crh_fid; /**< fid of the object */
- struct ldlm_extent crh_extent; /**< extent of the restore */
- struct mdt_lock_handle crh_lh; /**< lock handle */
+ struct list_head crh_list; /**< to chain the handle */
+ struct lu_fid crh_fid; /**< fid of the object */
+ struct ldlm_extent crh_extent; /**< extent of the restore */
+ struct mdt_lock_handle crh_lh; /**< lock handle */
};
extern struct kmem_cache *mdt_hsm_cdt_kmem; /** restore handle slab cache */
}
static int match_nosquash_list(struct rw_semaphore *sem,
- cfs_list_t *nidlist,
+ struct list_head *nidlist,
lnet_nid_t peernid)
{
int rc;
return count;
}
- /* 1 stands for self export. */
- cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
- if (exp == obd->obd_self_export)
- continue;
+ /* 1 stands for self export. */
+ list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ if (exp == obd->obd_self_export)
+ continue;
if (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS)
continue;
/* Some clients are already connected, skip the change */
};
int lprocfs_mdt_print_open_files(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *v)
+ struct hlist_node *hnode, void *v)
{
struct obd_export *exp = cfs_hash_object(hs, hnode);
struct seq_file *seq = v;
struct mdt_file_data *mfd;
spin_lock(&med->med_open_lock);
- cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+ list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
seq_printf(seq, DFID"\n",
PFID(mdt_object_fid(mfd->mfd_object)));
}
OBD_ALLOC_PTR(mfd);
if (mfd != NULL) {
- CFS_INIT_LIST_HEAD(&mfd->mfd_handle.h_link);
+ INIT_LIST_HEAD(&mfd->mfd_handle.h_link);
mfd->mfd_handle.h_owner = med;
- CFS_INIT_LIST_HEAD(&mfd->mfd_list);
+ INIT_LIST_HEAD(&mfd->mfd_list);
class_handle_hash(&mfd->mfd_handle, &mfd_handle_ops);
}
mfd = class_handle2object(handle->cookie, med);
/* during dw/setattr replay the mfd can be found by old handle */
if (mfd == NULL && is_replay_or_resent) {
- cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+ list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
if (mfd->mfd_old_handle.cookie == handle->cookie)
RETURN(mfd);
}
/* free mfd */
void mdt_mfd_free(struct mdt_file_data *mfd)
{
- LASSERT(cfs_list_empty(&mfd->mfd_list));
- OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_handle);
+ LASSERT(list_empty(&mfd->mfd_list));
+ OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_handle);
}
static int mdt_create_data(struct mdt_thread_info *info,
PFID(mdt_object_fid(mfd->mfd_object)),
info->mti_rr.rr_handle->cookie);
class_handle_unhash(&old_mfd->mfd_handle);
- cfs_list_del_init(&old_mfd->mfd_list);
+ list_del_init(&old_mfd->mfd_list);
spin_unlock(&med->med_open_lock);
/* no attr update for that close */
la->la_valid = 0;
if (req->rq_export->exp_disconnected) {
spin_lock(&med->med_open_lock);
class_handle_unhash(&mfd->mfd_handle);
- cfs_list_del_init(&mfd->mfd_list);
+ list_del_init(&mfd->mfd_list);
spin_unlock(&med->med_open_lock);
mdt_mfd_close(info, mfd);
} else {
spin_lock(&med->med_open_lock);
- cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ list_add(&mfd->mfd_list, &med->med_open_head);
spin_unlock(&med->med_open_lock);
}
struct mdt_object *p, struct mdt_object *o,
__u64 flags, int created, struct ldlm_reply *rep)
{
- struct ptlrpc_request *req = mdt_info_req(info);
- struct obd_export *exp = req->rq_export;
- struct mdt_export_data *med = &req->rq_export->exp_mdt_data;
- struct md_attr *ma = &info->mti_attr;
- struct lu_attr *la = &ma->ma_attr;
- struct mdt_file_data *mfd;
- struct mdt_body *repbody;
- int rc = 0;
- int isreg, isdir, islnk;
- cfs_list_t *t;
- ENTRY;
+ struct ptlrpc_request *req = mdt_info_req(info);
+ struct obd_export *exp = req->rq_export;
+ struct mdt_export_data *med = &req->rq_export->exp_mdt_data;
+ struct md_attr *ma = &info->mti_attr;
+ struct lu_attr *la = &ma->ma_attr;
+ struct mdt_file_data *mfd;
+ struct mdt_body *repbody;
+ int rc = 0;
+ int isreg, isdir, islnk;
+ struct list_head *t;
+ ENTRY;
LASSERT(ma->ma_valid & MA_INODE);
RETURN(-EAGAIN);
}
- mfd = NULL;
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
+ mfd = NULL;
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
spin_lock(&med->med_open_lock);
- cfs_list_for_each(t, &med->med_open_head) {
- mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
+ list_for_each(t, &med->med_open_head) {
+ mfd = list_entry(t, struct mdt_file_data, mfd_list);
if (mfd->mfd_xid == req->rq_xid)
break;
mfd = NULL;
mdt_mfd_set_mode(mfd, ret == MDT_IOEPOCH_OPENED ?
MDS_FMODE_EPOCH : MDS_FMODE_SOM);
- LASSERT(mdt_info_req(info));
- med = &mdt_info_req(info)->rq_export->exp_mdt_data;
+ LASSERT(mdt_info_req(info));
+ med = &mdt_info_req(info)->rq_export->exp_mdt_data;
spin_lock(&med->med_open_lock);
- cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ list_add(&mfd->mfd_list, &med->med_open_head);
class_handle_hash_back(&mfd->mfd_handle);
spin_unlock(&med->med_open_lock);
rc = -ESTALE;
} else {
class_handle_unhash(&mfd->mfd_handle);
- cfs_list_del_init(&mfd->mfd_list);
+ list_del_init(&mfd->mfd_list);
spin_unlock(&med->med_open_lock);
/* Do not lose object before last unlink. */
} else
rc = -ESTALE;
GOTO(error_ucred, rc);
- }
+ }
- LASSERT(mfd->mfd_mode == MDS_FMODE_EPOCH ||
- mfd->mfd_mode == MDS_FMODE_TRUNC);
- class_handle_unhash(&mfd->mfd_handle);
- cfs_list_del_init(&mfd->mfd_list);
+ LASSERT(mfd->mfd_mode == MDS_FMODE_EPOCH ||
+ mfd->mfd_mode == MDS_FMODE_TRUNC);
+ class_handle_unhash(&mfd->mfd_handle);
+ list_del_init(&mfd->mfd_list);
spin_unlock(&med->med_open_lock);
/* Set EPOCH CLOSE flag if not set by client. */
mdt->mdt_txn_cb.dtc_txn_commit = NULL;
mdt->mdt_txn_cb.dtc_cookie = NULL;
mdt->mdt_txn_cb.dtc_tag = LCT_MD_THREAD;
- CFS_INIT_LIST_HEAD(&mdt->mdt_txn_cb.dtc_linkage);
+ INIT_LIST_HEAD(&mdt->mdt_txn_cb.dtc_linkage);
dt_txn_callback_add(mdt->mdt_bottom, &mdt->mdt_txn_cb);
static void mdt_steal_ack_locks(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt;
- struct obd_export *exp = req->rq_export;
- cfs_list_t *tmp;
- struct ptlrpc_reply_state *oldrep;
- int i;
+ struct obd_export *exp = req->rq_export;
+ struct list_head *tmp;
+ struct ptlrpc_reply_state *oldrep;
+ int i;
- /* CAVEAT EMPTOR: spinlock order */
+ /* CAVEAT EMPTOR: spinlock order */
spin_lock(&exp->exp_lock);
- cfs_list_for_each (tmp, &exp->exp_outstanding_replies) {
- oldrep = cfs_list_entry(tmp, struct ptlrpc_reply_state,
- rs_exp_list);
+ list_for_each(tmp, &exp->exp_outstanding_replies) {
+ oldrep = list_entry(tmp, struct ptlrpc_reply_state,
+ rs_exp_list);
if (oldrep->rs_xid != req->rq_xid)
continue;
svcpt = oldrep->rs_svcpt;
spin_lock(&svcpt->scp_rep_lock);
- cfs_list_del_init (&oldrep->rs_exp_list);
+ list_del_init(&oldrep->rs_exp_list);
CDEBUG(D_HA, "Stealing %d locks from rs %p x"LPD64".t"LPD64
" o%d NID %s\n",
repbody = req_capsule_server_get(mti->mti_pill, &RMF_MDT_BODY);
repbody->mbo_ioepoch = obj->mot_ioepoch;
spin_lock(&med->med_open_lock);
- cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+ list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
if (mfd->mfd_xid == req->rq_xid)
break;
}
mfd->mfd_xid = req->rq_xid;
spin_lock(&med->med_open_lock);
- cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ list_add(&mfd->mfd_list, &med->med_open_head);
spin_unlock(&med->med_open_lock);
repbody->mbo_handle.cookie = mfd->mfd_handle.h_cookie;
}
LASSERT(mfd->mfd_mode == MDS_FMODE_SOM);
LASSERT(!(info->mti_ioepoch->flags & MF_EPOCH_CLOSE));
- class_handle_unhash(&mfd->mfd_handle);
- cfs_list_del_init(&mfd->mfd_list);
+ class_handle_unhash(&mfd->mfd_handle);
+ list_del_init(&mfd->mfd_list);
spin_unlock(&med->med_open_lock);
mdt_mfd_close(info, mfd);
GOTO(out, rc);
}
- CFS_INIT_LIST_HEAD(&mll->mll_list);
+ INIT_LIST_HEAD(&mll->mll_list);
mll->mll_obj = mdt_pobj;
list_add_tail(&mll->mll_list, lock_list);
}
GOTO(out_put_child, rc);
/* 3: iterate the linkea of the object and lock all of the objects */
- CFS_INIT_LIST_HEAD(&lock_list);
+ INIT_LIST_HEAD(&lock_list);
rc = mdt_lock_objects_in_linkea(info, mold, msrcdir, &lock_list);
if (rc != 0)
GOTO(out_put_child, rc);
/* Each mechanism is described by the following struct: */
struct gss_api_mech {
- cfs_list_t gm_list;
- struct module *gm_owner;
- char *gm_name;
- rawobj_t gm_oid;
- atomic_t gm_count;
+ struct list_head gm_list;
+ struct module *gm_owner;
+ char *gm_name;
+ rawobj_t gm_oid;
+ atomic_t gm_count;
struct gss_api_ops *gm_ops;
- int gm_sf_num;
+ int gm_sf_num;
struct subflavor_desc *gm_sfs;
};
};
struct gss_sec_pipefs {
- struct gss_sec gsp_base;
- int gsp_chash_size; /* must be 2^n */
- cfs_hlist_head_t gsp_chash[0];
+ struct gss_sec gsp_base;
+ int gsp_chash_size; /* must be 2^n */
+ struct hlist_head gsp_chash[0];
};
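gsp_chash[0] is the old zero-length-array idiom for a variable-sized trailing bucket array (modern kernel style spells it gsp_chash[]); the owner allocates sizeof(*gsec_pf) plus gsp_chash_size hlist heads in one block, as the OBD_ALLOC() further down shows. A generic sketch of that layout, with hypothetical names and kzalloc() standing in for OBD_ALLOC():

	#include <linux/list.h>
	#include <linux/slab.h>

	struct chash_demo {
		int		  cd_size;	/* must be 2^n */
		struct hlist_head cd_hash[0];	/* trailing bucket array */
	};

	static struct chash_demo *chash_alloc(int size)
	{
		struct chash_demo *cd;
		int i;

		cd = kzalloc(sizeof(*cd) + size * sizeof(struct hlist_head),
			     GFP_KERNEL);
		if (cd == NULL)
			return NULL;
		cd->cd_size = size;
		for (i = 0; i < size; i++)
			INIT_HLIST_HEAD(&cd->cd_hash[i]);
		return cd;
	}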
/*
/*
* all contexts listed here. access is protected by sec spinlock.
*/
- cfs_hlist_head_t gsk_clist;
+ struct hlist_head gsk_clist;
/*
* specially point to root ctx (only one at a time). access is
* protected by sec spinlock.
atomic_inc(&ctx->cc_refcount);
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
if (is_root)
gsec_kr->gsk_root_ctx = ctx;
if (gsec_kr->gsk_root_ctx == ctx)
gsec_kr->gsk_root_ctx = NULL;
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
atomic_dec(&ctx->cc_refcount);
spin_unlock_if(&sec->ps_lock, !locked);
/*
* caller should hold one ref on contexts in freelist.
*/
-static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
+static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
if (ctx) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
atomic_inc(&ctx->cc_refcount);
}
if (gsec_kr == NULL)
RETURN(NULL);
- CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
+ INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
- LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(hlist_empty(&gsec_kr->gsk_clist));
LASSERT(gsec_kr->gsk_root_ctx == NULL);
gss_sec_destroy_common(gsec);
* flush context of root or all, we iterate through the list.
*/
static
-void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
+void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
+ int force)
{
struct gss_sec_keyring *gsec_kr;
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
atomic_inc(&ctx->cc_refcount);
if (ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
} else {
LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
atomic_dec(&ctx->cc_refcount);
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
atomic_inc(&ctx->cc_refcount);
if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
CWARN("unhashed ctx %p\n", ctx);
} else {
LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
#include "gss_internal.h"
#include "gss_api.h"
-static CFS_LIST_HEAD(registered_mechs);
+static struct list_head registered_mechs = LIST_HEAD_INIT(registered_mechs);
static DEFINE_SPINLOCK(registered_mechs_lock);
int lgss_mech_register(struct gss_api_mech *gm)
{
spin_lock(®istered_mechs_lock);
- cfs_list_add(&gm->gm_list, ®istered_mechs);
+ list_add(&gm->gm_list, ®istered_mechs);
spin_unlock(®istered_mechs_lock);
CWARN("Register %s mechanism\n", gm->gm_name);
return 0;
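registered_mechs now gets its compile-time initializer spelled out with LIST_HEAD_INIT(); the shorthand LIST_HEAD(registered_mechs) expands to exactly that declaration. The whole registry pattern, reduced to a sketch with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct mech_demo {
		struct list_head md_link;
		const char	*md_name;
	};

	/* equivalent to: static struct list_head demo_mechs =
	 *		  LIST_HEAD_INIT(demo_mechs); */
	static LIST_HEAD(demo_mechs);
	static DEFINE_SPINLOCK(demo_mechs_lock);

	static void demo_mech_register(struct mech_demo *m)
	{
		spin_lock(&demo_mechs_lock);
		list_add(&m->md_link, &demo_mechs);
		spin_unlock(&demo_mechs_lock);
	}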
void lgss_mech_unregister(struct gss_api_mech *gm)
{
spin_lock(®istered_mechs_lock);
- cfs_list_del(&gm->gm_list);
+ list_del(&gm->gm_list);
spin_unlock(®istered_mechs_lock);
CWARN("Unregister %s mechanism\n", gm->gm_name);
}
struct gss_api_mech *pos, *gm = NULL;
spin_lock(®istered_mechs_lock);
- cfs_list_for_each_entry(pos, ®istered_mechs, gm_list) {
+ list_for_each_entry(pos, ®istered_mechs, gm_list) {
if (0 == strcmp(name, pos->gm_name)) {
if (!try_module_get(pos->gm_owner))
continue;
struct gss_api_mech *pos, *gm = NULL;
spin_lock(®istered_mechs_lock);
- cfs_list_for_each_entry(pos, ®istered_mechs, gm_list) {
+ list_for_each_entry(pos, ®istered_mechs, gm_list) {
if (!try_module_get(pos->gm_owner))
continue;
if (!mech_supports_subflavor(pos, subflavor)) {
}
static
-void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
+void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
{
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
atomic_inc(&ctx->cc_refcount);
- cfs_hlist_add_head(&ctx->cc_cache, hash);
+ hlist_add_head(&ctx->cc_cache, hash);
}
/*
* caller must hold spinlock
*/
static
-void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
+void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
assert_spin_locked(&ctx->cc_sec->ps_lock);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __cfs_hlist_del(&ctx->cc_cache);
- cfs_hlist_add_head(&ctx->cc_cache, freelist);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, freelist);
} else {
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
}
}
*/
static
int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
- cfs_hlist_head_t *freelist)
+ struct hlist_head *freelist)
{
if (cli_ctx_check_death(ctx)) {
if (freelist)
static inline
int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
- cfs_hlist_head_t *freelist)
+ struct hlist_head *freelist)
{
LASSERT(ctx->cc_sec);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
}
static
-void ctx_list_destroy_pf(cfs_hlist_head_t *head)
+void ctx_list_destroy_pf(struct hlist_head *head)
{
struct ptlrpc_cli_ctx *ctx;
- while (!cfs_hlist_empty(head)) {
+ while (!hlist_empty(head)) {
- ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
- cc_cache);
+ ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx,
+ cc_cache);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
&ctx->cc_flags) == 0);
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
ctx_destroy_pf(ctx->cc_sec, ctx);
}
}
spin_lock(&ctx->cc_sec->ps_lock);
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
LASSERT(atomic_read(&ctx->cc_refcount) > 1);
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
if (atomic_dec_and_test(&ctx->cc_refcount))
LBUG();
}
static
void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
- cfs_hlist_head_t *freelist)
+ struct hlist_head *freelist)
{
struct ptlrpc_sec *sec;
struct ptlrpc_cli_ctx *ctx;
hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
alloc_size = sizeof(*gsec_pf) +
- sizeof(cfs_hlist_head_t) * hash_size;
+ sizeof(struct hlist_head) * hash_size;
OBD_ALLOC(gsec_pf, alloc_size);
if (!gsec_pf)
gsec_pf->gsp_chash_size = hash_size;
for (i = 0; i < hash_size; i++)
- CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+ INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
imp, ctx, sf))
gss_sec_destroy_common(gsec);
OBD_FREE(gsec, sizeof(*gsec_pf) +
- sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size);
+ sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
}
static
if (found) {
if (new && new != ctx) {
/* lost the race, just free it */
- cfs_hlist_add_head(&new->cc_cache, &freelist);
+ hlist_add_head(&new->cc_cache, &freelist);
new = NULL;
}
/* hot node, move to head */
if (hash_head->first != &ctx->cc_cache) {
- __cfs_hlist_del(&ctx->cc_cache);
- cfs_hlist_add_head(&ctx->cc_cache, hash_head);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, hash_head);
}
} else {
/* don't allocate for reverse sec */
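The hit path above promotes a context to the front of its hash chain so hot entries are found quickly; hlist has no move helper, hence the __hlist_del()/hlist_add_head() pair. A minimal sketch of that promotion:

	#include <linux/list.h>

	/* move @n to the front of @head unless it is already there */
	static void hlist_promote(struct hlist_node *n, struct hlist_head *head)
	{
		if (head->first != n) {
			__hlist_del(n);
			hlist_add_head(n, head);
		}
	}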
int sync)
{
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
* rpcs during destroy procedure. */
struct gss_upcall_msg {
struct rpc_pipe_msg gum_base;
atomic_t gum_refcount;
- cfs_list_t gum_list;
+ struct list_head gum_list;
__u32 gum_mechidx;
struct gss_sec *gum_gsec;
struct gss_cli_ctx *gum_gctx;
/* pipefs dentries for each mechanisms */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };
/* all upcall messages linked here */
-static cfs_list_t upcall_lists[MECH_MAX];
+static struct list_head upcall_lists[MECH_MAX];
/* and protected by this */
static spinlock_t upcall_locks[MECH_MAX];
static
void upcall_msg_enlist(struct gss_upcall_msg *msg)
{
- __u32 idx = msg->gum_mechidx;
+ __u32 idx = msg->gum_mechidx;
- upcall_list_lock(idx);
- cfs_list_add(&msg->gum_list, &upcall_lists[idx]);
- upcall_list_unlock(idx);
+ upcall_list_lock(idx);
+ list_add(&msg->gum_list, &upcall_lists[idx]);
+ upcall_list_unlock(idx);
}
static
void upcall_msg_delist(struct gss_upcall_msg *msg)
{
- __u32 idx = msg->gum_mechidx;
+ __u32 idx = msg->gum_mechidx;
- upcall_list_lock(idx);
- cfs_list_del_init(&msg->gum_list);
- upcall_list_unlock(idx);
+ upcall_list_lock(idx);
+ list_del_init(&msg->gum_list);
+ upcall_list_unlock(idx);
}
/****************************************
gmsg->gum_gctx = NULL;
}
- LASSERT(cfs_list_empty(&gmsg->gum_list));
- LASSERT(cfs_list_empty(&gmsg->gum_base.list));
- OBD_FREE_PTR(gmsg);
- EXIT;
+ LASSERT(list_empty(&gmsg->gum_list));
+ LASSERT(list_empty(&gmsg->gum_base.list));
+ OBD_FREE_PTR(gmsg);
+ EXIT;
}
static
LASSERT(idx < MECH_MAX);
assert_spin_locked(&upcall_locks[idx]);
- if (cfs_list_empty(&gmsg->gum_list))
+ if (list_empty(&gmsg->gum_list))
return;
- cfs_list_del_init(&gmsg->gum_list);
+ list_del_init(&gmsg->gum_list);
LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
atomic_dec(&gmsg->gum_refcount);
}
struct gss_upcall_msg *gmsg;
upcall_list_lock(mechidx);
- cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+ list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
if (gmsg->gum_data.gum_seq != seq)
continue;
static cfs_time_t ratelimit = 0;
ENTRY;
- LASSERT(cfs_list_empty(&msg->list));
+ LASSERT(list_empty(&msg->list));
/* normally errno is >= 0 */
if (msg->errno >= 0) {
static
void gss_pipe_release(struct inode *inode)
{
- struct rpc_inode *rpci = RPC_I(inode);
- __u32 idx;
- ENTRY;
+ struct rpc_inode *rpci = RPC_I(inode);
+ __u32 idx;
+ ENTRY;
- idx = (__u32) (long) rpci->private;
- LASSERT(idx < MECH_MAX);
+ idx = (__u32) (long) rpci->private;
+ LASSERT(idx < MECH_MAX);
- upcall_list_lock(idx);
- while (!cfs_list_empty(&upcall_lists[idx])) {
- struct gss_upcall_msg *gmsg;
- struct gss_upcall_msg_data *gumd;
+ upcall_list_lock(idx);
+ while (!list_empty(&upcall_lists[idx])) {
+ struct gss_upcall_msg *gmsg;
+ struct gss_upcall_msg_data *gumd;
- gmsg = cfs_list_entry(upcall_lists[idx].next,
- struct gss_upcall_msg, gum_list);
- gumd = &gmsg->gum_data;
- LASSERT(cfs_list_empty(&gmsg->gum_base.list));
+ gmsg = list_entry(upcall_lists[idx].next,
+ struct gss_upcall_msg, gum_list);
+ gumd = &gmsg->gum_data;
+ LASSERT(list_empty(&gmsg->gum_base.list));
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
"nid "LPX64", obd %.*s\n", gmsg,
RETURN(-ENOMEM);
/* initialize pipefs base msg */
- CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
+ INIT_LIST_HEAD(&gmsg->gum_base.list);
gmsg->gum_base.data = &gmsg->gum_data;
gmsg->gum_base.len = sizeof(gmsg->gum_data);
gmsg->gum_base.copied = 0;
}
de_pipes[MECH_KRB5] = de;
- CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
+ INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
spin_lock_init(&upcall_locks[MECH_KRB5]);
return 0;
static
void __exit gss_exit_pipefs_upcall(void)
{
- __u32 i;
+ __u32 i;
- for (i = 0; i < MECH_MAX; i++) {
- LASSERT(cfs_list_empty(&upcall_lists[i]));
+ for (i = 0; i < MECH_MAX; i++) {
+ LASSERT(list_empty(&upcall_lists[i]));
- /* dput pipe dentry here might cause lgssd oops. */
- de_pipes[i] = NULL;
- }
+ /* dput pipe dentry here might cause lgssd oops. */
+ de_pipes[i] = NULL;
+ }
- rpc_unlink(LUSTRE_PIPE_KRB5);
- rpc_rmdir(LUSTRE_PIPE_ROOT);
+ rpc_unlink(LUSTRE_PIPE_KRB5);
+ rpc_rmdir(LUSTRE_PIPE_ROOT);
}
int __init gss_init_pipefs(void)
void __exit gss_exit_pipefs(void)
{
- gss_exit_pipefs_upcall();
- sptlrpc_unregister_policy(&gss_policy_pipefs);
+ gss_exit_pipefs_upcall();
+ sptlrpc_unregister_policy(&gss_policy_pipefs);
}
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
spin_lock_init(&sec->ps_lock);
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
sec->ps_gc_interval = GSS_GC_INTERVAL;
gctx->gc_win = 0;
atomic_set(&gctx->gc_seq, 0);
- CFS_INIT_HLIST_NODE(&ctx->cc_cache);
+ INIT_HLIST_NODE(&ctx->cc_cache);
atomic_set(&ctx->cc_refcount, 0);
ctx->cc_sec = sec;
ctx->cc_ops = ctxops;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* take a ref on belonging sec, balanced in ctx destroying */
atomic_inc(&sec->ps_refcount);