From: James Simmons Date: Mon, 18 Aug 2014 14:37:13 +0000 (-0400) Subject: LU-3963 libcfs: convert lod, mdt, and gss to linux list api X-Git-Tag: 2.6.52~33 X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=0b85782334b5f0c2ef362b35ea672ddc575cbc30;p=fs%2Flustre-release.git LU-3963 libcfs: convert lod, mdt, and gss to linux list api Move from the cfs_[h]list api to the native linux api for the lod, mdt, and gss part of the ptlrpc layers. Change-Id: Ieff231f3220a850521713a6f1c997b7e09130a4c Signed-off-by: James Simmons Reviewed-on: http://review.whamcloud.com/10387 Reviewed-by: Bob Glossman Tested-by: Jenkins Reviewed-by: frank zago Tested-by: Maloo Reviewed-by: Oleg Drokin --- diff --git a/lustre/lod/lod_internal.h b/lustre/lod/lod_internal.h index ce1fd8b..21d32d0 100644 --- a/lustre/lod/lod_internal.h +++ b/lustre/lod/lod_internal.h @@ -58,7 +58,7 @@ struct pool_desc { struct ost_pool pool_obds; /* pool members */ atomic_t pool_refcount; struct lod_qos_rr pool_rr; - cfs_hlist_node_t pool_hash; /* access by poolname */ + struct hlist_node pool_hash; /* access by poolname */ struct list_head pool_list; struct proc_dir_entry *pool_proc_entry; struct obd_device *pool_lobd; /* owner */ @@ -182,15 +182,15 @@ struct lod_device { struct lod_qos lod_qos; /* qos info per lod */ /* OST pool data */ - struct ost_pool lod_pool_info; /* all OSTs in a packed array */ - int lod_pool_count; - cfs_hash_t *lod_pools_hash_body; /* used for key access */ - cfs_list_t lod_pool_list; /* used for sequential access */ - cfs_proc_dir_entry_t *lod_pool_proc_entry; + struct ost_pool lod_pool_info; /* all OSTs in a packed array */ + int lod_pool_count; + cfs_hash_t *lod_pools_hash_body; /* used for key access */ + struct list_head lod_pool_list; /* used for sequential access */ + struct proc_dir_entry *lod_pool_proc_entry; enum lustre_sec_part lod_sp_me; - cfs_proc_dir_entry_t *lod_symlink; + struct proc_dir_entry *lod_symlink; }; #define lod_osts lod_ost_descs.ltd_tgts diff --git a/lustre/lod/lod_lov.c b/lustre/lod/lod_lov.c index 924780c..4bdb1a3 100644 --- a/lustre/lod/lod_lov.c +++ b/lustre/lod/lod_lov.c @@ -67,12 +67,14 @@ void lod_putref(struct lod_device *lod, struct lod_tgt_descs *ltd) ltd->ltd_refcount--; if (ltd->ltd_refcount == 0 && ltd->ltd_death_row) { struct lod_tgt_desc *tgt_desc, *tmp; + struct list_head kill; unsigned int idx; - CFS_LIST_HEAD(kill); CDEBUG(D_CONFIG, "destroying %d ltd desc\n", ltd->ltd_death_row); + INIT_LIST_HEAD(&kill); + cfs_foreach_bit(ltd->ltd_tgt_bitmap, idx) { tgt_desc = LTD_TGT(ltd, idx); LASSERT(tgt_desc); @@ -80,7 +82,7 @@ void lod_putref(struct lod_device *lod, struct lod_tgt_descs *ltd) if (!tgt_desc->ltd_reap) continue; - cfs_list_add(&tgt_desc->ltd_kill, &kill); + list_add(&tgt_desc->ltd_kill, &kill); LTD_TGT(ltd, idx) = NULL; /*FIXME: only support ost pool for now */ if (ltd == &lod->lod_ost_descs) { @@ -95,9 +97,9 @@ void lod_putref(struct lod_device *lod, struct lod_tgt_descs *ltd) mutex_unlock(<d->ltd_mutex); up_read(<d->ltd_rw_sem); - cfs_list_for_each_entry_safe(tgt_desc, tmp, &kill, ltd_kill) { + list_for_each_entry_safe(tgt_desc, tmp, &kill, ltd_kill) { int rc; - cfs_list_del(&tgt_desc->ltd_kill); + list_del(&tgt_desc->ltd_kill); if (ltd == &lod->lod_ost_descs) { /* remove from QoS structures */ rc = qos_del_tgt(lod, tgt_desc); @@ -1134,7 +1136,7 @@ int lod_pools_init(struct lod_device *lod, struct lustre_cfg *lcfg) lod->lod_sp_me = LUSTRE_SP_CLI; /* Set up allocation policy (QoS and RR) */ - CFS_INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list); + 
INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list); init_rwsem(&lod->lod_qos.lq_rw_sem); lod->lod_qos.lq_dirty = 1; lod->lod_qos.lq_rr.lqr_dirty = 1; @@ -1155,7 +1157,7 @@ int lod_pools_init(struct lod_device *lod, struct lustre_cfg *lcfg) if (lod->lod_pools_hash_body == NULL) RETURN(-ENOMEM); - CFS_INIT_LIST_HEAD(&lod->lod_pool_list); + INIT_LIST_HEAD(&lod->lod_pool_list); lod->lod_pool_count = 0; rc = lod_ost_pool_init(&lod->lod_pool_info, 0); if (rc) @@ -1177,12 +1179,10 @@ out_hash: int lod_pools_fini(struct lod_device *lod) { struct obd_device *obd = lod2obd(lod); - cfs_list_t *pos, *tmp; - struct pool_desc *pool; + struct pool_desc *pool, *tmp; ENTRY; - cfs_list_for_each_safe(pos, tmp, &lod->lod_pool_list) { - pool = cfs_list_entry(pos, struct pool_desc, pool_list); + list_for_each_entry_safe(pool, tmp, &lod->lod_pool_list, pool_list) { /* free pool structs */ CDEBUG(D_INFO, "delete pool %p\n", pool); /* In the function below, .hs_keycmp resolves to diff --git a/lustre/lod/lod_pool.c b/lustre/lod/lod_pool.c index 3bebbfe..c54f1c8 100644 --- a/lustre/lod/lod_pool.c +++ b/lustre/lod/lod_pool.c @@ -96,8 +96,8 @@ void lod_pool_putref(struct pool_desc *pool) { CDEBUG(D_INFO, "pool %p\n", pool); if (atomic_dec_and_test(&pool->pool_refcount)) { - LASSERT(cfs_hlist_unhashed(&pool->pool_hash)); - LASSERT(cfs_list_empty(&pool->pool_list)); + LASSERT(hlist_unhashed(&pool->pool_hash)); + LASSERT(list_empty(&pool->pool_list)); LASSERT(pool->pool_proc_entry == NULL); lod_ost_pool_free(&(pool->pool_rr.lqr_pool)); lod_ost_pool_free(&(pool->pool_obds)); @@ -154,11 +154,11 @@ static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask) * * \retval char array referencing the pool name (no refcount) */ -static void *pool_key(cfs_hlist_node_t *hnode) +static void *pool_key(struct hlist_node *hnode) { struct pool_desc *pool; - pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash); + pool = hlist_entry(hnode, struct pool_desc, pool_hash); return pool->pool_name; } @@ -174,7 +174,7 @@ static void *pool_key(cfs_hlist_node_t *hnode) * \retval 0 if \a key is the same as the key of \a compared * \retval 1 if \a key is different from the key of \a compared */ -static int pool_hashkey_keycmp(const void *key, cfs_hlist_node_t *compared) +static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared) { return !strncmp(key, pool_key(compared), LOV_MAXPOOLNAME); } @@ -190,25 +190,25 @@ static int pool_hashkey_keycmp(const void *key, cfs_hlist_node_t *compared) * * \retval struct pool_desc for the specified \a hnode */ -static void *pool_hashobject(cfs_hlist_node_t *hnode) +static void *pool_hashobject(struct hlist_node *hnode) { - return cfs_hlist_entry(hnode, struct pool_desc, pool_hash); + return hlist_entry(hnode, struct pool_desc, pool_hash); } -static void pool_hashrefcount_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode) { struct pool_desc *pool; - pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash); + pool = hlist_entry(hnode, struct pool_desc, pool_hash); pool_getref(pool); } static void pool_hashrefcount_put_locked(cfs_hash_t *hs, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { struct pool_desc *pool; - pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash); + pool = hlist_entry(hnode, struct pool_desc, pool_hash); pool_putref_locked(pool); } @@ -687,7 +687,7 @@ int lod_pool_new(struct obd_device *obd, char *poolname) #endif spin_lock(&obd->obd_dev_lock); - 
cfs_list_add_tail(&new_pool->pool_list, &lod->lod_pool_list); + list_add_tail(&new_pool->pool_list, &lod->lod_pool_list); lod->lod_pool_count++; spin_unlock(&obd->obd_dev_lock); @@ -704,7 +704,7 @@ int lod_pool_new(struct obd_device *obd, char *poolname) out_err: spin_lock(&obd->obd_dev_lock); - cfs_list_del_init(&new_pool->pool_list); + list_del_init(&new_pool->pool_list); lod->lod_pool_count--; spin_unlock(&obd->obd_dev_lock); @@ -744,7 +744,7 @@ int lod_pool_del(struct obd_device *obd, char *poolname) } spin_lock(&obd->obd_dev_lock); - cfs_list_del_init(&pool->pool_list); + list_del_init(&pool->pool_list); lod->lod_pool_count--; spin_unlock(&obd->obd_dev_lock); diff --git a/lustre/lod/lod_qos.c b/lustre/lod/lod_qos.c index f63c710..a0c5b03 100644 --- a/lustre/lod/lod_qos.c +++ b/lustre/lod/lod_qos.c @@ -64,7 +64,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) struct lod_qos_oss *oss = NULL, *temposs; struct obd_export *exp = ost_desc->ltd_exp; int rc = 0, found = 0; - cfs_list_t *list; + struct list_head *list; ENTRY; down_write(&lod->lod_qos.lq_rw_sem); @@ -73,7 +73,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) * but there is no official API to access information like this * with OSD API. */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { if (obd_uuid_equals(&oss->lqo_uuid, &exp->exp_connection->c_remote_uuid)) { found++; @@ -89,7 +89,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) sizeof(oss->lqo_uuid)); } else { /* Assume we have to move this one */ - cfs_list_del(&oss->lqo_oss_list); + list_del(&oss->lqo_oss_list); } oss->lqo_ost_count++; @@ -102,13 +102,13 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) /* Add sorted by # of OSTs. Find the first entry that we're bigger than... */ list = &lod->lod_qos.lq_oss_list; - cfs_list_for_each_entry(temposs, list, lqo_oss_list) { + list_for_each_entry(temposs, list, lqo_oss_list) { if (oss->lqo_ost_count > temposs->lqo_ost_count) break; } /* ...and add before it. If we're the first or smallest, temposs points to the list head, and we add to the end. 
*/ - cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list); + list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list); lod->lod_qos.lq_dirty = 1; lod->lod_qos.lq_rr.lqr_dirty = 1; @@ -133,7 +133,7 @@ int qos_del_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) if (oss->lqo_ost_count == 0) { CDEBUG(D_QOS, "removing OSS %s\n", obd_uuid2str(&oss->lqo_uuid)); - cfs_list_del(&oss->lqo_oss_list); + list_del(&oss->lqo_oss_list); ost_desc->ltd_qos.ltq_oss = NULL; OBD_FREE_PTR(oss); } @@ -255,7 +255,7 @@ static int lod_qos_calc_ppo(struct lod_device *lod) GOTO(out, rc = -EAGAIN); /* find bavail on each OSS */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) oss->lqo_bavail = 0; lod->lod_qos.lq_active_oss_count = 0; @@ -314,7 +314,7 @@ static int lod_qos_calc_ppo(struct lod_device *lod) } /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { temp = oss->lqo_bavail >> 1; do_div(temp, oss->lqo_ost_count * num_active); oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8; @@ -400,7 +400,7 @@ static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts, lod->lod_qos.lq_active_oss_count; /* Decrease all OSS penalties */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { if (oss->lqo_penalty < oss->lqo_penalty_per_obj) oss->lqo_penalty = 0; else @@ -493,7 +493,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool, /* Place all the OSTs from 1 OSS at the same time. */ placed = 0; - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { int j = 0; for (i = 0; i < lqr->lqr_pool.op_count; i++) { diff --git a/lustre/mdt/mdt_coordinator.c b/lustre/mdt/mdt_coordinator.c index 0a43d93..970d933 100644 --- a/lustre/mdt/mdt_coordinator.c +++ b/lustre/mdt/mdt_coordinator.c @@ -868,9 +868,9 @@ int mdt_hsm_cdt_init(struct mdt_device *mdt) init_rwsem(&cdt->cdt_request_lock); mutex_init(&cdt->cdt_restore_lock); - CFS_INIT_LIST_HEAD(&cdt->cdt_requests); - CFS_INIT_LIST_HEAD(&cdt->cdt_agents); - CFS_INIT_LIST_HEAD(&cdt->cdt_restore_hdl); + INIT_LIST_HEAD(&cdt->cdt_requests); + INIT_LIST_HEAD(&cdt->cdt_agents); + INIT_LIST_HEAD(&cdt->cdt_restore_hdl); rc = lu_env_init(&cdt->cdt_env, LCT_MD_THREAD); if (rc < 0) diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c index a03a996..a3c7349 100644 --- a/lustre/mdt/mdt_handler.c +++ b/lustre/mdt/mdt_handler.c @@ -5100,25 +5100,26 @@ static int mdt_ctxt_add_dirty_flag(struct lu_env *env, static int mdt_export_cleanup(struct obd_export *exp) { - struct mdt_export_data *med = &exp->exp_mdt_data; - struct obd_device *obd = exp->exp_obd; - struct mdt_device *mdt; - struct mdt_thread_info *info; - struct lu_env env; - CFS_LIST_HEAD(closing_list); - struct mdt_file_data *mfd, *n; - int rc = 0; - ENTRY; + struct list_head closing_list; + struct mdt_export_data *med = &exp->exp_mdt_data; + struct obd_device *obd = exp->exp_obd; + struct mdt_device *mdt; + struct mdt_thread_info *info; + struct lu_env env; + struct mdt_file_data *mfd, *n; + int rc = 0; + ENTRY; + INIT_LIST_HEAD(&closing_list); spin_lock(&med->med_open_lock); - while (!cfs_list_empty(&med->med_open_head)) { - cfs_list_t *tmp = 
med->med_open_head.next; - mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list); + while (!list_empty(&med->med_open_head)) { + struct list_head *tmp = med->med_open_head.next; + mfd = list_entry(tmp, struct mdt_file_data, mfd_list); /* Remove mfd handle so it can't be found again. * We are consuming the mfd_list reference here. */ class_handle_unhash(&mfd->mfd_handle); - cfs_list_move_tail(&mfd->mfd_list, &closing_list); + list_move_tail(&mfd->mfd_list, &closing_list); } spin_unlock(&med->med_open_lock); mdt = mdt_dev(obd->obd_lu_dev); @@ -5135,12 +5136,13 @@ static int mdt_export_cleanup(struct obd_export *exp) info->mti_mdt = mdt; info->mti_exp = exp; - if (!cfs_list_empty(&closing_list)) { - struct md_attr *ma = &info->mti_attr; + if (!list_empty(&closing_list)) { + struct md_attr *ma = &info->mti_attr; - /* Close any open files (which may also cause orphan unlinking). */ - cfs_list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) { - cfs_list_del_init(&mfd->mfd_list); + /* Close any open files (which may also cause orphan + * unlinking). */ + list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) { + list_del_init(&mfd->mfd_list); ma->ma_need = ma->ma_valid = 0; /* This file is being closed due to an eviction, it @@ -5198,11 +5200,11 @@ static int mdt_obd_disconnect(struct obd_export *exp) /* FIXME: Can we avoid using these two interfaces? */ static int mdt_init_export(struct obd_export *exp) { - struct mdt_export_data *med = &exp->exp_mdt_data; - int rc; - ENTRY; + struct mdt_export_data *med = &exp->exp_mdt_data; + int rc; + ENTRY; - CFS_INIT_LIST_HEAD(&med->med_open_head); + INIT_LIST_HEAD(&med->med_open_head); spin_lock_init(&med->med_open_lock); mutex_init(&med->med_idmap_mutex); med->med_idmap = NULL; @@ -5247,13 +5249,13 @@ static int mdt_destroy_export(struct obd_export *exp) &exp->exp_client_uuid))) RETURN(0); - ldlm_destroy_export(exp); - tgt_client_free(exp); + ldlm_destroy_export(exp); + tgt_client_free(exp); - LASSERT(cfs_list_empty(&exp->exp_outstanding_replies)); - LASSERT(cfs_list_empty(&exp->exp_mdt_data.med_open_head)); + LASSERT(list_empty(&exp->exp_outstanding_replies)); + LASSERT(list_empty(&exp->exp_mdt_data.med_open_head)); - RETURN(0); + RETURN(0); } /** The maximum depth that fid2path() will search. diff --git a/lustre/mdt/mdt_internal.h b/lustre/mdt/mdt_internal.h index 75127e3..fc9a7d3 100644 --- a/lustre/mdt/mdt_internal.h +++ b/lustre/mdt/mdt_internal.h @@ -78,12 +78,18 @@ struct mdt_object; /* file data for open files on MDS */ struct mdt_file_data { - struct portals_handle mfd_handle; /* must be first */ - __u64 mfd_mode; /* open mode provided by client */ - cfs_list_t mfd_list; /* protected by med_open_lock */ - __u64 mfd_xid; /* xid of the open request */ - struct lustre_handle mfd_old_handle; /* old handle in replay case */ - struct mdt_object *mfd_object; /* point to opened object */ + /** portals handle must be first */ + struct portals_handle mfd_handle; + /** open mode provided by client */ + __u64 mfd_mode; + /** protected by med_open_lock */ + struct list_head mfd_list; + /** xid of the open request */ + __u64 mfd_xid; + /** old handle in replay case */ + struct lustre_handle mfd_old_handle; + /** point to opened object */ + struct mdt_object *mfd_object; }; #define CDT_NONBLOCKING_RESTORE (1ULL << 0) @@ -488,7 +494,7 @@ struct cdt_req_progress { }; struct cdt_agent_req { - cfs_list_t car_request_list; /**< to chain all the req. */ + struct list_head car_request_list; /**< to chain all the req. 
*/ atomic_t car_refcount; /**< reference counter */ __u64 car_compound_id; /**< compound id */ __u64 car_flags; /**< request original flags */ @@ -504,7 +510,7 @@ struct cdt_agent_req { extern struct kmem_cache *mdt_hsm_car_kmem; struct hsm_agent { - cfs_list_t ha_list; /**< to chain the agents */ + struct list_head ha_list; /**< to chain the agents */ struct obd_uuid ha_uuid; /**< agent uuid */ __u32 *ha_archive_id; /**< archive id */ int ha_archive_cnt; /**< number of archive entries @@ -516,10 +522,10 @@ struct hsm_agent { }; struct cdt_restore_handle { - cfs_list_t crh_list; /**< to chain the handle */ - struct lu_fid crh_fid; /**< fid of the object */ - struct ldlm_extent crh_extent; /**< extent of the restore */ - struct mdt_lock_handle crh_lh; /**< lock handle */ + struct list_head crh_list; /**< to chain the handle */ + struct lu_fid crh_fid; /**< fid of the object */ + struct ldlm_extent crh_extent; /**< extent of the restore */ + struct mdt_lock_handle crh_lh; /**< lock handle */ }; extern struct kmem_cache *mdt_hsm_cdt_kmem; /** restore handle slab cache */ diff --git a/lustre/mdt/mdt_lib.c b/lustre/mdt/mdt_lib.c index 65ceaf3..30fcaf0 100644 --- a/lustre/mdt/mdt_lib.c +++ b/lustre/mdt/mdt_lib.c @@ -80,7 +80,7 @@ void mdt_exit_ucred(struct mdt_thread_info *info) } static int match_nosquash_list(struct rw_semaphore *sem, - cfs_list_t *nidlist, + struct list_head *nidlist, lnet_nid_t peernid) { int rc; diff --git a/lustre/mdt/mdt_lproc.c b/lustre/mdt/mdt_lproc.c index ff7a260..45cda58 100644 --- a/lustre/mdt/mdt_lproc.c +++ b/lustre/mdt/mdt_lproc.c @@ -736,10 +736,10 @@ mdt_som_seq_write(struct file *file, const char __user *buffer, return count; } - /* 1 stands for self export. */ - cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) { - if (exp == obd->obd_self_export) - continue; + /* 1 stands for self export. 
*/ + list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) { + if (exp == obd->obd_self_export) + continue; if (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) continue; /* Some clients are already connected, skip the change */ @@ -883,7 +883,7 @@ static struct lprocfs_seq_vars lprocfs_mdt_obd_vars[] = { }; int lprocfs_mdt_print_open_files(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *v) + struct hlist_node *hnode, void *v) { struct obd_export *exp = cfs_hash_object(hs, hnode); struct seq_file *seq = v; @@ -893,7 +893,7 @@ int lprocfs_mdt_print_open_files(cfs_hash_t *hs, cfs_hash_bd_t *bd, struct mdt_file_data *mfd; spin_lock(&med->med_open_lock); - cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) { + list_for_each_entry(mfd, &med->med_open_head, mfd_list) { seq_printf(seq, DFID"\n", PFID(mdt_object_fid(mfd->mfd_object))); } diff --git a/lustre/mdt/mdt_open.c b/lustre/mdt/mdt_open.c index d179df9..87354a2 100644 --- a/lustre/mdt/mdt_open.c +++ b/lustre/mdt/mdt_open.c @@ -65,9 +65,9 @@ struct mdt_file_data *mdt_mfd_new(const struct mdt_export_data *med) OBD_ALLOC_PTR(mfd); if (mfd != NULL) { - CFS_INIT_LIST_HEAD(&mfd->mfd_handle.h_link); + INIT_LIST_HEAD(&mfd->mfd_handle.h_link); mfd->mfd_handle.h_owner = med; - CFS_INIT_LIST_HEAD(&mfd->mfd_list); + INIT_LIST_HEAD(&mfd->mfd_list); class_handle_hash(&mfd->mfd_handle, &mfd_handle_ops); } @@ -91,7 +91,7 @@ struct mdt_file_data *mdt_handle2mfd(struct mdt_export_data *med, mfd = class_handle2object(handle->cookie, med); /* during dw/setattr replay the mfd can be found by old handle */ if (mfd == NULL && is_replay_or_resent) { - cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) { + list_for_each_entry(mfd, &med->med_open_head, mfd_list) { if (mfd->mfd_old_handle.cookie == handle->cookie) RETURN(mfd); } @@ -104,8 +104,8 @@ struct mdt_file_data *mdt_handle2mfd(struct mdt_export_data *med, /* free mfd */ void mdt_mfd_free(struct mdt_file_data *mfd) { - LASSERT(cfs_list_empty(&mfd->mfd_list)); - OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_handle); + LASSERT(list_empty(&mfd->mfd_list)); + OBD_FREE_RCU(mfd, sizeof *mfd, &mfd->mfd_handle); } static int mdt_create_data(struct mdt_thread_info *info, @@ -799,7 +799,7 @@ static int mdt_mfd_open(struct mdt_thread_info *info, struct mdt_object *p, PFID(mdt_object_fid(mfd->mfd_object)), info->mti_rr.rr_handle->cookie); class_handle_unhash(&old_mfd->mfd_handle); - cfs_list_del_init(&old_mfd->mfd_list); + list_del_init(&old_mfd->mfd_list); spin_unlock(&med->med_open_lock); /* no attr update for that close */ la->la_valid = 0; @@ -827,12 +827,12 @@ static int mdt_mfd_open(struct mdt_thread_info *info, struct mdt_object *p, if (req->rq_export->exp_disconnected) { spin_lock(&med->med_open_lock); class_handle_unhash(&mfd->mfd_handle); - cfs_list_del_init(&mfd->mfd_list); + list_del_init(&mfd->mfd_list); spin_unlock(&med->med_open_lock); mdt_mfd_close(info, mfd); } else { spin_lock(&med->med_open_lock); - cfs_list_add(&mfd->mfd_list, &med->med_open_head); + list_add(&mfd->mfd_list, &med->med_open_head); spin_unlock(&med->med_open_lock); } @@ -855,17 +855,17 @@ int mdt_finish_open(struct mdt_thread_info *info, struct mdt_object *p, struct mdt_object *o, __u64 flags, int created, struct ldlm_reply *rep) { - struct ptlrpc_request *req = mdt_info_req(info); - struct obd_export *exp = req->rq_export; - struct mdt_export_data *med = &req->rq_export->exp_mdt_data; - struct md_attr *ma = &info->mti_attr; - struct lu_attr *la = &ma->ma_attr; - struct mdt_file_data *mfd; - struct 
mdt_body *repbody; - int rc = 0; - int isreg, isdir, islnk; - cfs_list_t *t; - ENTRY; + struct ptlrpc_request *req = mdt_info_req(info); + struct obd_export *exp = req->rq_export; + struct mdt_export_data *med = &req->rq_export->exp_mdt_data; + struct md_attr *ma = &info->mti_attr; + struct lu_attr *la = &ma->ma_attr; + struct mdt_file_data *mfd; + struct mdt_body *repbody; + int rc = 0; + int isreg, isdir, islnk; + struct list_head *t; + ENTRY; LASSERT(ma->ma_valid & MA_INODE); @@ -991,11 +991,11 @@ int mdt_finish_open(struct mdt_thread_info *info, RETURN(-EAGAIN); } - mfd = NULL; - if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) { + mfd = NULL; + if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) { spin_lock(&med->med_open_lock); - cfs_list_for_each(t, &med->med_open_head) { - mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list); + list_for_each(t, &med->med_open_head) { + mfd = list_entry(t, struct mdt_file_data, mfd_list); if (mfd->mfd_xid == req->rq_xid) break; mfd = NULL; @@ -2226,10 +2226,10 @@ int mdt_mfd_close(struct mdt_thread_info *info, struct mdt_file_data *mfd) mdt_mfd_set_mode(mfd, ret == MDT_IOEPOCH_OPENED ? MDS_FMODE_EPOCH : MDS_FMODE_SOM); - LASSERT(mdt_info_req(info)); - med = &mdt_info_req(info)->rq_export->exp_mdt_data; + LASSERT(mdt_info_req(info)); + med = &mdt_info_req(info)->rq_export->exp_mdt_data; spin_lock(&med->med_open_lock); - cfs_list_add(&mfd->mfd_list, &med->med_open_head); + list_add(&mfd->mfd_list, &med->med_open_head); class_handle_hash_back(&mfd->mfd_handle); spin_unlock(&med->med_open_lock); @@ -2320,7 +2320,7 @@ int mdt_close(struct tgt_session_info *tsi) rc = -ESTALE; } else { class_handle_unhash(&mfd->mfd_handle); - cfs_list_del_init(&mfd->mfd_list); + list_del_init(&mfd->mfd_list); spin_unlock(&med->med_open_lock); /* Do not lose object before last unlink. */ @@ -2404,12 +2404,12 @@ int mdt_done_writing(struct tgt_session_info *tsi) } else rc = -ESTALE; GOTO(error_ucred, rc); - } + } - LASSERT(mfd->mfd_mode == MDS_FMODE_EPOCH || - mfd->mfd_mode == MDS_FMODE_TRUNC); - class_handle_unhash(&mfd->mfd_handle); - cfs_list_del_init(&mfd->mfd_list); + LASSERT(mfd->mfd_mode == MDS_FMODE_EPOCH || + mfd->mfd_mode == MDS_FMODE_TRUNC); + class_handle_unhash(&mfd->mfd_handle); + list_del_init(&mfd->mfd_list); spin_unlock(&med->med_open_lock); /* Set EPOCH CLOSE flag if not set by client. 
*/ diff --git a/lustre/mdt/mdt_recovery.c b/lustre/mdt/mdt_recovery.c index f5a49da..1ae3aa4 100644 --- a/lustre/mdt/mdt_recovery.c +++ b/lustre/mdt/mdt_recovery.c @@ -116,7 +116,7 @@ int mdt_fs_setup(const struct lu_env *env, struct mdt_device *mdt, mdt->mdt_txn_cb.dtc_txn_commit = NULL; mdt->mdt_txn_cb.dtc_cookie = NULL; mdt->mdt_txn_cb.dtc_tag = LCT_MD_THREAD; - CFS_INIT_LIST_HEAD(&mdt->mdt_txn_cb.dtc_linkage); + INIT_LIST_HEAD(&mdt->mdt_txn_cb.dtc_linkage); dt_txn_callback_add(mdt->mdt_bottom, &mdt->mdt_txn_cb); @@ -139,16 +139,16 @@ void mdt_fs_cleanup(const struct lu_env *env, struct mdt_device *mdt) static void mdt_steal_ack_locks(struct ptlrpc_request *req) { struct ptlrpc_service_part *svcpt; - struct obd_export *exp = req->rq_export; - cfs_list_t *tmp; - struct ptlrpc_reply_state *oldrep; - int i; + struct obd_export *exp = req->rq_export; + struct list_head *tmp; + struct ptlrpc_reply_state *oldrep; + int i; - /* CAVEAT EMPTOR: spinlock order */ + /* CAVEAT EMPTOR: spinlock order */ spin_lock(&exp->exp_lock); - cfs_list_for_each (tmp, &exp->exp_outstanding_replies) { - oldrep = cfs_list_entry(tmp, struct ptlrpc_reply_state, - rs_exp_list); + list_for_each(tmp, &exp->exp_outstanding_replies) { + oldrep = list_entry(tmp, struct ptlrpc_reply_state, + rs_exp_list); if (oldrep->rs_xid != req->rq_xid) continue; @@ -162,7 +162,7 @@ static void mdt_steal_ack_locks(struct ptlrpc_request *req) svcpt = oldrep->rs_svcpt; spin_lock(&svcpt->scp_rep_lock); - cfs_list_del_init (&oldrep->rs_exp_list); + list_del_init(&oldrep->rs_exp_list); CDEBUG(D_HA, "Stealing %d locks from rs %p x"LPD64".t"LPD64 " o%d NID %s\n", @@ -345,7 +345,7 @@ static void mdt_reconstruct_setattr(struct mdt_thread_info *mti, repbody = req_capsule_server_get(mti->mti_pill, &RMF_MDT_BODY); repbody->mbo_ioepoch = obj->mot_ioepoch; spin_lock(&med->med_open_lock); - cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) { + list_for_each_entry(mfd, &med->med_open_head, mfd_list) { if (mfd->mfd_xid == req->rq_xid) break; } diff --git a/lustre/mdt/mdt_reint.c b/lustre/mdt/mdt_reint.c index 2f98026..063ddd3 100644 --- a/lustre/mdt/mdt_reint.c +++ b/lustre/mdt/mdt_reint.c @@ -708,7 +708,7 @@ static int mdt_reint_setattr(struct mdt_thread_info *info, mfd->mfd_xid = req->rq_xid; spin_lock(&med->med_open_lock); - cfs_list_add(&mfd->mfd_list, &med->med_open_head); + list_add(&mfd->mfd_list, &med->med_open_head); spin_unlock(&med->med_open_lock); repbody->mbo_handle.cookie = mfd->mfd_handle.h_cookie; } @@ -734,8 +734,8 @@ static int mdt_reint_setattr(struct mdt_thread_info *info, LASSERT(mfd->mfd_mode == MDS_FMODE_SOM); LASSERT(!(info->mti_ioepoch->flags & MF_EPOCH_CLOSE)); - class_handle_unhash(&mfd->mfd_handle); - cfs_list_del_init(&mfd->mfd_list); + class_handle_unhash(&mfd->mfd_handle); + list_del_init(&mfd->mfd_list); spin_unlock(&med->med_open_lock); mdt_mfd_close(info, mfd); @@ -1423,7 +1423,7 @@ static int mdt_lock_objects_in_linkea(struct mdt_thread_info *info, GOTO(out, rc); } - CFS_INIT_LIST_HEAD(&mll->mll_list); + INIT_LIST_HEAD(&mll->mll_list); mll->mll_obj = mdt_pobj; list_add_tail(&mll->mll_list, lock_list); } @@ -1512,7 +1512,7 @@ static int mdt_reint_migrate_internal(struct mdt_thread_info *info, GOTO(out_put_child, rc); /* 3: iterate the linkea of the object and lock all of the objects */ - CFS_INIT_LIST_HEAD(&lock_list); + INIT_LIST_HEAD(&lock_list); rc = mdt_lock_objects_in_linkea(info, mold, msrcdir, &lock_list); if (rc != 0) GOTO(out_put_child, rc); diff --git a/lustre/ptlrpc/gss/gss_api.h 
b/lustre/ptlrpc/gss/gss_api.h index d11d80b..a5bbaea 100644 --- a/lustre/ptlrpc/gss/gss_api.h +++ b/lustre/ptlrpc/gss/gss_api.h @@ -99,13 +99,13 @@ struct subflavor_desc { /* Each mechanism is described by the following struct: */ struct gss_api_mech { - cfs_list_t gm_list; - struct module *gm_owner; - char *gm_name; - rawobj_t gm_oid; - atomic_t gm_count; + struct list_head gm_list; + struct module *gm_owner; + char *gm_name; + rawobj_t gm_oid; + atomic_t gm_count; struct gss_api_ops *gm_ops; - int gm_sf_num; + int gm_sf_num; struct subflavor_desc *gm_sfs; }; diff --git a/lustre/ptlrpc/gss/gss_internal.h b/lustre/ptlrpc/gss/gss_internal.h index d42b9a1..876ea39 100644 --- a/lustre/ptlrpc/gss/gss_internal.h +++ b/lustre/ptlrpc/gss/gss_internal.h @@ -286,9 +286,9 @@ struct gss_sec { }; struct gss_sec_pipefs { - struct gss_sec gsp_base; - int gsp_chash_size; /* must be 2^n */ - cfs_hlist_head_t gsp_chash[0]; + struct gss_sec gsp_base; + int gsp_chash_size; /* must be 2^n */ + struct hlist_head gsp_chash[0]; }; /* @@ -301,7 +301,7 @@ struct gss_sec_keyring { /* * all contexts listed here. access is protected by sec spinlock. */ - cfs_hlist_head_t gsk_clist; + struct hlist_head gsk_clist; /* * specially point to root ctx (only one at a time). access is * protected by sec spinlock. diff --git a/lustre/ptlrpc/gss/gss_keyring.c b/lustre/ptlrpc/gss/gss_keyring.c index 407b594..3e943c0 100644 --- a/lustre/ptlrpc/gss/gss_keyring.c +++ b/lustre/ptlrpc/gss/gss_keyring.c @@ -310,7 +310,7 @@ static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked) atomic_inc(&ctx->cc_refcount); set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags); - cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist); + hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist); if (is_root) gsec_kr->gsk_root_ctx = ctx; @@ -338,7 +338,7 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked) if (gsec_kr->gsk_root_ctx == ctx) gsec_kr->gsk_root_ctx = NULL; - cfs_hlist_del_init(&ctx->cc_cache); + hlist_del_init(&ctx->cc_cache); atomic_dec(&ctx->cc_refcount); spin_unlock_if(&sec->ps_lock, !locked); @@ -442,7 +442,7 @@ static void kill_key_locked(struct key *key) /* * caller should hold one ref on contexts in freelist. */ -static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist) +static void dispose_ctx_list_kr(struct hlist_head *freelist) { struct hlist_node __maybe_unused *pos, *next; struct ptlrpc_cli_ctx *ctx; @@ -510,7 +510,7 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec) if (ctx) { LASSERT(atomic_read(&ctx->cc_refcount) > 0); - LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist)); + LASSERT(!hlist_empty(&gsec_kr->gsk_clist)); atomic_inc(&ctx->cc_refcount); } @@ -581,7 +581,7 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp, if (gsec_kr == NULL) RETURN(NULL); - CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist); + INIT_HLIST_HEAD(&gsec_kr->gsk_clist); gsec_kr->gsk_root_ctx = NULL; mutex_init(&gsec_kr->gsk_root_uc_lock); #ifdef HAVE_KEYRING_UPCALL_SERIALIZED @@ -613,7 +613,7 @@ void gss_sec_destroy_kr(struct ptlrpc_sec *sec) CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec); - LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist)); + LASSERT(hlist_empty(&gsec_kr->gsk_clist)); LASSERT(gsec_kr->gsk_root_ctx == NULL); gss_sec_destroy_common(gsec); @@ -889,12 +889,11 @@ void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec, * flush context of root or all, we iterate through the list. 
*/ static -void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, - uid_t uid, - int grace, int force) +void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace, + int force) { struct gss_sec_keyring *gsec_kr; - struct hlist_head freelist = CFS_HLIST_HEAD_INIT; + struct hlist_head freelist = HLIST_HEAD_INIT; struct hlist_node __maybe_unused *pos, *next; struct ptlrpc_cli_ctx *ctx; ENTRY; @@ -927,7 +926,7 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, atomic_inc(&ctx->cc_refcount); if (ctx_unlist_kr(ctx, 1)) { - cfs_hlist_add_head(&ctx->cc_cache, &freelist); + hlist_add_head(&ctx->cc_cache, &freelist); } else { LASSERT(atomic_read(&ctx->cc_refcount) >= 2); atomic_dec(&ctx->cc_refcount); @@ -962,7 +961,7 @@ static void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec) { struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec); - struct hlist_head freelist = CFS_HLIST_HEAD_INIT; + struct hlist_head freelist = HLIST_HEAD_INIT; struct hlist_node __maybe_unused *pos, *next; struct ptlrpc_cli_ctx *ctx; ENTRY; @@ -977,7 +976,7 @@ void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec) atomic_inc(&ctx->cc_refcount); if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) { - cfs_hlist_add_head(&ctx->cc_cache, &freelist); + hlist_add_head(&ctx->cc_cache, &freelist); CWARN("unhashed ctx %p\n", ctx); } else { LASSERT(atomic_read(&ctx->cc_refcount) >= 2); diff --git a/lustre/ptlrpc/gss/gss_mech_switch.c b/lustre/ptlrpc/gss/gss_mech_switch.c index 7c5b830..608aa8c 100644 --- a/lustre/ptlrpc/gss/gss_mech_switch.c +++ b/lustre/ptlrpc/gss/gss_mech_switch.c @@ -65,13 +65,13 @@ #include "gss_internal.h" #include "gss_api.h" -static CFS_LIST_HEAD(registered_mechs); +static struct list_head registered_mechs = LIST_HEAD_INIT(registered_mechs); static DEFINE_SPINLOCK(registered_mechs_lock); int lgss_mech_register(struct gss_api_mech *gm) { spin_lock(®istered_mechs_lock); - cfs_list_add(&gm->gm_list, ®istered_mechs); + list_add(&gm->gm_list, ®istered_mechs); spin_unlock(®istered_mechs_lock); CWARN("Register %s mechanism\n", gm->gm_name); return 0; @@ -80,7 +80,7 @@ int lgss_mech_register(struct gss_api_mech *gm) void lgss_mech_unregister(struct gss_api_mech *gm) { spin_lock(®istered_mechs_lock); - cfs_list_del(&gm->gm_list); + list_del(&gm->gm_list); spin_unlock(®istered_mechs_lock); CWARN("Unregister %s mechanism\n", gm->gm_name); } @@ -97,7 +97,7 @@ struct gss_api_mech *lgss_name_to_mech(char *name) struct gss_api_mech *pos, *gm = NULL; spin_lock(®istered_mechs_lock); - cfs_list_for_each_entry(pos, ®istered_mechs, gm_list) { + list_for_each_entry(pos, ®istered_mechs, gm_list) { if (0 == strcmp(name, pos->gm_name)) { if (!try_module_get(pos->gm_owner)) continue; @@ -127,7 +127,7 @@ struct gss_api_mech *lgss_subflavor_to_mech(__u32 subflavor) struct gss_api_mech *pos, *gm = NULL; spin_lock(®istered_mechs_lock); - cfs_list_for_each_entry(pos, ®istered_mechs, gm_list) { + list_for_each_entry(pos, ®istered_mechs, gm_list) { if (!try_module_get(pos->gm_owner)) continue; if (!mech_supports_subflavor(pos, subflavor)) { diff --git a/lustre/ptlrpc/gss/gss_pipefs.c b/lustre/ptlrpc/gss/gss_pipefs.c index 45e0b0b..ed854b2 100644 --- a/lustre/ptlrpc/gss/gss_pipefs.c +++ b/lustre/ptlrpc/gss/gss_pipefs.c @@ -128,31 +128,31 @@ void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx) } static -void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash) +void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash) { set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags); 
atomic_inc(&ctx->cc_refcount); - cfs_hlist_add_head(&ctx->cc_cache, hash); + hlist_add_head(&ctx->cc_cache, hash); } /* * caller must hold spinlock */ static -void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist) +void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist) { assert_spin_locked(&ctx->cc_sec->ps_lock); LASSERT(atomic_read(&ctx->cc_refcount) > 0); LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)); - LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache)); + LASSERT(!hlist_unhashed(&ctx->cc_cache)); clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags); if (atomic_dec_and_test(&ctx->cc_refcount)) { - __cfs_hlist_del(&ctx->cc_cache); - cfs_hlist_add_head(&ctx->cc_cache, freelist); + __hlist_del(&ctx->cc_cache); + hlist_add_head(&ctx->cc_cache, freelist); } else { - cfs_hlist_del_init(&ctx->cc_cache); + hlist_del_init(&ctx->cc_cache); } } @@ -161,7 +161,7 @@ void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist) */ static int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, - cfs_hlist_head_t *freelist) + struct hlist_head *freelist) { if (cli_ctx_check_death(ctx)) { if (freelist) @@ -174,7 +174,7 @@ int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, static inline int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx, - cfs_hlist_head_t *freelist) + struct hlist_head *freelist) { LASSERT(ctx->cc_sec); LASSERT(atomic_read(&ctx->cc_refcount) > 0); @@ -194,11 +194,11 @@ int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred) } static -void ctx_list_destroy_pf(cfs_hlist_head_t *head) +void ctx_list_destroy_pf(struct hlist_head *head) { struct ptlrpc_cli_ctx *ctx; - while (!cfs_hlist_empty(head)) { + while (!hlist_empty(head)) { ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache); @@ -206,7 +206,7 @@ void ctx_list_destroy_pf(cfs_hlist_head_t *head) LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0); - cfs_hlist_del_init(&ctx->cc_cache); + hlist_del_init(&ctx->cc_cache); ctx_destroy_pf(ctx->cc_sec, ctx); } } @@ -236,10 +236,10 @@ void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace) spin_lock(&ctx->cc_sec->ps_lock); if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) { - LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache)); + LASSERT(!hlist_unhashed(&ctx->cc_cache)); LASSERT(atomic_read(&ctx->cc_refcount) > 1); - cfs_hlist_del_init(&ctx->cc_cache); + hlist_del_init(&ctx->cc_cache); if (atomic_dec_and_test(&ctx->cc_refcount)) LBUG(); } @@ -322,7 +322,7 @@ int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec, static void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf, - cfs_hlist_head_t *freelist) + struct hlist_head *freelist) { struct ptlrpc_sec *sec; struct ptlrpc_cli_ctx *ctx; @@ -363,7 +363,7 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp, hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE; alloc_size = sizeof(*gsec_pf) + - sizeof(cfs_hlist_head_t) * hash_size; + sizeof(struct hlist_head) * hash_size; OBD_ALLOC(gsec_pf, alloc_size); if (!gsec_pf) @@ -371,7 +371,7 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp, gsec_pf->gsp_chash_size = hash_size; for (i = 0; i < hash_size; i++) - CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]); + INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]); if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs, imp, ctx, sf)) @@ -413,7 +413,7 @@ void gss_sec_destroy_pf(struct ptlrpc_sec *sec) gss_sec_destroy_common(gsec); OBD_FREE(gsec, sizeof(*gsec_pf) + - sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size); + sizeof(struct hlist_head) * 
gsec_pf->gsp_chash_size); } static @@ -465,14 +465,14 @@ retry: if (found) { if (new && new != ctx) { /* lost the race, just free it */ - cfs_hlist_add_head(&new->cc_cache, &freelist); + hlist_add_head(&new->cc_cache, &freelist); new = NULL; } /* hot node, move to head */ if (hash_head->first != &ctx->cc_cache) { - __cfs_hlist_del(&ctx->cc_cache); - cfs_hlist_add_head(&ctx->cc_cache, hash_head); + __hlist_del(&ctx->cc_cache); + hlist_add_head(&ctx->cc_cache, hash_head); } } else { /* don't allocate for reverse sec */ @@ -518,7 +518,7 @@ void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec, int sync) { LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0); - LASSERT(cfs_hlist_unhashed(&ctx->cc_cache)); + LASSERT(hlist_unhashed(&ctx->cc_cache)); /* if required async, we must clear the UPTODATE bit to prevent extra * rpcs during destroy procedure. */ @@ -636,7 +636,7 @@ struct gss_upcall_msg_data { struct gss_upcall_msg { struct rpc_pipe_msg gum_base; atomic_t gum_refcount; - cfs_list_t gum_list; + struct list_head gum_list; __u32 gum_mechidx; struct gss_sec *gum_gsec; struct gss_cli_ctx *gum_gctx; @@ -666,7 +666,7 @@ __u32 mech_name2idx(const char *name) /* pipefs dentries for each mechanisms */ static struct dentry *de_pipes[MECH_MAX] = { NULL, }; /* all upcall messgaes linked here */ -static cfs_list_t upcall_lists[MECH_MAX]; +static struct list_head upcall_lists[MECH_MAX]; /* and protected by this */ static spinlock_t upcall_locks[MECH_MAX]; @@ -685,21 +685,21 @@ void upcall_list_unlock(int idx) static void upcall_msg_enlist(struct gss_upcall_msg *msg) { - __u32 idx = msg->gum_mechidx; + __u32 idx = msg->gum_mechidx; - upcall_list_lock(idx); - cfs_list_add(&msg->gum_list, &upcall_lists[idx]); - upcall_list_unlock(idx); + upcall_list_lock(idx); + list_add(&msg->gum_list, &upcall_lists[idx]); + upcall_list_unlock(idx); } static void upcall_msg_delist(struct gss_upcall_msg *msg) { - __u32 idx = msg->gum_mechidx; + __u32 idx = msg->gum_mechidx; - upcall_list_lock(idx); - cfs_list_del_init(&msg->gum_list); - upcall_list_unlock(idx); + upcall_list_lock(idx); + list_del_init(&msg->gum_list); + upcall_list_unlock(idx); } /**************************************** @@ -723,10 +723,10 @@ void gss_release_msg(struct gss_upcall_msg *gmsg) gmsg->gum_gctx = NULL; } - LASSERT(cfs_list_empty(&gmsg->gum_list)); - LASSERT(cfs_list_empty(&gmsg->gum_base.list)); - OBD_FREE_PTR(gmsg); - EXIT; + LASSERT(list_empty(&gmsg->gum_list)); + LASSERT(list_empty(&gmsg->gum_base.list)); + OBD_FREE_PTR(gmsg); + EXIT; } static @@ -737,10 +737,10 @@ void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg) LASSERT(idx < MECH_MAX); assert_spin_locked(&upcall_locks[idx]); - if (cfs_list_empty(&gmsg->gum_list)) + if (list_empty(&gmsg->gum_list)) return; - cfs_list_del_init(&gmsg->gum_list); + list_del_init(&gmsg->gum_list); LASSERT(atomic_read(&gmsg->gum_refcount) > 1); atomic_dec(&gmsg->gum_refcount); } @@ -774,7 +774,7 @@ struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq) struct gss_upcall_msg *gmsg; upcall_list_lock(mechidx); - cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) { + list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) { if (gmsg->gum_data.gum_seq != seq) continue; @@ -956,7 +956,7 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) static cfs_time_t ratelimit = 0; ENTRY; - LASSERT(cfs_list_empty(&msg->list)); + LASSERT(list_empty(&msg->list)); /* normally errno is >= 0 */ if (msg->errno >= 0) { @@ -991,22 +991,22 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) 
static void gss_pipe_release(struct inode *inode) { - struct rpc_inode *rpci = RPC_I(inode); - __u32 idx; - ENTRY; + struct rpc_inode *rpci = RPC_I(inode); + __u32 idx; + ENTRY; - idx = (__u32) (long) rpci->private; - LASSERT(idx < MECH_MAX); + idx = (__u32) (long) rpci->private; + LASSERT(idx < MECH_MAX); - upcall_list_lock(idx); - while (!cfs_list_empty(&upcall_lists[idx])) { - struct gss_upcall_msg *gmsg; - struct gss_upcall_msg_data *gumd; + upcall_list_lock(idx); + while (!list_empty(&upcall_lists[idx])) { + struct gss_upcall_msg *gmsg; + struct gss_upcall_msg_data *gumd; - gmsg = cfs_list_entry(upcall_lists[idx].next, - struct gss_upcall_msg, gum_list); - gumd = &gmsg->gum_data; - LASSERT(cfs_list_empty(&gmsg->gum_base.list)); + gmsg = list_entry(upcall_lists[idx].next, + struct gss_upcall_msg, gum_list); + gumd = &gmsg->gum_data; + LASSERT(list_empty(&gmsg->gum_base.list)); CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, " "nid "LPX64", obd %.*s\n", gmsg, @@ -1067,7 +1067,7 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx) RETURN(-ENOMEM); /* initialize pipefs base msg */ - CFS_INIT_LIST_HEAD(&gmsg->gum_base.list); + INIT_LIST_HEAD(&gmsg->gum_base.list); gmsg->gum_base.data = &gmsg->gum_data; gmsg->gum_base.len = sizeof(gmsg->gum_data); gmsg->gum_base.copied = 0; @@ -1211,7 +1211,7 @@ int __init gss_init_pipefs_upcall(void) } de_pipes[MECH_KRB5] = de; - CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]); + INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]); spin_lock_init(&upcall_locks[MECH_KRB5]); return 0; @@ -1220,17 +1220,17 @@ int __init gss_init_pipefs_upcall(void) static void __exit gss_exit_pipefs_upcall(void) { - __u32 i; + __u32 i; - for (i = 0; i < MECH_MAX; i++) { - LASSERT(cfs_list_empty(&upcall_lists[i])); + for (i = 0; i < MECH_MAX; i++) { + LASSERT(list_empty(&upcall_lists[i])); - /* dput pipe dentry here might cause lgssd oops. */ - de_pipes[i] = NULL; - } + /* dput pipe dentry here might cause lgssd oops. */ + de_pipes[i] = NULL; + } - rpc_unlink(LUSTRE_PIPE_KRB5); - rpc_rmdir(LUSTRE_PIPE_ROOT); + rpc_unlink(LUSTRE_PIPE_KRB5); + rpc_rmdir(LUSTRE_PIPE_ROOT); } int __init gss_init_pipefs(void) @@ -1252,6 +1252,6 @@ int __init gss_init_pipefs(void) void __exit gss_exit_pipefs(void) { - gss_exit_pipefs_upcall(); - sptlrpc_unregister_policy(&gss_policy_pipefs); + gss_exit_pipefs_upcall(); + sptlrpc_unregister_policy(&gss_policy_pipefs); } diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c index a830b0d..ccd5ff7 100644 --- a/lustre/ptlrpc/gss/sec_gss.c +++ b/lustre/ptlrpc/gss/sec_gss.c @@ -1128,7 +1128,7 @@ int gss_sec_create_common(struct gss_sec *gsec, sec->ps_flvr = *sf; sec->ps_import = class_import_get(imp); spin_lock_init(&sec->ps_lock); - CFS_INIT_LIST_HEAD(&sec->ps_gc_list); + INIT_LIST_HEAD(&sec->ps_gc_list); if (!svcctx) { sec->ps_gc_interval = GSS_GC_INTERVAL; @@ -1184,7 +1184,7 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec, gctx->gc_win = 0; atomic_set(&gctx->gc_seq, 0); - CFS_INIT_HLIST_NODE(&ctx->cc_cache); + INIT_HLIST_NODE(&ctx->cc_cache); atomic_set(&ctx->cc_refcount, 0); ctx->cc_sec = sec; ctx->cc_ops = ctxops; @@ -1192,8 +1192,8 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec, ctx->cc_flags = PTLRPC_CTX_NEW; ctx->cc_vcred = *vcred; spin_lock_init(&ctx->cc_lock); - CFS_INIT_LIST_HEAD(&ctx->cc_req_list); - CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain); + INIT_LIST_HEAD(&ctx->cc_req_list); + INIT_LIST_HEAD(&ctx->cc_gc_chain); /* take a ref on belonging sec, balanced in ctx destroying */ atomic_inc(&sec->ps_refcount);
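For reference, the doubly linked list side of this conversion follows one pattern throughout: CFS_LIST_HEAD()/CFS_INIT_LIST_HEAD() become struct list_head plus INIT_LIST_HEAD(), and the cfs_list_* calls map one-to-one onto their native list_* counterparts. Below is a minimal kernel-style sketch of that pattern, modeled loosely on the mdt_export_cleanup() and lod_putref() hunks above; the struct, field, and function names are illustrative only and do not exist in the Lustre tree.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct file_data {
	struct list_head fd_list;	/* was cfs_list_t, protected by @lock */
};

static void export_cleanup(struct list_head *open_head, spinlock_t *lock)
{
	struct list_head closing_list;	/* was CFS_LIST_HEAD(closing_list) */
	struct file_data *fd, *n;

	INIT_LIST_HEAD(&closing_list);	/* was CFS_INIT_LIST_HEAD() */

	spin_lock(lock);
	while (!list_empty(open_head)) {	/* was cfs_list_empty() */
		fd = list_entry(open_head->next, struct file_data, fd_list);
		/* was cfs_list_move_tail(): unlink and park on the local list */
		list_move_tail(&fd->fd_list, &closing_list);
	}
	spin_unlock(lock);

	/* was cfs_list_for_each_entry_safe()/cfs_list_del_init() */
	list_for_each_entry_safe(fd, n, &closing_list, fd_list) {
		list_del_init(&fd->fd_list);
		kfree(fd);
	}
}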
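The hash-list and static list-head conversions follow the same shape: a file-scope CFS_LIST_HEAD() becomes an explicit LIST_HEAD_INIT() initializer, and the cfs_hlist_* helpers map onto hlist_*. Another illustrative sketch, again with made-up names, loosely mirroring the gss_mech_switch.c and gss_pipefs.c hunks:

#include <linux/list.h>
#include <linux/slab.h>

/* was: static CFS_LIST_HEAD(registered_mechs); */
static struct list_head registered_mechs = LIST_HEAD_INIT(registered_mechs);

struct ctx {
	struct hlist_node c_cache;	/* was cfs_hlist_node_t */
};

/* drain and free a context hash chain, as in ctx_list_destroy_pf() */
static void ctx_chain_destroy(struct hlist_head *head)
{
	struct ctx *c;

	while (!hlist_empty(head)) {		/* was cfs_hlist_empty() */
		c = hlist_entry(head->first, struct ctx, c_cache);
		hlist_del_init(&c->c_cache);	/* was cfs_hlist_del_init() */
		kfree(c);
	}
}

/* move every cached context to a private freelist, then dispose of it */
static void ctx_chain_flush(struct hlist_head *cache)
{
	struct hlist_head freelist = HLIST_HEAD_INIT;	/* was CFS_HLIST_HEAD_INIT */
	struct ctx *c;

	while (!hlist_empty(cache)) {
		c = hlist_entry(cache->first, struct ctx, c_cache);
		hlist_del_init(&c->c_cache);
		hlist_add_head(&c->c_cache, &freelist);	/* was cfs_hlist_add_head() */
	}
	ctx_chain_destroy(&freelist);
}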