From 8701e7e4b5ec1b34700c95b9b6588f4745730b72 Mon Sep 17 00:00:00 2001 From: James Simmons Date: Tue, 15 Jul 2014 12:37:58 -0400 Subject: [PATCH] LU-3963 obdclass: convert to linux list api Move from the cfs_[h]list api to the native linux api for all the code related to obdclass. Change-Id: Id773b6c83b22cac5b210b6d41b470a2c8d6e206d Signed-off-by: James Simmons Reviewed-on: http://review.whamcloud.com/10525 Reviewed-by: Bob Glossman Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Nathaniel Clark Reviewed-by: Oleg Drokin --- lustre/include/cl_object.h | 157 +++++++++---------- lustre/include/dt_object.h | 18 +-- lustre/include/lclient.h | 4 +- lustre/include/lprocfs_status.h | 14 +- lustre/include/lu_object.h | 70 ++++----- lustre/include/lu_ref.h | 14 +- lustre/include/lustre_capa.h | 42 ++--- lustre/include/lustre_disk.h | 6 +- lustre/include/lustre_dlm.h | 110 ++++++------- lustre/include/lustre_export.h | 84 +++++----- lustre/include/lustre_fld.h | 14 +- lustre/include/lustre_handles.h | 2 +- lustre/include/lustre_idmap.h | 4 +- lustre/include/lustre_import.h | 32 ++-- lustre/include/lustre_log.h | 13 +- lustre/include/lustre_net.h | 250 +++++++++++++++--------------- lustre/include/lustre_nodemap.h | 2 +- lustre/include/lustre_nrs_tbf.h | 28 ++-- lustre/include/lustre_sec.h | 28 ++-- lustre/include/md_object.h | 2 +- lustre/include/obd.h | 170 ++++++++++---------- lustre/include/obd_class.h | 14 +- lustre/include/obd_target.h | 8 +- lustre/obdclass/capa.c | 69 ++++----- lustre/obdclass/cl_io.c | 266 ++++++++++++++++---------------- lustre/obdclass/cl_lock.c | 199 ++++++++++++------------ lustre/obdclass/cl_object.c | 89 +++++------ lustre/obdclass/cl_page.c | 71 +++++---- lustre/obdclass/class_obd.c | 7 +- lustre/obdclass/dt_object.c | 29 ++-- lustre/obdclass/genops.c | 190 +++++++++++------------ lustre/obdclass/idmap.c | 134 ++++++++-------- lustre/obdclass/llog.c | 10 +- lustre/obdclass/llog_cat.c | 22 +-- lustre/obdclass/local_storage.c | 20 +-- lustre/obdclass/local_storage.h | 4 +- lustre/obdclass/lprocfs_jobstats.c | 62 ++++---- lustre/obdclass/lprocfs_status_server.c | 10 +- lustre/obdclass/lu_object.c | 237 ++++++++++++++-------------- lustre/obdclass/lu_ref.c | 47 +++--- lustre/obdclass/lustre_handles.c | 20 +-- lustre/obdclass/lustre_peer.c | 77 ++++----- lustre/obdclass/obd_config.c | 97 ++++++------ lustre/obdclass/obd_mount_server.c | 28 ++-- 44 files changed, 1388 insertions(+), 1386 deletions(-) diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index a037f3d..1c214a8 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -408,17 +408,17 @@ struct cl_object_operations { struct cl_object_header { /** Standard lu_object_header. cl_object::co_lu::lo_header points * here. */ - struct lu_object_header coh_lu; + struct lu_object_header coh_lu; /** \name locks * \todo XXX move locks below to the separate cache-lines, they are * mostly useless otherwise. */ /** @{ */ /** Lock protecting lock list. */ - spinlock_t coh_lock_guard; - /** @} locks */ - /** List of cl_lock's granted for this object. */ - cfs_list_t coh_locks; + spinlock_t coh_lock_guard; + /** @} locks */ + /** List of cl_lock's granted for this object. */ + struct list_head coh_locks; /** * Parent object. It is assumed that an object has a well-defined @@ -451,18 +451,19 @@ struct cl_object_header { * Helper macro: iterate over all layers of the object \a obj, assigning every * layer top-to-bottom to \a slice. 
*/ -#define cl_object_for_each(slice, obj) \ - cfs_list_for_each_entry((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) +#define cl_object_for_each(slice, obj) \ + list_for_each_entry((slice), \ + &(obj)->co_lu.lo_header->loh_layers,\ + co_lu.lo_linkage) + /** * Helper macro: iterate over all layers of the object \a obj, assigning every * layer bottom-to-top to \a slice. */ -#define cl_object_for_each_reverse(slice, obj) \ - cfs_list_for_each_entry_reverse((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) +#define cl_object_for_each_reverse(slice, obj) \ + list_for_each_entry_reverse((slice), \ + &(obj)->co_lu.lo_header->loh_layers,\ + co_lu.lo_linkage) /** @} cl_object */ #define CL_PAGE_EOF ((pgoff_t)~0ull) @@ -771,7 +772,7 @@ struct cl_page_slice { struct cl_object *cpl_obj; const struct cl_page_operations *cpl_ops; /** Linkage into cl_page::cp_layers. Immutable after creation. */ - cfs_list_t cpl_linkage; + struct list_head cpl_linkage; }; /** @@ -1479,7 +1480,7 @@ struct cl_lock_closure { * List of enclosed locks, so far. Locks are linked here through * cl_lock::cll_inclosure. */ - cfs_list_t clc_list; + struct list_head clc_list; /** * True iff closure is in a `wait' mode. This determines what * cl_lock_enclosure() does when a lock L to be added to the closure @@ -1505,20 +1506,20 @@ struct cl_lock_closure { */ struct cl_lock { /** Reference counter. */ - atomic_t cll_ref; + atomic_t cll_ref; /** List of slices. Immutable after creation. */ - cfs_list_t cll_layers; - /** - * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected - * by cl_lock::cll_descr::cld_obj::coh_lock_guard. - */ - cfs_list_t cll_linkage; - /** - * Parameters of this lock. Protected by - * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within - * cl_lock::cll_guard. Modified only on lock creation and in - * cl_lock_modify(). - */ + struct list_head cll_layers; + /** + * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected + * by cl_lock::cll_descr::cld_obj::coh_lock_guard. + */ + struct list_head cll_linkage; + /** + * Parameters of this lock. Protected by + * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within + * cl_lock::cll_guard. Modified only on lock creation and in + * cl_lock_modify(). + */ struct cl_lock_descr cll_descr; /** Protected by cl_lock::cll_guard. */ enum cl_lock_state cll_state; @@ -1571,7 +1572,7 @@ struct cl_lock { * * \see cl_lock_closure */ - cfs_list_t cll_inclosure; + struct list_head cll_inclosure; /** * Confict lock at queuing time. */ @@ -1606,7 +1607,7 @@ struct cl_lock_slice { struct cl_object *cls_obj; const struct cl_lock_operations *cls_ops; /** Linkage into cl_lock::cll_layers. Immutable after creation. */ - cfs_list_t cls_linkage; + struct list_head cls_linkage; }; /** @@ -1828,9 +1829,9 @@ do { \ * @{ */ struct cl_page_list { - unsigned pl_nr; - cfs_list_t pl_pages; - struct task_struct *pl_owner; + unsigned pl_nr; + struct list_head pl_pages; + struct task_struct *pl_owner; }; /** @@ -1969,16 +1970,16 @@ enum cl_io_state { * \see vvp_io, lov_io, osc_io, ccc_io */ struct cl_io_slice { - struct cl_io *cis_io; - /** corresponding object slice. Immutable after creation. */ - struct cl_object *cis_obj; - /** io operations. Immutable after creation. */ - const struct cl_io_operations *cis_iop; - /** - * linkage into a list of all slices for a given cl_io, hanging off - * cl_io::ci_layers. Immutable after creation. 
- */ - cfs_list_t cis_linkage; + struct cl_io *cis_io; + /** corresponding object slice. Immutable after creation. */ + struct cl_object *cis_obj; + /** io operations. Immutable after creation. */ + const struct cl_io_operations *cis_iop; + /** + * linkage into a list of all slices for a given cl_io, hanging off + * cl_io::ci_layers. Immutable after creation. + */ + struct list_head cis_linkage; }; typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, @@ -2162,13 +2163,13 @@ enum cl_enq_flags { * same lock can be part of multiple io's simultaneously. */ struct cl_io_lock_link { - /** linkage into one of cl_lockset lists. */ - cfs_list_t cill_linkage; - struct cl_lock_descr cill_descr; - struct cl_lock *cill_lock; - /** optional destructor */ - void (*cill_fini)(const struct lu_env *env, - struct cl_io_lock_link *link); + /** linkage into one of cl_lockset lists. */ + struct list_head cill_linkage; + struct cl_lock_descr cill_descr; + struct cl_lock *cill_lock; + /** optional destructor */ + void (*cill_fini)(const struct lu_env *env, + struct cl_io_lock_link *link); }; /** @@ -2201,12 +2202,12 @@ struct cl_io_lock_link { * enqueued. */ struct cl_lockset { - /** locks to be acquired. */ - cfs_list_t cls_todo; - /** locks currently being processed. */ - cfs_list_t cls_curr; - /** locks acquired. */ - cfs_list_t cls_done; + /** locks to be acquired. */ + struct list_head cls_todo; + /** locks currently being processed. */ + struct list_head cls_curr; + /** locks acquired. */ + struct list_head cls_done; }; /** @@ -2263,7 +2264,7 @@ struct cl_io { */ struct cl_io *ci_parent; /** List of slices. Immutable after creation. */ - cfs_list_t ci_layers; + struct list_head ci_layers; /** list of locks (to be) acquired by this io. */ struct cl_lockset ci_lockset; /** lock requirements, this is just a help info for sublayers. */ @@ -2501,26 +2502,26 @@ struct cl_req_obj { * req's pages. */ struct cl_req { - enum cl_req_type crq_type; - /** A list of pages being transfered */ - cfs_list_t crq_pages; - /** Number of pages in cl_req::crq_pages */ - unsigned crq_nrpages; - /** An array of objects which pages are in ->crq_pages */ - struct cl_req_obj *crq_o; - /** Number of elements in cl_req::crq_objs[] */ - unsigned crq_nrobjs; - cfs_list_t crq_layers; + enum cl_req_type crq_type; + /** A list of pages being transfered */ + struct list_head crq_pages; + /** Number of pages in cl_req::crq_pages */ + unsigned crq_nrpages; + /** An array of objects which pages are in ->crq_pages */ + struct cl_req_obj *crq_o; + /** Number of elements in cl_req::crq_objs[] */ + unsigned crq_nrobjs; + struct list_head crq_layers; }; /** * Per-layer state for request. 
*/ struct cl_req_slice { - struct cl_req *crs_req; - struct cl_device *crs_dev; - cfs_list_t crs_linkage; - const struct cl_req_operations *crs_ops; + struct cl_req *crs_req; + struct cl_device *crs_dev; + struct list_head crs_linkage; + const struct cl_req_operations *crs_ops; }; /* @} cl_req */ @@ -2922,8 +2923,8 @@ void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, enum cl_lock_state state); -int cl_queue_match (const cfs_list_t *queue, - const struct cl_lock_descr *need); +int cl_queue_match(const struct list_head *queue, + const struct cl_lock_descr *need); void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock); @@ -3048,27 +3049,27 @@ do { \ */ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) { - LASSERT(plist->pl_nr > 0); - return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); + LASSERT(plist->pl_nr > 0); + return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); } static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist) { LASSERT(plist->pl_nr > 0); - return cfs_list_entry(plist->pl_pages.next, struct cl_page, cp_batch); + return list_entry(plist->pl_pages.next, struct cl_page, cp_batch); } /** * Iterate over pages in a page list. */ #define cl_page_list_for_each(page, list) \ - cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch) + list_for_each_entry((page), &(list)->pl_pages, cp_batch) /** * Iterate over pages in a page list, taking possible removals into account. */ #define cl_page_list_for_each_safe(page, temp, list) \ - cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) + list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) void cl_page_list_init (struct cl_page_list *plist); void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); diff --git a/lustre/include/dt_object.h b/lustre/include/dt_object.h index 9247b4d9..acf22f5 100644 --- a/lustre/include/dt_object.h +++ b/lustre/include/dt_object.h @@ -105,10 +105,10 @@ typedef void (*dt_cb_t)(struct lu_env *env, struct thandle *th, #define MAX_COMMIT_CB_STR_LEN 32 struct dt_txn_commit_cb { - cfs_list_t dcb_linkage; - dt_cb_t dcb_func; - __u32 dcb_magic; - char dcb_name[MAX_COMMIT_CB_STR_LEN]; + struct list_head dcb_linkage; + dt_cb_t dcb_func; + __u32 dcb_magic; + char dcb_name[MAX_COMMIT_CB_STR_LEN]; }; /** @@ -681,7 +681,7 @@ struct dt_device { * way, because callbacks are supposed to be added/deleted only during * single-threaded start-up shut-down procedures. 
*/ - cfs_list_t dd_txn_callbacks; + struct list_head dd_txn_callbacks; unsigned int dd_record_fid_accessed:1; }; @@ -711,7 +711,7 @@ struct dt_object { */ struct local_oid_storage { /* all initialized llog systems on this node linked by this */ - cfs_list_t los_list; + struct list_head los_list; /* how many handle's reference this los has */ atomic_t los_refcount; @@ -840,9 +840,9 @@ struct dt_txn_callback { int (*dtc_txn_stop)(const struct lu_env *env, struct thandle *txn, void *cookie); void (*dtc_txn_commit)(struct thandle *txn, void *cookie); - void *dtc_cookie; - __u32 dtc_tag; - cfs_list_t dtc_linkage; + void *dtc_cookie; + __u32 dtc_tag; + struct list_head dtc_linkage; }; void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb); diff --git a/lustre/include/lclient.h b/lustre/include/lclient.h index dd784f3..afa462c 100644 --- a/lustre/include/lclient.h +++ b/lustre/include/lclient.h @@ -205,7 +205,7 @@ struct ccc_object { * * \see ccc_page::cpg_pending_linkage */ - cfs_list_t cob_pending_list; + struct list_head cob_pending_list; /** * Access this counter is protected by inode->i_sem. Now that @@ -469,7 +469,7 @@ struct cl_client_cache { /** * List of entities(OSCs) for this LRU cache */ - cfs_list_t ccc_lru; + struct list_head ccc_lru; /** * Max # of LRU entries */ diff --git a/lustre/include/lprocfs_status.h b/lustre/include/lprocfs_status.h index 4cb3112..319f3b9 100644 --- a/lustre/include/lprocfs_status.h +++ b/lustre/include/lprocfs_status.h @@ -411,13 +411,13 @@ static inline void s2dhms(struct dhms *ts, time_t secs) typedef void (*cntr_init_callback)(struct lprocfs_stats *stats); struct obd_job_stats { - cfs_hash_t *ojs_hash; - cfs_list_t ojs_list; - rwlock_t ojs_lock; /* protect the obj_list */ - cntr_init_callback ojs_cntr_init_fn; - int ojs_cntr_num; - int ojs_cleanup_interval; - time_t ojs_last_cleanup; + cfs_hash_t *ojs_hash; + struct list_head ojs_list; + rwlock_t ojs_lock; /* protect the obj_list */ + cntr_init_callback ojs_cntr_init_fn; + int ojs_cntr_num; + int ojs_cleanup_interval; + time_t ojs_last_cleanup; }; #ifdef LPROCFS diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h index 96b65869..b104851 100644 --- a/lustre/include/lu_object.h +++ b/lustre/include/lu_object.h @@ -288,7 +288,7 @@ struct lu_device { /** * Link the device to the site. **/ - cfs_list_t ld_linkage; + struct list_head ld_linkage; }; struct lu_device_type_operations; @@ -473,7 +473,7 @@ struct lu_object { /** * Linkage into list of all layers. */ - cfs_list_t lo_linkage; + struct list_head lo_linkage; /** * Link to the device, for debugging. */ @@ -558,7 +558,7 @@ struct lu_site_bkt_data { /** * number of busy object on this bucket */ - long lsb_busy; + long lsb_busy; /** * LRU list, updated on each access to object. Protected by * bucket lock of lu_site::ls_obj_hash. @@ -567,7 +567,7 @@ struct lu_site_bkt_data { * moved to the lu_site::ls_lru.prev (this is due to the non-existence * of list_for_each_entry_safe_reverse()). */ - cfs_list_t lsb_lru; + struct list_head lsb_lru; /** * Wait-queue signaled when an object in this site is ultimately * destroyed (lu_object_free()). It is used by lu_object_find() to @@ -576,7 +576,7 @@ struct lu_site_bkt_data { * * \see htable_lookup(). 
*/ - wait_queue_head_t lsb_marche_funebre; + wait_queue_head_t lsb_marche_funebre; }; enum { @@ -603,28 +603,28 @@ struct lu_site { /** * objects hash table */ - cfs_hash_t *ls_obj_hash; + cfs_hash_t *ls_obj_hash; /** * index of bucket on hash table while purging */ - int ls_purge_start; - /** - * Top-level device for this stack. - */ - struct lu_device *ls_top_dev; + int ls_purge_start; + /** + * Top-level device for this stack. + */ + struct lu_device *ls_top_dev; /** * Bottom-level device for this stack */ struct lu_device *ls_bottom_dev; - /** - * Linkage into global list of sites. - */ - cfs_list_t ls_linkage; - /** - * List for lu device for this site, protected - * by ls_ld_lock. - **/ - cfs_list_t ls_ld_linkage; + /** + * Linkage into global list of sites. + */ + struct list_head ls_linkage; + /** + * List for lu device for this site, protected + * by ls_ld_lock. + **/ + struct list_head ls_ld_linkage; spinlock_t ls_ld_lock; /** * Lock to serialize site purge. @@ -745,8 +745,8 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env, */ static inline struct lu_object *lu_object_top(struct lu_object_header *h) { - LASSERT(!cfs_list_empty(&h->loh_layers)); - return container_of0(h->loh_layers.next, struct lu_object, lo_linkage); + LASSERT(!list_empty(&h->loh_layers)); + return container_of0(h->loh_layers.next, struct lu_object, lo_linkage); } /** @@ -959,22 +959,22 @@ struct lu_context { * Pointer to an array with key values. Internal implementation * detail. */ - void **lc_value; - /** - * Linkage into a list of all remembered contexts. Only - * `non-transient' contexts, i.e., ones created for service threads - * are placed here. - */ - cfs_list_t lc_remember; - /** - * Version counter used to skip calls to lu_context_refill() when no - * keys were registered. - */ - unsigned lc_version; + void **lc_value; + /** + * Linkage into a list of all remembered contexts. Only + * `non-transient' contexts, i.e., ones created for service threads + * are placed here. + */ + struct list_head lc_remember; + /** + * Version counter used to skip calls to lu_context_refill() when no + * keys were registered. + */ + unsigned lc_version; /** * Debugging cookie. */ - unsigned lc_cookie; + unsigned lc_cookie; }; /** diff --git a/lustre/include/lu_ref.h b/lustre/include/lu_ref.h index 3ff67e0..b0c49ab 100644 --- a/lustre/include/lu_ref.h +++ b/lustre/include/lu_ref.h @@ -121,11 +121,11 @@ struct lu_ref { * Spin-lock protecting lu_ref::lf_list. */ spinlock_t lf_guard; - /** - * List of all outstanding references (each represented by struct - * lu_ref_link), pointing to this object. - */ - cfs_list_t lf_list; + /** + * List of all outstanding references (each represented by struct + * lu_ref_link), pointing to this object. + */ + struct list_head lf_list; /** * # of links. */ @@ -147,12 +147,12 @@ struct lu_ref { /** * Linkage into a global list of all lu_ref's (lu_ref_refs). 
*/ - cfs_list_t lf_linkage; + struct list_head lf_linkage; }; struct lu_ref_link { struct lu_ref *ll_ref; - cfs_list_t ll_linkage; + struct list_head ll_linkage; const char *ll_scope; const void *ll_source; }; diff --git a/lustre/include/lustre_capa.h b/lustre/include/lustre_capa.h index b83cd6f..ac713d0 100644 --- a/lustre/include/lustre_capa.h +++ b/lustre/include/lustre_capa.h @@ -71,16 +71,16 @@ struct capa_hmac_alg { } struct client_capa { - struct inode *inode; - cfs_list_t lli_list; /* link to lli_oss_capas */ + struct inode *inode; + struct list_head lli_list; /* link to lli_oss_capas */ }; struct target_capa { - cfs_hlist_node_t c_hash; /* link to capa hash */ + struct hlist_node c_hash; /* link to capa hash */ }; struct obd_capa { - cfs_list_t c_list; /* link to capa_list */ + struct list_head c_list; /* link to capa_list */ struct lustre_capa c_capa; /* capa */ atomic_t c_refc; /* ref count */ @@ -176,17 +176,17 @@ CDEBUG(level, fmt " capability key@%p seq "LPU64" keyid %u\n", \ typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *); /* obdclass/capa.c */ -extern cfs_list_t capa_list[]; +extern struct list_head capa_list[]; extern spinlock_t capa_lock; extern int capa_count[]; extern struct kmem_cache *capa_cachep; -cfs_hlist_head_t *init_capa_hash(void); -void cleanup_capa_hash(cfs_hlist_head_t *hash); +struct hlist_head *init_capa_hash(void); +void cleanup_capa_hash(struct hlist_head *hash); -struct obd_capa *capa_add(cfs_hlist_head_t *hash, +struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa); -struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, +struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa, int alive); int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key); @@ -205,18 +205,18 @@ static inline struct obd_capa *alloc_capa(int site) if (unlikely(!ocapa)) return ERR_PTR(-ENOMEM); - CFS_INIT_LIST_HEAD(&ocapa->c_list); + INIT_LIST_HEAD(&ocapa->c_list); atomic_set(&ocapa->c_refc, 1); spin_lock_init(&ocapa->c_lock); ocapa->c_site = site; - if (ocapa->c_site == CAPA_SITE_CLIENT) - CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list); - else - CFS_INIT_HLIST_NODE(&ocapa->u.tgt.c_hash); + if (ocapa->c_site == CAPA_SITE_CLIENT) + INIT_LIST_HEAD(&ocapa->u.cli.lli_list); + else + INIT_HLIST_NODE(&ocapa->u.tgt.c_hash); - return ocapa; + return ocapa; #else - return ERR_PTR(-EOPNOTSUPP); + return ERR_PTR(-EOPNOTSUPP); #endif } @@ -240,11 +240,11 @@ static inline void capa_put(struct obd_capa *ocapa) } if (atomic_dec_and_test(&ocapa->c_refc)) { - LASSERT(cfs_list_empty(&ocapa->c_list)); + LASSERT(list_empty(&ocapa->c_list)); if (ocapa->c_site == CAPA_SITE_CLIENT) { - LASSERT(cfs_list_empty(&ocapa->u.cli.lli_list)); + LASSERT(list_empty(&ocapa->u.cli.lli_list)); } else { - cfs_hlist_node_t *hnode; + struct hlist_node *hnode; hnode = &ocapa->u.tgt.c_hash; LASSERT(!hnode->next && !hnode->pprev); @@ -294,8 +294,8 @@ static inline int capa_opc_supported(struct lustre_capa *capa, __u64 opc) } struct filter_capa_key { - cfs_list_t k_list; - struct lustre_capa_key k_key; + struct list_head k_list; + struct lustre_capa_key k_key; }; enum lc_auth_id { diff --git a/lustre/include/lustre_disk.h b/lustre/include/lustre_disk.h index d66015d..8df2204 100644 --- a/lustre/include/lustre_disk.h +++ b/lustre/include/lustre_disk.h @@ -521,9 +521,9 @@ struct lustre_sb_info { /****************** mount lookup info *********************/ struct lustre_mount_info { - char *lmi_name; - struct super_block *lmi_sb; - cfs_list_t lmi_list_chain; 
+ char *lmi_name; + struct super_block *lmi_sb; + struct list_head lmi_list_chain; }; /****************** prototypes *********************/ diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h index de9863f..757c88d 100644 --- a/lustre/include/lustre_dlm.h +++ b/lustre/include/lustre_dlm.h @@ -407,7 +407,7 @@ struct ldlm_namespace { * Position in global namespace list linking all namespaces on * the node. */ - cfs_list_t ns_list_chain; + struct list_head ns_list_chain; /** * List of unused locks for this namespace. This list is also called @@ -419,7 +419,7 @@ struct ldlm_namespace { * to release from the head of this list. * Locks are linked via l_lru field in \see struct ldlm_lock. */ - cfs_list_t ns_unused_list; + struct list_head ns_unused_list; /** Number of locks in the LRU list above */ int ns_nr_unused; @@ -582,7 +582,7 @@ typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data); /** Work list for sending GL ASTs to multiple locks. */ struct ldlm_glimpse_work { struct ldlm_lock *gl_lock; /* lock to glimpse */ - cfs_list_t gl_list; /* linkage to other gl work structs */ + struct list_head gl_list; /* linkage to other gl work structs */ __u32 gl_flags;/* see LDLM_GL_WORK_* below */ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in * glimpse callback request */ @@ -594,7 +594,7 @@ struct ldlm_glimpse_work { /** Interval node data for each LDLM_EXTENT lock. */ struct ldlm_interval { struct interval_node li_node; /* node for tree management */ - cfs_list_t li_group; /* the locks which have the same + struct list_head li_group; /* the locks which have the same * policy - group of the policy */ }; #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node) @@ -698,13 +698,13 @@ struct ldlm_lock { * List item for client side LRU list. * Protected by ns_lock in struct ldlm_namespace. */ - cfs_list_t l_lru; + struct list_head l_lru; /** * Linkage to resource's lock queues according to current lock state. * (could be granted, waiting or converting) * Protected by lr_lock in struct ldlm_resource. */ - cfs_list_t l_res_link; + struct list_head l_res_link; /** * Tree node for ldlm_extent. */ @@ -713,12 +713,12 @@ struct ldlm_lock { * Per export hash of locks. * Protected by per-bucket exp->exp_lock_hash locks. */ - cfs_hlist_node_t l_exp_hash; + struct hlist_node l_exp_hash; /** * Per export hash of flock locks. * Protected by per-bucket exp->exp_flock_hash locks. */ - cfs_hlist_node_t l_exp_flock_hash; + struct hlist_node l_exp_flock_hash; /** * Requested mode. * Protected by lr_lock. @@ -841,7 +841,7 @@ struct ldlm_lock { * expired_lock_thread.elt_expired_locks for further processing. * Protected by elt_lock. */ - cfs_list_t l_pending_chain; + struct list_head l_pending_chain; /** * Set when lock is sent a blocking AST. Time in seconds when timeout @@ -863,11 +863,11 @@ struct ldlm_lock { */ int l_bl_ast_run; /** List item ldlm_add_ast_work_item() for case of blocking ASTs. */ - cfs_list_t l_bl_ast; + struct list_head l_bl_ast; /** List item ldlm_add_ast_work_item() for case of completion ASTs. */ - cfs_list_t l_cp_ast; + struct list_head l_cp_ast; /** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */ - cfs_list_t l_rk_ast; + struct list_head l_rk_ast; /** * Pointer to a conflicting lock that caused blocking AST to be sent @@ -879,8 +879,8 @@ struct ldlm_lock { * Protected by lr_lock, linkages to "skip lists". 
* For more explanations of skip lists see ldlm/ldlm_inodebits.c */ - cfs_list_t l_sl_mode; - cfs_list_t l_sl_policy; + struct list_head l_sl_mode; + struct list_head l_sl_policy; /** Reference tracking structure to debug leaked locks. */ struct lu_ref l_reference; @@ -889,7 +889,7 @@ struct ldlm_lock { /** number of export references taken */ int l_exp_refs_nr; /** link all locks referencing one export */ - cfs_list_t l_exp_refs_link; + struct list_head l_exp_refs_link; /** referenced export object */ struct obd_export *l_exp_refs_target; #endif @@ -899,7 +899,7 @@ struct ldlm_lock { * Lock order of waiting_lists_spinlock, exp_bl_list_lock and res lock * is: res lock -> exp_bl_list_lock -> wanting_lists_spinlock. */ - cfs_list_t l_exp_list; + struct list_head l_exp_list; }; /** @@ -920,7 +920,7 @@ struct ldlm_resource { * List item for list in namespace hash. * protected by ns_lock */ - cfs_hlist_node_t lr_hash; + struct hlist_node lr_hash; /** Spinlock to protect locks under this resource. */ spinlock_t lr_lock; @@ -929,13 +929,13 @@ struct ldlm_resource { * protected by lr_lock * @{ */ /** List of locks in granted state */ - cfs_list_t lr_granted; + struct list_head lr_granted; /** List of locks waiting to change their granted mode (converted) */ - cfs_list_t lr_converting; + struct list_head lr_converting; /** * List of locks that could not be granted due to conflicts and * that are waiting for conflicts to go away */ - cfs_list_t lr_waiting; + struct list_head lr_waiting; /** @} */ /* XXX No longer needed? Remove ASAP */ @@ -1039,13 +1039,13 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) } struct ldlm_ast_work { - struct ldlm_lock *w_lock; - int w_blocking; - struct ldlm_lock_desc w_desc; - cfs_list_t w_list; - int w_flags; - void *w_data; - int w_datalen; + struct ldlm_lock *w_lock; + int w_blocking; + struct ldlm_lock_desc w_desc; + struct list_head w_list; + int w_flags; + void *w_data; + int w_datalen; }; /** @@ -1125,8 +1125,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, #endif typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, ldlm_error_t *err, - cfs_list_t *work_list); + int first_enq, ldlm_error_t *err, + struct list_head *work_list); /** * Return values for lock iterators. 
@@ -1176,7 +1176,8 @@ int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *, void *data, int flag); int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data); -int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list); +int ldlm_glimpse_locks(struct ldlm_resource *res, + struct list_head *gl_work_list); /** @} ldlm_srv_ast */ /** \defgroup ldlm_handlers Server LDLM handlers @@ -1291,17 +1292,17 @@ do { \ lock; \ }) -#define ldlm_lock_list_put(head, member, count) \ -({ \ - struct ldlm_lock *_lock, *_next; \ - int c = count; \ - cfs_list_for_each_entry_safe(_lock, _next, head, member) { \ - if (c-- == 0) \ - break; \ - cfs_list_del_init(&_lock->member); \ - LDLM_LOCK_RELEASE(_lock); \ - } \ - LASSERT(c <= 0); \ +#define ldlm_lock_list_put(head, member, count) \ +({ \ + struct ldlm_lock *_lock, *_next; \ + int c = count; \ + list_for_each_entry_safe(_lock, _next, head, member) { \ + if (c-- == 0) \ + break; \ + list_del_init(&_lock->member); \ + LDLM_LOCK_RELEASE(_lock); \ + } \ + LASSERT(c <= 0); \ }) struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock); @@ -1358,8 +1359,8 @@ struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res); int ldlm_resource_putref(struct ldlm_resource *res); void ldlm_resource_add_lock(struct ldlm_resource *res, - cfs_list_t *head, - struct ldlm_lock *lock); + struct list_head *head, + struct ldlm_lock *lock); void ldlm_resource_unlink_lock(struct ldlm_lock *lock); void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc); void ldlm_dump_all_namespaces(ldlm_side_t client, int level); @@ -1402,13 +1403,12 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, void *lvb, __u32 lvb_len, enum lvb_type lvb_type, struct lustre_handle *lockh, int async); int ldlm_prep_enqueue_req(struct obd_export *exp, - struct ptlrpc_request *req, - cfs_list_t *cancels, - int count); -int ldlm_prep_elc_req(struct obd_export *exp, - struct ptlrpc_request *req, - int version, int opc, int canceloff, - cfs_list_t *cancels, int count); + struct ptlrpc_request *req, + struct list_head *cancels, + int count); +int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, + int version, int opc, int canceloff, + struct list_head *cancels, int count); struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len); int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req, @@ -1442,16 +1442,16 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, ldlm_mode_t mode, ldlm_cancel_flags_t flags, void *opaque); -int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head, - int count, ldlm_cancel_flags_t flags); +int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head, + int count, ldlm_cancel_flags_t flags); int ldlm_cancel_resource_local(struct ldlm_resource *res, - cfs_list_t *cancels, + struct list_head *cancels, ldlm_policy_data_t *policy, ldlm_mode_t mode, __u64 lock_flags, ldlm_cancel_flags_t cancel_flags, void *opaque); -int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count, +int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, ldlm_cancel_flags_t flags); -int ldlm_cli_cancel_list(cfs_list_t *head, int count, +int ldlm_cli_cancel_list(struct list_head *head, int count, struct ptlrpc_request *req, ldlm_cancel_flags_t flags); /** @} ldlm_cli_api 
*/ diff --git a/lustre/include/lustre_export.h b/lustre/include/lustre_export.h index 24e5c47..bb1237c 100644 --- a/lustre/include/lustre_export.h +++ b/lustre/include/lustre_export.h @@ -75,14 +75,14 @@ struct tg_export_data { struct mdt_export_data { struct tg_export_data med_ted; /** List of all files opened by client on this MDT */ - cfs_list_t med_open_head; + struct list_head med_open_head; spinlock_t med_open_lock; /* med_open_head, mfd_list */ struct mutex med_idmap_mutex; struct lustre_idmap_table *med_idmap; }; struct ec_export_data { /* echo client */ - cfs_list_t eced_locks; + struct list_head eced_locks; }; /* In-memory access to client data from OST struct */ @@ -93,7 +93,7 @@ struct filter_export_data { __u64 fed_lastid_gen; long fed_dirty; /* in bytes */ long fed_grant; /* in bytes */ - cfs_list_t fed_mod_list; /* files being modified */ + struct list_head fed_mod_list; /* files being modified */ long fed_pending; /* bytes just being written */ /* count of SOFT_SYNC RPCs, which will be reset after * ofd_soft_sync_limit number of RPCs, and trigger a sync. */ @@ -104,7 +104,7 @@ struct filter_export_data { }; struct mgs_export_data { - cfs_list_t med_clients; /* mgc fs client via this exp */ + struct list_head med_clients; /* mgc fs client via this exp */ spinlock_t med_lock; /* protect med_clients */ }; @@ -113,15 +113,15 @@ struct mgs_export_data { * It tracks access patterns to this export on a per-client-NID basis */ struct nid_stat { - lnet_nid_t nid; - cfs_hlist_node_t nid_hash; - cfs_list_t nid_list; + lnet_nid_t nid; + struct hlist_node nid_hash; + struct list_head nid_list; struct obd_device *nid_obd; struct proc_dir_entry *nid_proc; struct lprocfs_stats *nid_stats; struct lprocfs_stats *nid_ldlm_stats; - atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash - exp_nid_stats */ + atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash + exp_nid_stats */ }; #define nidstat_getref(nidstat) \ @@ -155,38 +155,38 @@ struct obd_export { * Subsequent client RPCs contain this handle id to identify * what export they are talking to. */ - struct portals_handle exp_handle; - atomic_t exp_refcount; + struct portals_handle exp_handle; + atomic_t exp_refcount; /** * Set of counters below is to track where export references are * kept. The exp_rpc_count is used for reconnect handling also, * the cb_count and locks_count are for debug purposes only for now. 
* The sum of them should be less than exp_refcount by 3 */ - atomic_t exp_rpc_count; /* RPC references */ - atomic_t exp_cb_count; /* Commit callback references */ + atomic_t exp_rpc_count; /* RPC references */ + atomic_t exp_cb_count; /* Commit callback references */ /** Number of queued replay requests to be processes */ - atomic_t exp_replay_count; - atomic_t exp_locks_count; /** Lock references */ + atomic_t exp_replay_count; + atomic_t exp_locks_count; /** Lock references */ #if LUSTRE_TRACKS_LOCK_EXP_REFS - cfs_list_t exp_locks_list; - spinlock_t exp_locks_list_guard; + struct list_head exp_locks_list; + spinlock_t exp_locks_list_guard; #endif /** UUID of client connected to this export */ - struct obd_uuid exp_client_uuid; + struct obd_uuid exp_client_uuid; /** To link all exports on an obd device */ - cfs_list_t exp_obd_chain; - cfs_hlist_node_t exp_uuid_hash; /** uuid-export hash*/ - cfs_hlist_node_t exp_nid_hash; /** nid-export hash */ + struct list_head exp_obd_chain; + struct hlist_node exp_uuid_hash; /** uuid-export hash*/ + struct hlist_node exp_nid_hash; /** nid-export hash */ /** * All exports eligible for ping evictor are linked into a list * through this field in "most time since last request on this export" * order * protected by obd_dev_lock */ - cfs_list_t exp_obd_chain_timed; - /** Obd device of this export */ - struct obd_device *exp_obd; + struct list_head exp_obd_chain_timed; + /** Obd device of this export */ + struct obd_device *exp_obd; /** * "reverse" import to send requests (e.g. from ldlm) back to client * exp_lock protect its change @@ -199,20 +199,20 @@ struct obd_export { __u32 exp_conn_cnt; /** Hash list of all ldlm locks granted on this export */ cfs_hash_t *exp_lock_hash; - /** + /** * Hash list for Posix lock deadlock detection, added with * ldlm_lock::l_exp_flock_hash. 
- */ - cfs_hash_t *exp_flock_hash; - cfs_list_t exp_outstanding_replies; - cfs_list_t exp_uncommitted_replies; - spinlock_t exp_uncommitted_replies_lock; - /** Last committed transno for this export */ - __u64 exp_last_committed; - /** When was last request received */ - cfs_time_t exp_last_request_time; - /** On replay all requests waiting for replay are linked here */ - cfs_list_t exp_req_replay_queue; + */ + cfs_hash_t *exp_flock_hash; + struct list_head exp_outstanding_replies; + struct list_head exp_uncommitted_replies; + spinlock_t exp_uncommitted_replies_lock; + /** Last committed transno for this export */ + __u64 exp_last_committed; + /** When was last request received */ + cfs_time_t exp_last_request_time; + /** On replay all requests waiting for replay are linked here */ + struct list_head exp_req_replay_queue; /** * protects exp_flags, exp_outstanding_replies and the change * of exp_imp_reverse @@ -247,13 +247,13 @@ struct obd_export { cfs_time_t exp_flvr_expire[2]; /* seconds */ /** protects exp_hp_rpcs */ - spinlock_t exp_rpc_lock; - cfs_list_t exp_hp_rpcs; /* (potential) HP RPCs */ - cfs_list_t exp_reg_rpcs; /* RPC being handled */ + spinlock_t exp_rpc_lock; + struct list_head exp_hp_rpcs; /* (potential) HP RPCs */ + struct list_head exp_reg_rpcs; /* RPC being handled */ - /** blocking dlm lock list, protected by exp_bl_list_lock */ - cfs_list_t exp_bl_list; - spinlock_t exp_bl_list_lock; + /** blocking dlm lock list, protected by exp_bl_list_lock */ + struct list_head exp_bl_list; + spinlock_t exp_bl_list_lock; /** Target specific data */ union { diff --git a/lustre/include/lustre_fld.h b/lustre/include/lustre_fld.h index b09fca8..d018941 100644 --- a/lustre/include/lustre_fld.h +++ b/lustre/include/lustre_fld.h @@ -63,7 +63,7 @@ enum { }; struct lu_fld_target { - cfs_list_t ft_chain; + struct list_head ft_chain; struct obd_export *ft_exp; struct lu_server_fld *ft_srv; __u64 ft_idx; @@ -105,13 +105,13 @@ struct lu_server_fld { }; struct lu_client_fld { - /** - * Client side proc entry. */ - cfs_proc_dir_entry_t *lcf_proc_dir; + /** + * Client side proc entry. */ + struct proc_dir_entry *lcf_proc_dir; - /** - * List of exports client FLD knows about. */ - cfs_list_t lcf_targets; + /** + * List of exports client FLD knows about. */ + struct list_head lcf_targets; /** * Current hash to be used to chose an export. */ diff --git a/lustre/include/lustre_handles.h b/lustre/include/lustre_handles.h index 496887e..ac70270 100644 --- a/lustre/include/lustre_handles.h +++ b/lustre/include/lustre_handles.h @@ -77,7 +77,7 @@ struct portals_handle_ops { * ldlm_lock. If it's not at the top, you'll want to use container_of() * to compute the start of the structure based on the handle field. 
*/ struct portals_handle { - cfs_list_t h_link; + struct list_head h_link; __u64 h_cookie; const void *h_owner; struct portals_handle_ops *h_ops; diff --git a/lustre/include/lustre_idmap.h b/lustre/include/lustre_idmap.h index d18325e..a7d3841 100644 --- a/lustre/include/lustre_idmap.h +++ b/lustre/include/lustre_idmap.h @@ -74,8 +74,8 @@ enum lustre_idmap_idx { }; struct lustre_idmap_table { - spinlock_t lit_lock; - cfs_list_t lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE]; + spinlock_t lit_lock; + struct list_head lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE]; }; struct lu_ucred; diff --git a/lustre/include/lustre_import.h b/lustre/include/lustre_import.h index 1182308..a582f48 100644 --- a/lustre/include/lustre_import.h +++ b/lustre/include/lustre_import.h @@ -71,7 +71,7 @@ struct adaptive_timeout { }; struct ptlrpc_at_array { - cfs_list_t *paa_reqs_array; /** array to hold requests */ + struct list_head *paa_reqs_array; /** array to hold requests */ __u32 paa_size; /** the size of array */ __u32 paa_count; /** the total count of reqs */ time_t paa_deadline; /** the earliest deadline of reqs */ @@ -132,9 +132,9 @@ enum obd_import_event { * Definition of import connection structure */ struct obd_import_conn { - /** Item for linking connections together */ - cfs_list_t oic_item; - /** Pointer to actual PortalRPC connection */ + /** Item for linking connections together */ + struct list_head oic_item; + /** Pointer to actual PortalRPC connection */ struct ptlrpc_connection *oic_conn; /** uuid of remote side */ struct obd_uuid oic_uuid; @@ -165,19 +165,19 @@ struct obd_import { struct ptlrpc_connection *imp_connection; /** PortalRPC client structure for this import */ struct ptlrpc_client *imp_client; - /** List element for linking into pinger chain */ - cfs_list_t imp_pinger_chain; - /** List element for linking into chain for destruction */ - cfs_list_t imp_zombie_chain; + /** List element for linking into pinger chain */ + struct list_head imp_pinger_chain; + /** List element for linking into chain for destruction */ + struct list_head imp_zombie_chain; /** * Lists of requests that are retained for replay, waiting for a reply, * or waiting for recovery to complete, respectively. * @{ */ - cfs_list_t imp_replay_list; - cfs_list_t imp_sending_list; - cfs_list_t imp_delayed_list; + struct list_head imp_replay_list; + struct list_head imp_sending_list; + struct list_head imp_delayed_list; /** @} */ /** @@ -187,12 +187,12 @@ struct obd_import { * The imp_replay_cursor is for accelerating searching during replay. * @{ */ - cfs_list_t imp_committed_list; - cfs_list_t *imp_replay_cursor; + struct list_head imp_committed_list; + struct list_head *imp_replay_cursor; /** @} */ - /** obd device for this import */ - struct obd_device *imp_obd; + /** obd device for this import */ + struct obd_device *imp_obd; /** * some seciruty-related fields @@ -252,7 +252,7 @@ struct obd_import { __u64 imp_last_success_conn; /** List of all possible connection for import. */ - cfs_list_t imp_conn_list; + struct list_head imp_conn_list; /** * Current connection. 
\a imp_connection is imp_conn_current->oic_conn */ diff --git a/lustre/include/lustre_log.h b/lustre/include/lustre_log.h index 9f74de2..4726ddd 100644 --- a/lustre/include/lustre_log.h +++ b/lustre/include/lustre_log.h @@ -80,15 +80,16 @@ enum llog_open_param { }; struct plain_handle_data { - cfs_list_t phd_entry; - struct llog_handle *phd_cat_handle; - struct llog_cookie phd_cookie; /* cookie of this log in its cat */ + struct list_head phd_entry; + struct llog_handle *phd_cat_handle; + /* cookie of this log in its cat */ + struct llog_cookie phd_cookie; }; struct cat_handle_data { - cfs_list_t chd_head; - struct llog_handle *chd_current_log; /* currently open log */ - struct llog_handle *chd_next_log; /* llog to be used next */ + struct list_head chd_head; + struct llog_handle *chd_current_log;/* currently open log */ + struct llog_handle *chd_next_log; /* llog to be used next */ }; static inline void logid_to_fid(struct llog_logid *id, struct lu_fid *fid) diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index 69261bf..70600b9 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -518,7 +518,7 @@ */ struct ptlrpc_connection { /** linkage for connections hash table */ - cfs_hlist_node_t c_hash; + struct hlist_node c_hash; /** Our own lnet nid for this connection */ lnet_nid_t c_self; /** Remote side nid for this connection */ @@ -574,26 +574,26 @@ typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *); * returned. */ struct ptlrpc_request_set { - atomic_t set_refcount; + atomic_t set_refcount; /** number of in queue requests */ - atomic_t set_new_count; + atomic_t set_new_count; /** number of uncompleted requests */ - atomic_t set_remaining; + atomic_t set_remaining; /** wait queue to wait on for request events */ - wait_queue_head_t set_waitq; - wait_queue_head_t *set_wakeup_ptr; + wait_queue_head_t set_waitq; + wait_queue_head_t *set_wakeup_ptr; /** List of requests in the set */ - cfs_list_t set_requests; + struct list_head set_requests; /** * List of completion callbacks to be called when the set is completed * This is only used if \a set_interpret is NULL. * Links struct ptlrpc_set_cbdata. */ - cfs_list_t set_cblist; + struct list_head set_cblist; /** Completion callback, if only one. */ - set_interpreter_func set_interpret; + set_interpreter_func set_interpret; /** opaq argument passed to completion \a set_interpret callback. */ - void *set_arg; + void *set_arg; /** * Lock for \a set_new_requests manipulations * locked so that any old caller can communicate requests to @@ -601,29 +601,29 @@ struct ptlrpc_request_set { */ spinlock_t set_new_req_lock; /** List of new yet unsent requests. Only used with ptlrpcd now. 
*/ - cfs_list_t set_new_requests; + struct list_head set_new_requests; /** rq_status of requests that have been freed already */ - int set_rc; + int set_rc; /** Additional fields used by the flow control extension */ /** Maximum number of RPCs in flight */ - int set_max_inflight; + int set_max_inflight; /** Callback function used to generate RPCs */ - set_producer_func set_producer; + set_producer_func set_producer; /** opaq argument passed to the producer callback */ - void *set_producer_arg; + void *set_producer_arg; }; /** * Description of a single ptrlrpc_set callback */ struct ptlrpc_set_cbdata { - /** List linkage item */ - cfs_list_t psc_item; - /** Pointer to interpreting function */ - set_interpreter_func psc_interpret; - /** Opaq argument to pass to the callback */ - void *psc_data; + /** List linkage item */ + struct list_head psc_item; + /** Pointer to interpreting function */ + set_interpreter_func psc_interpret; + /** Opaq argument to pass to the callback */ + void *psc_data; }; struct ptlrpc_bulk_desc; @@ -650,20 +650,20 @@ struct ptlrpc_cb_id { * added to the state for replay/failover consistency guarantees. */ struct ptlrpc_reply_state { - /** Callback description */ - struct ptlrpc_cb_id rs_cb_id; - /** Linkage for list of all reply states in a system */ - cfs_list_t rs_list; - /** Linkage for list of all reply states on same export */ - cfs_list_t rs_exp_list; - /** Linkage for list of all reply states for same obd */ - cfs_list_t rs_obd_list; + /** Callback description */ + struct ptlrpc_cb_id rs_cb_id; + /** Linkage for list of all reply states in a system */ + struct list_head rs_list; + /** Linkage for list of all reply states on same export */ + struct list_head rs_exp_list; + /** Linkage for list of all reply states for same obd */ + struct list_head rs_obd_list; #if RS_DEBUG - cfs_list_t rs_debug_list; + struct list_head rs_debug_list; #endif - /** A spinlock to protect the reply state flags */ + /** A spinlock to protect the reply state flags */ spinlock_t rs_lock; - /** Reply state flags */ + /** Reply state flags */ unsigned long rs_difficult:1; /* ACK/commit stuff */ unsigned long rs_no_ack:1; /* no ACK, even for difficult requests */ @@ -738,13 +738,13 @@ typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env, */ struct ptlrpc_request_pool { /** Locks the list */ - spinlock_t prp_lock; - /** list of ptlrpc_request structs */ - cfs_list_t prp_req_list; - /** Maximum message size that would fit into a rquest from this pool */ - int prp_rq_size; - /** Function to allocate more requests for this pool */ - void (*prp_populate)(struct ptlrpc_request_pool *, int); + spinlock_t prp_lock; + /** list of ptlrpc_request structs */ + struct list_head prp_req_list; + /** Maximum message size that would fit into a rquest from this pool */ + int prp_rq_size; + /** Function to allocate more requests for this pool */ + void (*prp_populate)(struct ptlrpc_request_pool *, int); }; struct lu_context; @@ -1058,7 +1058,7 @@ struct ptlrpc_nrs { /** * List of registered policies */ - cfs_list_t nrs_policy_list; + struct list_head nrs_policy_list; /** * List of policies with queued requests. Policies that have any * outstanding requests are queued here, and this list is queried @@ -1067,7 +1067,7 @@ struct ptlrpc_nrs { * point transition away from the * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained. 
*/ - cfs_list_t nrs_policy_queued; + struct list_head nrs_policy_queued; /** * Service partition for this NRS head */ @@ -1173,7 +1173,7 @@ struct ptlrpc_nrs_pol_desc { /** * Link into nrs_core::nrs_policies */ - cfs_list_t pd_list; + struct list_head pd_list; /** * NRS operations for this policy */ @@ -1309,12 +1309,12 @@ struct ptlrpc_nrs_policy { * Linkage into the NRS head's list of policies, * ptlrpc_nrs:nrs_policy_list */ - cfs_list_t pol_list; + struct list_head pol_list; /** * Linkage into the NRS head's list of policies with enqueued * requests ptlrpc_nrs:nrs_policy_queued */ - cfs_list_t pol_list_queued; + struct list_head pol_list_queued; /** * Current state of this policy */ @@ -1416,7 +1416,7 @@ struct nrs_fifo_head { /** * List of queued requests. */ - cfs_list_t fh_list; + struct list_head fh_list; /** * For debugging purposes. */ @@ -1424,7 +1424,7 @@ struct nrs_fifo_head { }; struct nrs_fifo_req { - cfs_list_t fr_list; + struct list_head fr_list; __u64 fr_sequence; }; @@ -1466,7 +1466,7 @@ struct nrs_crrn_net { */ struct nrs_crrn_client { struct ptlrpc_nrs_resource cc_res; - cfs_hlist_node_t cc_hnode; + struct hlist_node cc_hnode; lnet_nid_t cc_nid; /** * The round number against which this client is currently scheduling @@ -1626,7 +1626,7 @@ struct nrs_orr_data { */ struct nrs_orr_object { struct ptlrpc_nrs_resource oo_res; - cfs_hlist_node_t oo_hnode; + struct hlist_node oo_hnode; /** * The round number against which requests are being scheduled for this * object or OST @@ -1790,34 +1790,34 @@ struct ptlrpc_hpreq_ops { */ struct ptlrpc_request { /* Request type: one of PTL_RPC_MSG_* */ - int rq_type; + int rq_type; /** Result of request processing */ - int rq_status; - /** - * Linkage item through which this request is included into - * sending/delayed lists on client and into rqbd list on server - */ - cfs_list_t rq_list; - /** - * Server side list of incoming unserved requests sorted by arrival - * time. Traversed from time to time to notice about to expire - * requests and sent back "early replies" to clients to let them - * know server is alive and well, just very busy to service their - * requests in time - */ - cfs_list_t rq_timed_list; - /** server-side history, used for debuging purposes. */ - cfs_list_t rq_history_list; - /** server-side per-export list */ - cfs_list_t rq_exp_list; - /** server-side hp handlers */ - struct ptlrpc_hpreq_ops *rq_ops; + int rq_status; + /** + * Linkage item through which this request is included into + * sending/delayed lists on client and into rqbd list on server + */ + struct list_head rq_list; + /** + * Server side list of incoming unserved requests sorted by arrival + * time. Traversed from time to time to notice about to expire + * requests and sent back "early replies" to clients to let them + * know server is alive and well, just very busy to service their + * requests in time + */ + struct list_head rq_timed_list; + /** server-side history, used for debuging purposes. */ + struct list_head rq_history_list; + /** server-side per-export list */ + struct list_head rq_exp_list; + /** server-side hp handlers */ + struct ptlrpc_hpreq_ops *rq_ops; /** initial thread servicing this request */ - struct ptlrpc_thread *rq_svc_thread; + struct ptlrpc_thread *rq_svc_thread; /** history sequence # */ - __u64 rq_history_seq; + __u64 rq_history_seq; /** \addtogroup nrs * @{ */ @@ -1900,17 +1900,17 @@ struct ptlrpc_request { * there. * Also see \a rq_replay comment above. 
*/ - cfs_list_t rq_replay_list; + struct list_head rq_replay_list; - /** - * security and encryption data - * @{ */ - struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ - struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ - cfs_list_t rq_ctx_chain; /**< link to waited ctx */ + /** + * security and encryption data + * @{ */ + struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ + struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ + struct list_head rq_ctx_chain; /**< link to waited ctx */ - struct sptlrpc_flavor rq_flvr; /**< for client & server */ - enum lustre_sec_part rq_sp_from; + struct sptlrpc_flavor rq_flvr; /**< for client & server */ + enum lustre_sec_part rq_sp_from; /* client/server security flags */ unsigned int @@ -2034,7 +2034,7 @@ struct ptlrpc_request { /** Per-request waitq introduced by bug 21938 for recovery waiting */ wait_queue_head_t rq_set_waitq; /** Link item for request set lists */ - cfs_list_t rq_set_chain; + struct list_head rq_set_chain; /** Link back to the request set */ struct ptlrpc_request_set *rq_set; /** Async completion handler, called when reply is received */ @@ -2244,16 +2244,16 @@ do { \ * Structure that defines a single page of a bulk transfer */ struct ptlrpc_bulk_page { - /** Linkage to list of pages in a bulk */ - cfs_list_t bp_link; - /** - * Number of bytes in a page to transfer starting from \a bp_pageoffset - */ - int bp_buflen; - /** offset within a page */ - int bp_pageoffset; - /** The page itself */ - struct page *bp_page; + /** Linkage to list of pages in a bulk */ + struct list_head bp_link; + /** + * Number of bytes in a page to transfer starting from \a bp_pageoffset + */ + int bp_buflen; + /** offset within a page */ + int bp_pageoffset; + /** The page itself */ + struct page *bp_page; }; #define BULK_GET_SOURCE 0 @@ -2334,7 +2334,7 @@ struct ptlrpc_thread { /** * List of active threads in svc->srv_threads */ - cfs_list_t t_link; + struct list_head t_link; /** * thread-private data (preallocated memory) */ @@ -2429,23 +2429,23 @@ static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread, * More than one request can fit into the buffer. 
*/ struct ptlrpc_request_buffer_desc { - /** Link item for rqbds on a service */ - cfs_list_t rqbd_list; - /** History of requests for this buffer */ - cfs_list_t rqbd_reqs; - /** Back pointer to service for which this buffer is registered */ - struct ptlrpc_service_part *rqbd_svcpt; - /** LNet descriptor */ - lnet_handle_md_t rqbd_md_h; - int rqbd_refcount; - /** The buffer itself */ - char *rqbd_buffer; - struct ptlrpc_cb_id rqbd_cbid; - /** - * This "embedded" request structure is only used for the - * last request to fit into the buffer - */ - struct ptlrpc_request rqbd_req; + /** Link item for rqbds on a service */ + struct list_head rqbd_list; + /** History of requests for this buffer */ + struct list_head rqbd_reqs; + /** Back pointer to service for which this buffer is registered */ + struct ptlrpc_service_part *rqbd_svcpt; + /** LNet descriptor */ + lnet_handle_md_t rqbd_md_h; + int rqbd_refcount; + /** The buffer itself */ + char *rqbd_buffer; + struct ptlrpc_cb_id rqbd_cbid; + /** + * This "embedded" request structure is only used for the + * last request to fit into the buffer + */ + struct ptlrpc_request rqbd_req; }; typedef int (*svc_handler_t)(struct ptlrpc_request *req); @@ -2496,9 +2496,9 @@ struct ptlrpc_service_ops { struct ptlrpc_service { /** serialize /proc operations */ spinlock_t srv_lock; - /** most often accessed fields */ - /** chain thru all services */ - cfs_list_t srv_list; + /** most often accessed fields */ + /** chain thru all services */ + struct list_head srv_list; /** service operations table */ struct ptlrpc_service_ops srv_ops; /** only statically allocated strings here; we don't clean them */ @@ -2506,7 +2506,7 @@ struct ptlrpc_service { /** only statically allocated strings here; we don't clean them */ char *srv_thread_name; /** service thread list */ - cfs_list_t srv_threads; + struct list_head srv_threads; /** threads # should be created for each partition on initializing */ int srv_nthrs_cpt_init; /** limit of threads number for each partition */ @@ -2587,7 +2587,7 @@ struct ptlrpc_service_part { /** # running threads */ int scp_nthrs_running; /** service threads list */ - cfs_list_t scp_threads; + struct list_head scp_threads; /** * serialize the following fields, used for protecting @@ -2604,11 +2604,11 @@ struct ptlrpc_service_part { /** # incoming reqs */ int scp_nreqs_incoming; /** request buffers to be reposted */ - cfs_list_t scp_rqbd_idle; + struct list_head scp_rqbd_idle; /** req buffers receiving */ - cfs_list_t scp_rqbd_posted; + struct list_head scp_rqbd_posted; /** incoming reqs */ - cfs_list_t scp_req_incoming; + struct list_head scp_req_incoming; /** timeout before re-posting reqs, in tick */ cfs_duration_t scp_rqbd_timeout; /** @@ -2618,9 +2618,9 @@ struct ptlrpc_service_part { wait_queue_head_t scp_waitq; /** request history */ - cfs_list_t scp_hist_reqs; + struct list_head scp_hist_reqs; /** request buffer history */ - cfs_list_t scp_hist_rqbds; + struct list_head scp_hist_rqbds; /** # request buffers in history */ int scp_hist_nrqbds; /** sequence number for request */ @@ -2672,13 +2672,13 @@ struct ptlrpc_service_part { */ spinlock_t scp_rep_lock __cfs_cacheline_aligned; /** all the active replies */ - cfs_list_t scp_rep_active; + struct list_head scp_rep_active; #ifndef __KERNEL__ /** replies waiting for service */ - cfs_list_t scp_rep_queue; + struct list_head scp_rep_queue; #endif /** List of free reply_states */ - cfs_list_t scp_rep_idle; + struct list_head scp_rep_idle; /** waitq to run, when adding stuff to 
srv_free_rs_list */ wait_queue_head_t scp_rep_waitq; /** # 'difficult' replies */ @@ -3476,9 +3476,9 @@ typedef int (*timeout_cb_t)(struct timeout_item *, void *); int ptlrpc_pinger_add_import(struct obd_import *imp); int ptlrpc_pinger_del_import(struct obd_import *imp); int ptlrpc_add_timeout_client(int time, enum timeout_event event, - timeout_cb_t cb, void *data, - cfs_list_t *obd_list); -int ptlrpc_del_timeout_client(cfs_list_t *obd_list, + timeout_cb_t cb, void *data, + struct list_head *obd_list); +int ptlrpc_del_timeout_client(struct list_head *obd_list, enum timeout_event event); struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp); int ptlrpc_obd_ping(struct obd_device *obd); diff --git a/lustre/include/lustre_nodemap.h b/lustre/include/lustre_nodemap.h index 61e9dc8..23a5f8b 100644 --- a/lustre/include/lustre_nodemap.h +++ b/lustre/include/lustre_nodemap.h @@ -84,7 +84,7 @@ struct lu_nodemap { /* attached client members of this nodemap */ struct list_head nm_exports; /* access by nodemap name */ - cfs_hlist_node_t nm_hash; + struct hlist_node nm_hash; }; void nodemap_activate(const bool value); diff --git a/lustre/include/lustre_nrs_tbf.h b/lustre/include/lustre_nrs_tbf.h index 0a3baa5..02ebfd4 100644 --- a/lustre/include/lustre_nrs_tbf.h +++ b/lustre/include/lustre_nrs_tbf.h @@ -45,14 +45,14 @@ struct nrs_tbf_cmd; struct nrs_tbf_jobid { char *tj_id; - cfs_list_t tj_linkage; + struct list_head tj_linkage; }; struct nrs_tbf_client { /** Resource object for policy instance. */ struct ptlrpc_nrs_resource tc_res; /** Node in the hash table. */ - cfs_hlist_node_t tc_hnode; + struct hlist_node tc_hnode; /** NID of the client. */ lnet_nid_t tc_nid; /** Jobid of the client. */ @@ -60,7 +60,7 @@ struct nrs_tbf_client { /** Reference number of the client. */ atomic_t tc_ref; /** Likage to rule. */ - cfs_list_t tc_linkage; + struct list_head tc_linkage; /** Pointer to rule. */ struct nrs_tbf_rule *tc_rule; /** Generation of the rule matched. */ @@ -76,7 +76,7 @@ struct nrs_tbf_client { /** Time check-point. */ __u64 tc_check_time; /** List of queued requests. */ - cfs_list_t tc_list; + struct list_head tc_list; /** Node in binary heap. */ cfs_binheap_node_t tc_node; /** Whether the client is in heap. */ @@ -87,7 +87,7 @@ struct nrs_tbf_client { * Linkage into LRU list. Protected bucket lock of * nrs_tbf_head::th_cli_hash. */ - cfs_list_t tc_lru; + struct list_head tc_lru; }; #define MAX_TBF_NAME (16) @@ -101,13 +101,13 @@ struct nrs_tbf_rule { /** Head belongs to. */ struct nrs_tbf_head *tr_head; /** Likage to head. */ - cfs_list_t tr_linkage; + struct list_head tr_linkage; /** Nid list of the rule. */ - cfs_list_t tr_nids; + struct list_head tr_nids; /** Nid list string of the rule.*/ char *tr_nids_str; /** Jobid list of the rule. */ - cfs_list_t tr_jobids; + struct list_head tr_jobids; /** Jobid list string of the rule.*/ char *tr_jobids_str; /** RPC/s limit. */ @@ -117,7 +117,7 @@ struct nrs_tbf_rule { /** Token bucket depth. */ __u64 tr_depth; /** List of client. */ - cfs_list_t tr_cli_list; + struct list_head tr_cli_list; /** Flags of the rule. */ __u32 tr_flags; /** Usage Reference count taken on the rule. */ @@ -155,7 +155,7 @@ struct nrs_tbf_bucket { * LRU list, updated on each access to client. Protected by * bucket lock of nrs_tbf_head::th_cli_hash. */ - cfs_list_t ntb_lru; + struct list_head ntb_lru; }; /** @@ -169,7 +169,7 @@ struct nrs_tbf_head { /** * List of rules. */ - cfs_list_t th_list; + struct list_head th_list; /** * Lock to protect the list of rules. 
*/ @@ -232,9 +232,9 @@ struct nrs_tbf_cmd { enum nrs_tbf_cmd_type tc_cmd; char *tc_name; __u64 tc_rpc_rate; - cfs_list_t tc_nids; + struct list_head tc_nids; char *tc_nids_str; - cfs_list_t tc_jobids; + struct list_head tc_jobids; char *tc_jobids_str; __u32 tc_valid_types; __u32 tc_rule_flags; @@ -244,7 +244,7 @@ struct nrs_tbf_req { /** * Linkage to queue. */ - cfs_list_t tr_list; + struct list_head tr_list; /** * Sequence of the request. */ diff --git a/lustre/include/lustre_sec.h b/lustre/include/lustre_sec.h index 5954292..f2096f9 100644 --- a/lustre/include/lustre_sec.h +++ b/lustre/include/lustre_sec.h @@ -531,17 +531,17 @@ struct ptlrpc_ctx_ops { PTLRPC_CTX_ERROR) struct ptlrpc_cli_ctx { - cfs_hlist_node_t cc_cache; /* linked into ctx cache */ - atomic_t cc_refcount; + struct hlist_node cc_cache; /* linked into ctx cache */ + atomic_t cc_refcount; struct ptlrpc_sec *cc_sec; struct ptlrpc_ctx_ops *cc_ops; - cfs_time_t cc_expire; /* in seconds */ - unsigned int cc_early_expire:1; - unsigned long cc_flags; - struct vfs_cred cc_vcred; + cfs_time_t cc_expire; /* in seconds */ + unsigned int cc_early_expire:1; + unsigned long cc_flags; + struct vfs_cred cc_vcred; spinlock_t cc_lock; - cfs_list_t cc_req_list; /* waiting reqs linked here */ - cfs_list_t cc_gc_chain; /* linked to gc chain */ + struct list_head cc_req_list; /* waiting reqs linked here */ + struct list_head cc_gc_chain; /* linked to gc chain */ }; /** @@ -856,12 +856,12 @@ struct ptlrpc_sec { struct obd_import *ps_import; spinlock_t ps_lock; - /* - * garbage collection - */ - cfs_list_t ps_gc_list; - cfs_time_t ps_gc_interval; /* in seconds */ - cfs_time_t ps_gc_next; /* in seconds */ + /* + * garbage collection + */ + struct list_head ps_gc_list; + cfs_time_t ps_gc_interval; /* in seconds */ + cfs_time_t ps_gc_next; /* in seconds */ }; static inline int sec_is_reverse(struct ptlrpc_sec *sec) diff --git a/lustre/include/md_object.h b/lustre/include/md_object.h index 0b44b64..f8d0f46 100644 --- a/lustre/include/md_object.h +++ b/lustre/include/md_object.h @@ -853,7 +853,7 @@ struct lu_local_obj_desc { __u32 llod_oid; int llod_is_index; const struct dt_index_features *llod_feat; - cfs_list_t llod_linkage; + struct list_head llod_linkage; }; int lustre_buf2som(void *buf, int rc, struct md_som_data *msd); diff --git a/lustre/include/obd.h b/lustre/include/obd.h index 2046f69..cf0eac2 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -180,16 +180,16 @@ struct obd_info { }; struct obd_type { - struct list_head typ_chain; - struct obd_ops *typ_dt_ops; - struct md_ops *typ_md_ops; - struct proc_dir_entry *typ_procroot; - struct proc_dir_entry *typ_procsym; - __u32 typ_sym_filter; - char *typ_name; - int typ_refcnt; - struct lu_device_type *typ_lu; - spinlock_t obd_type_lock; + struct list_head typ_chain; + struct obd_ops *typ_dt_ops; + struct md_ops *typ_md_ops; + struct proc_dir_entry *typ_procroot; + struct proc_dir_entry *typ_procsym; + __u32 typ_sym_filter; + char *typ_name; + int typ_refcnt; + struct lu_device_type *typ_lu; + spinlock_t obd_type_lock; }; struct brw_page { @@ -200,12 +200,12 @@ struct brw_page { }; struct timeout_item { - enum timeout_event ti_event; - cfs_time_t ti_timeout; - timeout_cb_t ti_cb; - void *ti_cb_data; - cfs_list_t ti_obd_list; - cfs_list_t ti_chain; + enum timeout_event ti_event; + cfs_time_t ti_timeout; + timeout_cb_t ti_cb; + void *ti_cb_data; + struct list_head ti_obd_list; + struct list_head ti_chain; }; #define OBD_MAX_RIF_DEFAULT 8 @@ -226,7 +226,7 @@ enum { struct mdc_rpc_lock; 
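
For reference, the conversion applied to client_obd below is the same mechanical substitution used across this series: cfs_list_t fields become struct list_head, and the CFS_* initializers and iterators become their native counterparts, with no intended change in behaviour. A minimal compilable sketch of the pattern, using a hypothetical demo_item structure rather than anything from the Lustre tree:

	#include <linux/list.h>

	struct demo_item {
		int			di_val;
		struct list_head	di_linkage;	/* node in demo_list */
	};

	/* LIST_HEAD() declares and statically initializes a list head */
	static LIST_HEAD(demo_list);

	static void demo_usage(struct demo_item *item)
	{
		struct demo_item *scan;

		INIT_LIST_HEAD(&item->di_linkage);	/* was CFS_INIT_LIST_HEAD() */
		list_add_tail(&item->di_linkage, &demo_list);	/* was cfs_list_add_tail() */

		/* was cfs_list_for_each_entry() */
		list_for_each_entry(scan, &demo_list, di_linkage)
			scan->di_val++;

		list_del_init(&item->di_linkage);	/* was cfs_list_del_init() */
	}
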
struct obd_import; struct client_obd { - struct rw_semaphore cl_sem; + struct rw_semaphore cl_sem; struct obd_uuid cl_target_uuid; struct obd_import *cl_import; /* ptlrpc connection state */ int cl_conn_count; @@ -252,16 +252,16 @@ struct client_obd { * be used to add a page into cache. As a solution, we reserve maximum * grant before trying to dirty a page and unreserve the rest. * See osc_{reserve|unreserve}_grant for details. */ - long cl_reserved_grant; - cfs_list_t cl_cache_waiters; /* waiting for cache/grant */ - cfs_time_t cl_next_shrink_grant; /* jiffies */ - cfs_list_t cl_grant_shrink_list; /* Timeout event list */ - int cl_grant_shrink_interval; /* seconds */ + long cl_reserved_grant; + struct list_head cl_cache_waiters; /* waiting for cache/grant */ + cfs_time_t cl_next_shrink_grant; /* jiffies */ + struct list_head cl_grant_shrink_list; /* Timeout event list */ + int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ - int cl_chunkbits; - int cl_extent_tax; /* extent overhead, by bytes */ + int cl_chunkbits; + int cl_extent_tax; /* extent overhead, by bytes */ /* keep track of objects that have lois that contain pages which * have been queued for async brw. this lock also protects the @@ -284,49 +284,49 @@ struct client_obd { * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. */ - client_obd_lock_t cl_loi_list_lock; - cfs_list_t cl_loi_ready_list; - cfs_list_t cl_loi_hp_ready_list; - cfs_list_t cl_loi_write_list; - cfs_list_t cl_loi_read_list; - int cl_r_in_flight; - int cl_w_in_flight; + client_obd_lock_t cl_loi_list_lock; + struct list_head cl_loi_ready_list; + struct list_head cl_loi_hp_ready_list; + struct list_head cl_loi_write_list; + struct list_head cl_loi_read_list; + int cl_r_in_flight; + int cl_w_in_flight; /* just a sum of the loi/lop pending numbers to be exported by /proc */ - atomic_t cl_pending_w_pages; - atomic_t cl_pending_r_pages; - __u32 cl_max_pages_per_rpc; - int cl_max_rpcs_in_flight; - struct obd_histogram cl_read_rpc_hist; - struct obd_histogram cl_write_rpc_hist; - struct obd_histogram cl_read_page_hist; - struct obd_histogram cl_write_page_hist; - struct obd_histogram cl_read_offset_hist; - struct obd_histogram cl_write_offset_hist; + atomic_t cl_pending_w_pages; + atomic_t cl_pending_r_pages; + __u32 cl_max_pages_per_rpc; + int cl_max_rpcs_in_flight; + struct obd_histogram cl_read_rpc_hist; + struct obd_histogram cl_write_rpc_hist; + struct obd_histogram cl_read_page_hist; + struct obd_histogram cl_write_page_hist; + struct obd_histogram cl_read_offset_hist; + struct obd_histogram cl_write_offset_hist; /* lru for osc caching pages */ struct cl_client_cache *cl_cache; - cfs_list_t cl_lru_osc; /* member of cl_cache->ccc_lru */ + struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */ atomic_t *cl_lru_left; atomic_t cl_lru_busy; atomic_t cl_lru_shrinkers; atomic_t cl_lru_in_list; - cfs_list_t cl_lru_list; /* lru page list */ + struct list_head cl_lru_list; /* lru page list */ client_obd_lock_t cl_lru_list_lock; /* page list protector */ atomic_t cl_unstable_count; /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ - atomic_t cl_destroy_in_flight; - wait_queue_head_t cl_destroy_waitq; + atomic_t cl_destroy_in_flight; + wait_queue_head_t cl_destroy_waitq; struct mdc_rpc_lock *cl_rpc_lock; struct mdc_rpc_lock *cl_close_lock; /* mgc datastruct */ - struct mutex 
cl_mgc_mutex; + struct mutex cl_mgc_mutex; struct local_oid_storage *cl_mgc_los; - struct dt_object *cl_mgc_configs_dir; - atomic_t cl_mgc_refcount; - struct obd_export *cl_mgc_mgsexp; + struct dt_object *cl_mgc_configs_dir; + atomic_t cl_mgc_refcount; + struct obd_export *cl_mgc_mgsexp; /* checksumming for data sent over the network */ unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */ @@ -364,12 +364,12 @@ struct obd_id_info { }; struct echo_client_obd { - struct obd_export *ec_exp; /* the local connection to osc/lov */ + struct obd_export *ec_exp; /* the local connection to osc/lov */ spinlock_t ec_lock; - cfs_list_t ec_objects; - cfs_list_t ec_locks; - int ec_nstripes; - __u64 ec_unique; + struct list_head ec_objects; + struct list_head ec_locks; + int ec_nstripes; + __u64 ec_unique; }; /* Generic subset of OSTs */ @@ -385,7 +385,7 @@ struct ost_pool { #define OBD_STATFS_CACHE_SECONDS 1 struct lov_tgt_desc { - cfs_list_t ltd_kill; + struct list_head ltd_kill; struct obd_uuid ltd_uuid; struct obd_device *ltd_obd; struct obd_export *ltd_exp; @@ -623,11 +623,11 @@ struct obd_device { cfs_hash_t *obd_nid_hash; /* nid stats body */ cfs_hash_t *obd_nid_stats_hash; - cfs_list_t obd_nid_stats; - atomic_t obd_refcount; - cfs_list_t obd_exports; - cfs_list_t obd_unlinked_exports; - cfs_list_t obd_delayed_exports; + struct list_head obd_nid_stats; + atomic_t obd_refcount; + struct list_head obd_exports; + struct list_head obd_unlinked_exports; + struct list_head obd_delayed_exports; struct list_head obd_lwp_list; int obd_num_exports; spinlock_t obd_nid_lock; @@ -647,37 +647,39 @@ struct obd_device { struct obd_notify_upcall obd_upcall; struct obd_export *obd_self_export; struct obd_export *obd_lwp_export; - /* list of exports in LRU order, for ping evictor, with obd_dev_lock */ - cfs_list_t obd_exports_timed; - time_t obd_eviction_timer; /* for ping evictor */ + /* list of exports in LRU order, for ping evictor, with obd_dev_lock */ + struct list_head obd_exports_timed; + time_t obd_eviction_timer; /* for ping evictor */ int obd_max_recoverable_clients; atomic_t obd_connected_clients; int obd_stale_clients; /* this lock protects all recovery list_heads, timer and * obd_next_recovery_transno value */ - spinlock_t obd_recovery_task_lock; - __u64 obd_next_recovery_transno; - int obd_replayed_requests; - int obd_requests_queued_for_recovery; - wait_queue_head_t obd_next_transno_waitq; + spinlock_t obd_recovery_task_lock; + __u64 obd_next_recovery_transno; + int obd_replayed_requests; + int obd_requests_queued_for_recovery; + wait_queue_head_t obd_next_transno_waitq; /* protected by obd_recovery_task_lock */ - struct timer_list obd_recovery_timer; - time_t obd_recovery_start; /* seconds */ - time_t obd_recovery_end; /* seconds, for lprocfs_status */ - int obd_recovery_time_hard; - int obd_recovery_timeout; - int obd_recovery_ir_factor; + struct timer_list obd_recovery_timer; + /* seconds */ + time_t obd_recovery_start; + /* seconds, for lprocfs_status */ + time_t obd_recovery_end; + int obd_recovery_time_hard; + int obd_recovery_timeout; + int obd_recovery_ir_factor; /* new recovery stuff from CMD2 */ - struct target_recovery_data obd_recovery_data; - int obd_replayed_locks; - atomic_t obd_req_replay_clients; - atomic_t obd_lock_replay_clients; + struct target_recovery_data obd_recovery_data; + int obd_replayed_locks; + atomic_t obd_req_replay_clients; + atomic_t obd_lock_replay_clients; /* all lists are protected by obd_recovery_task_lock */ - cfs_list_t obd_req_replay_queue; - cfs_list_t 
obd_lock_replay_queue; - cfs_list_t obd_final_req_queue; + struct list_head obd_req_replay_queue; + struct list_head obd_lock_replay_queue; + struct list_head obd_final_req_queue; union { #ifdef HAVE_SERVER_SUPPORT @@ -705,14 +707,14 @@ struct obd_device { struct lprocfs_seq_vars *obd_vars; atomic_t obd_evict_inprogress; wait_queue_head_t obd_evict_inprogress_waitq; - cfs_list_t obd_evict_list; /* protected with pet_lock */ + struct list_head obd_evict_list; /* protected with pet_lock */ /** * Ldlm pool part. Save last calculated SLV and Limit. */ rwlock_t obd_pool_lock; - int obd_pool_limit; - __u64 obd_pool_slv; + int obd_pool_limit; + __u64 obd_pool_slv; /** * A list of outstanding class_incref()'s against this obd. For diff --git a/lustre/include/obd_class.h b/lustre/include/obd_class.h index 331a9e6..f9ba3d8 100644 --- a/lustre/include/obd_class.h +++ b/lustre/include/obd_class.h @@ -73,6 +73,8 @@ /* OBD Device Declarations */ extern struct obd_device *obd_devs[MAX_OBD_DEVICES]; +extern struct list_head obd_types; +extern spinlock_t obd_types_lock; extern rwlock_t obd_dev_lock; /* OBD Operations Declarations */ @@ -198,7 +200,7 @@ enum { struct config_llog_data { struct ldlm_res_id cld_resid; struct config_llog_instance cld_cfg; - cfs_list_t cld_list_chain; + struct list_head cld_list_chain; atomic_t cld_refcount; struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */ struct config_llog_data *cld_params; /* common parameters log */ @@ -213,10 +215,10 @@ struct config_llog_data { }; struct lustre_profile { - cfs_list_t lp_list; - char *lp_profile; - char *lp_dt; - char *lp_md; + struct list_head lp_list; + char *lp_profile; + char *lp_dt; + char *lp_md; }; struct lustre_profile *class_get_profile(const char * prof); @@ -1898,7 +1900,7 @@ struct lwp_register_item { struct obd_export **lri_exp; register_lwp_cb lri_cb_func; void *lri_cb_data; - cfs_list_t lri_list; + struct list_head lri_list; char lri_name[MTI_NAME_MAXLEN]; }; diff --git a/lustre/include/obd_target.h b/lustre/include/obd_target.h index 968a508..330d44f 100644 --- a/lustre/include/obd_target.h +++ b/lustre/include/obd_target.h @@ -52,12 +52,12 @@ struct obd_device_target { struct filter_obd { /* NB this field MUST be first */ - struct obd_device_target fo_obt; + struct obd_device_target fo_obt; /* capability related */ - unsigned int fo_fl_oss_capa; - cfs_list_t fo_capa_keys; - cfs_hlist_head_t *fo_capa_hash; + unsigned int fo_fl_oss_capa; + struct list_head fo_capa_keys; + struct hlist_head *fo_capa_hash; }; struct echo_obd { diff --git a/lustre/obdclass/capa.c b/lustre/obdclass/capa.c index daa82bc..5b5b3b1 100644 --- a/lustre/obdclass/capa.c +++ b/lustre/obdclass/capa.c @@ -69,7 +69,7 @@ struct kmem_cache *capa_cachep; /* lock for capa hash/capa_list/fo_capa_keys */ DEFINE_SPINLOCK(capa_lock); -cfs_list_t capa_list[CAPA_SITE_MAX]; +struct list_head capa_list[CAPA_SITE_MAX]; static struct capa_hmac_alg capa_hmac_algs[] = { DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20), @@ -83,20 +83,20 @@ EXPORT_SYMBOL(capa_list); EXPORT_SYMBOL(capa_lock); EXPORT_SYMBOL(capa_count); -cfs_hlist_head_t *init_capa_hash(void) +struct hlist_head *init_capa_hash(void) { - cfs_hlist_head_t *hash; + struct hlist_head *hash; int nr_hash, i; OBD_ALLOC(hash, PAGE_CACHE_SIZE); if (!hash) return NULL; - nr_hash = PAGE_CACHE_SIZE / sizeof(cfs_hlist_head_t); + nr_hash = PAGE_CACHE_SIZE / sizeof(struct hlist_head); LASSERT(nr_hash > NR_CAPAHASH); for (i = 0; i < NR_CAPAHASH; i++) - CFS_INIT_HLIST_HEAD(hash + i); + INIT_HLIST_HEAD(hash + i); return 
hash; } EXPORT_SYMBOL(init_capa_hash); @@ -109,18 +109,18 @@ static inline int capa_on_server(struct obd_capa *ocapa) static inline void capa_delete(struct obd_capa *ocapa) { - LASSERT(capa_on_server(ocapa)); - cfs_hlist_del_init(&ocapa->u.tgt.c_hash); - cfs_list_del_init(&ocapa->c_list); - capa_count[ocapa->c_site]--; - /* release the ref when alloc */ - capa_put(ocapa); + LASSERT(capa_on_server(ocapa)); + hlist_del_init(&ocapa->u.tgt.c_hash); + list_del_init(&ocapa->c_list); + capa_count[ocapa->c_site]--; + /* release the ref when alloc */ + capa_put(ocapa); } -void cleanup_capa_hash(cfs_hlist_head_t *hash) +void cleanup_capa_hash(struct hlist_head *hash) { int i; - cfs_hlist_node_t *pos, *next; + struct hlist_node *pos, *next; struct obd_capa *oc; spin_lock(&capa_lock); @@ -151,9 +151,9 @@ static inline int capa_is_to_expire(struct obd_capa *oc) } static struct obd_capa *find_capa(struct lustre_capa *capa, - cfs_hlist_head_t *head, int alive) + struct hlist_head *head, int alive) { - cfs_hlist_node_t *pos; + struct hlist_node *pos; struct obd_capa *ocapa; int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa); @@ -174,30 +174,30 @@ static struct obd_capa *find_capa(struct lustre_capa *capa, } #define LRU_CAPA_DELETE_COUNT 12 -static inline void capa_delete_lru(cfs_list_t *head) +static inline void capa_delete_lru(struct list_head *head) { - struct obd_capa *ocapa; - cfs_list_t *node = head->next; - int count = 0; + struct obd_capa *ocapa; + struct list_head *node = head->next; + int count = 0; - /* free LRU_CAPA_DELETE_COUNT unused capa from head */ - while (count++ < LRU_CAPA_DELETE_COUNT) { - ocapa = cfs_list_entry(node, struct obd_capa, c_list); - node = node->next; + /* free LRU_CAPA_DELETE_COUNT unused capa from head */ + while (count++ < LRU_CAPA_DELETE_COUNT) { + ocapa = list_entry(node, struct obd_capa, c_list); + node = node->next; if (atomic_read(&ocapa->c_refc)) - continue; + continue; - DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru"); - capa_delete(ocapa); - } + DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru"); + capa_delete(ocapa); + } } /* add or update */ -struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa) +struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa) { - cfs_hlist_head_t *head = hash + capa_hashfn(&capa->lc_fid); + struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid); struct obd_capa *ocapa, *old = NULL; - cfs_list_t *list = &capa_list[CAPA_SITE_SERVER]; + struct list_head *list = &capa_list[CAPA_SITE_SERVER]; ocapa = alloc_capa(CAPA_SITE_SERVER); if (IS_ERR(ocapa)) @@ -208,8 +208,8 @@ struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa) if (!old) { ocapa->c_capa = *capa; set_capa_expiry(ocapa); - cfs_hlist_add_head(&ocapa->u.tgt.c_hash, head); - cfs_list_add_tail(&ocapa->c_list, list); + hlist_add_head(&ocapa->u.tgt.c_hash, head); + list_add_tail(&ocapa->c_list, list); capa_get(ocapa); capa_count[CAPA_SITE_SERVER]++; if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE) @@ -225,7 +225,7 @@ struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa) } EXPORT_SYMBOL(capa_add); -struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa, +struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa, int alive) { struct obd_capa *ocapa; @@ -233,8 +233,7 @@ struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa, spin_lock(&capa_lock); ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive); if 
(ocapa) { - cfs_list_move_tail(&ocapa->c_list, - &capa_list[CAPA_SITE_SERVER]); + list_move_tail(&ocapa->c_list, &capa_list[CAPA_SITE_SERVER]); capa_get(ocapa); } spin_unlock(&capa_lock); diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c index 7825e08..8878ed8 100644 --- a/lustre/obdclass/cl_io.c +++ b/lustre/obdclass/cl_io.c @@ -55,9 +55,9 @@ */ #define cl_io_for_each(slice, io) \ - cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage) + list_for_each_entry((slice), &io->ci_layers, cis_linkage) #define cl_io_for_each_reverse(slice, io) \ - cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage) + list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage) static inline int cl_io_type_is_valid(enum cl_io_type type) { @@ -109,10 +109,10 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); ENTRY; - while (!cfs_list_empty(&io->ci_layers)) { + while (!list_empty(&io->ci_layers)) { slice = container_of(io->ci_layers.prev, struct cl_io_slice, cis_linkage); - cfs_list_del_init(&slice->cis_linkage); + list_del_init(&slice->cis_linkage); if (slice->cis_iop->op[io->ci_type].cio_fini != NULL) slice->cis_iop->op[io->ci_type].cio_fini(env, slice); /* @@ -161,10 +161,10 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io, ENTRY; io->ci_type = iot; - CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo); - CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr); - CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done); - CFS_INIT_LIST_HEAD(&io->ci_layers); + INIT_LIST_HEAD(&io->ci_lockset.cls_todo); + INIT_LIST_HEAD(&io->ci_lockset.cls_curr); + INIT_LIST_HEAD(&io->ci_lockset.cls_done); + INIT_LIST_HEAD(&io->ci_layers); result = 0; cl_object_for_each(scan, obj) { @@ -299,34 +299,33 @@ static void cl_io_locks_sort(struct cl_io *io) done = 1; prev = NULL; - cfs_list_for_each_entry_safe(curr, temp, - &io->ci_lockset.cls_todo, - cill_linkage) { - if (prev != NULL) { - switch (cl_lock_descr_sort(&prev->cill_descr, - &curr->cill_descr)) { - case 0: - /* - * IMPOSSIBLE: Identical locks are - * already removed at - * this point. - */ - default: - LBUG(); - case +1: - cfs_list_move_tail(&curr->cill_linkage, - &prev->cill_linkage); - done = 0; - continue; /* don't change prev: it's - * still "previous" */ - case -1: /* already in order */ - break; - } - } - prev = curr; - } - } while (!done); - EXIT; + list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo, + cill_linkage) { + if (prev != NULL) { + switch (cl_lock_descr_sort(&prev->cill_descr, + &curr->cill_descr)) { + case 0: + /* + * IMPOSSIBLE: Identical locks are + * already removed at + * this point. 
+ */ + default: + LBUG(); + case +1: + list_move_tail(&curr->cill_linkage, + &prev->cill_linkage); + done = 0; + continue; /* don't change prev: it's + * still "previous" */ + case -1: /* already in order */ + break; + } + } + prev = curr; + } + } while (!done); + EXIT; } /** @@ -335,37 +334,36 @@ static void cl_io_locks_sort(struct cl_io *io) * \retval +ve there is a matching lock in the \a queue * \retval 0 there are no matching locks in the \a queue */ -int cl_queue_match(const cfs_list_t *queue, +int cl_queue_match(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; + struct cl_io_lock_link *scan; + ENTRY; - ENTRY; - cfs_list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_match(&scan->cill_descr, need)) - RETURN(+1); - } - RETURN(0); + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_match(&scan->cill_descr, need)) + RETURN(+1); + } + RETURN(0); } EXPORT_SYMBOL(cl_queue_match); -static int cl_queue_merge(const cfs_list_t *queue, +static int cl_queue_merge(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; - - ENTRY; - cfs_list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_cmp(&scan->cill_descr, need)) - continue; - cl_lock_descr_merge(&scan->cill_descr, need); - CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", - scan->cill_descr.cld_mode, scan->cill_descr.cld_start, - scan->cill_descr.cld_end); - RETURN(+1); - } - RETURN(0); + struct cl_io_lock_link *scan; + ENTRY; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_cmp(&scan->cill_descr, need)) + continue; + cl_lock_descr_merge(&scan->cill_descr, need); + CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", + scan->cill_descr.cld_mode, scan->cill_descr.cld_start, + scan->cill_descr.cld_end); + RETURN(+1); + } + RETURN(0); } static int cl_lockset_match(const struct cl_lockset *set, @@ -395,12 +393,11 @@ static int cl_lockset_lock_one(const struct lu_env *env, if (!IS_ERR(lock)) { link->cill_lock = lock; - cfs_list_move(&link->cill_linkage, &set->cls_curr); + list_move(&link->cill_linkage, &set->cls_curr); if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) { result = cl_wait(env, lock); if (result == 0) - cfs_list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); } else result = 0; } else @@ -414,7 +411,7 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io, struct cl_lock *lock = link->cill_lock; ENTRY; - cfs_list_del_init(&link->cill_linkage); + list_del_init(&link->cill_linkage); if (lock != NULL) { cl_lock_release(env, lock, "io", io); link->cill_lock = NULL; @@ -427,36 +424,35 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io, static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, struct cl_lockset *set) { - struct cl_io_lock_link *link; - struct cl_io_lock_link *temp; - struct cl_lock *lock; - int result; + struct cl_io_lock_link *link; + struct cl_io_lock_link *temp; + struct cl_lock *lock; + int result; - ENTRY; - result = 0; - cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { - if (!cl_lockset_match(set, &link->cill_descr)) { - /* XXX some locking to guarantee that locks aren't - * expanded in between. 
*/ - result = cl_lockset_lock_one(env, io, set, link); - if (result != 0) - break; - } else - cl_lock_link_fini(env, io, link); - } - if (result == 0) { - cfs_list_for_each_entry_safe(link, temp, - &set->cls_curr, cill_linkage) { - lock = link->cill_lock; - result = cl_wait(env, lock); - if (result == 0) - cfs_list_move(&link->cill_linkage, - &set->cls_done); - else - break; - } - } - RETURN(result); + ENTRY; + result = 0; + list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { + if (!cl_lockset_match(set, &link->cill_descr)) { + /* XXX some locking to guarantee that locks aren't + * expanded in between. */ + result = cl_lockset_lock_one(env, io, set, link); + if (result != 0) + break; + } else + cl_lock_link_fini(env, io, link); + } + if (result == 0) { + list_for_each_entry_safe(link, temp, &set->cls_curr, + cill_linkage) { + lock = link->cill_lock; + result = cl_wait(env, lock); + if (result == 0) + list_move(&link->cill_linkage, &set->cls_done); + else + break; + } + } + RETURN(result); } /** @@ -512,23 +508,23 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io) ENTRY; set = &io->ci_lockset; - cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) - cl_lock_link_fini(env, io, link); + list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) + cl_lock_link_fini(env, io, link); - cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage) - cl_lock_link_fini(env, io, link); + list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage) + cl_lock_link_fini(env, io, link); - cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) { - cl_unuse(env, link->cill_lock); - cl_lock_link_fini(env, io, link); - } - cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL) - scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); - } - io->ci_state = CIS_UNLOCKED; - LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired); - EXIT; + list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) { + cl_unuse(env, link->cill_lock); + cl_lock_link_fini(env, io, link); + } + cl_io_for_each_reverse(scan, io) { + if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL) + scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); + } + io->ci_state = CIS_UNLOCKED; + LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired); + EXIT; } EXPORT_SYMBOL(cl_io_unlock); @@ -626,7 +622,7 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) result = +1; else { - cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo); + list_add(&link->cill_linkage, &io->ci_lockset.cls_todo); result = 0; } RETURN(result); @@ -829,7 +825,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, /* * If ->cio_submit() failed, no pages were sent. 
*/ - LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages))); + LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages))); RETURN(result); } EXPORT_SYMBOL(cl_io_submit_rw); @@ -869,7 +865,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, rc = cl_sync_io_wait(env, io, &queue->c2_qout, anchor, timeout); } else { - LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages)); + LASSERT(list_empty(&queue->c2_qout.pl_pages)); cl_page_list_for_each(pg, &queue->c2_qin) pg->cp_sync_io = NULL; } @@ -972,13 +968,13 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, struct cl_object *obj, const struct cl_io_operations *ops) { - cfs_list_t *linkage = &slice->cis_linkage; + struct list_head *linkage = &slice->cis_linkage; LASSERT((linkage->prev == NULL && linkage->next == NULL) || - cfs_list_empty(linkage)); + list_empty(linkage)); ENTRY; - cfs_list_add_tail(linkage, &io->ci_layers); + list_add_tail(linkage, &io->ci_layers); slice->cis_io = io; slice->cis_obj = obj; slice->cis_iop = ops; @@ -994,7 +990,7 @@ void cl_page_list_init(struct cl_page_list *plist) { ENTRY; plist->pl_nr = 0; - CFS_INIT_LIST_HEAD(&plist->pl_pages); + INIT_LIST_HEAD(&plist->pl_pages); plist->pl_owner = current; EXIT; } @@ -1011,8 +1007,8 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page) LASSERT(page->cp_owner != NULL); LINVRNT(plist->pl_owner == current); - LASSERT(cfs_list_empty(&page->cp_batch)); - cfs_list_add_tail(&page->cp_batch, &plist->pl_pages); + LASSERT(list_empty(&page->cp_batch)); + list_add_tail(&page->cp_batch, &plist->pl_pages); ++plist->pl_nr; lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); cl_page_get(page); @@ -1031,7 +1027,7 @@ void cl_page_list_del(const struct lu_env *env, LINVRNT(plist->pl_owner == current); ENTRY; - cfs_list_del_init(&page->cp_batch); + list_del_init(&page->cp_batch); --plist->pl_nr; lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); cl_page_put(env, page); @@ -1050,7 +1046,7 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, LINVRNT(src->pl_owner == current); ENTRY; - cfs_list_move_tail(&page->cp_batch, &dst->pl_pages); + list_move_tail(&page->cp_batch, &dst->pl_pages); --src->pl_nr; ++dst->pl_nr; lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue", @@ -1070,7 +1066,7 @@ void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src, LINVRNT(src->pl_owner == current); ENTRY; - cfs_list_move(&page->cp_batch, &dst->pl_pages); + list_move(&page->cp_batch, &dst->pl_pages); --src->pl_nr; ++dst->pl_nr; lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue", @@ -1115,7 +1111,7 @@ void cl_page_list_disown(const struct lu_env *env, cl_page_list_for_each_safe(page, temp, plist) { LASSERT(plist->pl_nr > 0); - cfs_list_del_init(&page->cp_batch); + list_del_init(&page->cp_batch); --plist->pl_nr; /* * cl_page_disown0 rather than usual cl_page_disown() is used, @@ -1327,7 +1323,7 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, const struct cl_req_operations *ops) { ENTRY; - cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers); + list_add_tail(&slice->crs_linkage, &req->crq_layers); slice->crs_dev = dev; slice->crs_ops = ops; slice->crs_req = req; @@ -1339,9 +1335,9 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req) { unsigned i; - LASSERT(cfs_list_empty(&req->crq_pages)); + LASSERT(list_empty(&req->crq_pages)); LASSERT(req->crq_nrpages == 0); - 
LINVRNT(cfs_list_empty(&req->crq_layers)); + LINVRNT(list_empty(&req->crq_layers)); LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL)); ENTRY; @@ -1370,7 +1366,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req, ENTRY; result = 0; - cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { + list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev); if (dev->cd_ops->cdo_req_init != NULL) { result = dev->cd_ops->cdo_req_init(env, @@ -1394,10 +1390,10 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc) /* * for the lack of list_for_each_entry_reverse_safe()... */ - while (!cfs_list_empty(&req->crq_layers)) { - slice = cfs_list_entry(req->crq_layers.prev, - struct cl_req_slice, crs_linkage); - cfs_list_del_init(&slice->crs_linkage); + while (!list_empty(&req->crq_layers)) { + slice = list_entry(req->crq_layers.prev, + struct cl_req_slice, crs_linkage); + list_del_init(&slice->crs_linkage); if (slice->crs_ops->cro_completion != NULL) slice->crs_ops->cro_completion(env, slice, rc); } @@ -1453,13 +1449,13 @@ void cl_req_page_add(const struct lu_env *env, ENTRY; - LASSERT(cfs_list_empty(&page->cp_flight)); + LASSERT(list_empty(&page->cp_flight)); LASSERT(page->cp_req == NULL); CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n", req, req->crq_type, req->crq_nrpages); - cfs_list_add_tail(&page->cp_flight, &req->crq_pages); + list_add_tail(&page->cp_flight, &req->crq_pages); ++req->crq_nrpages; page->cp_req = req; obj = cl_object_top(page->cp_obj); @@ -1486,10 +1482,10 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page) ENTRY; - LASSERT(!cfs_list_empty(&page->cp_flight)); + LASSERT(!list_empty(&page->cp_flight)); LASSERT(req->crq_nrpages > 0); - cfs_list_del_init(&page->cp_flight); + list_del_init(&page->cp_flight); --req->crq_nrpages; page->cp_req = NULL; EXIT; @@ -1515,7 +1511,7 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req) LASSERT(req->crq_o[i].ro_obj != NULL); result = 0; - cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) { + list_for_each_entry(slice, &req->crq_layers, crs_linkage) { if (slice->crs_ops->cro_prep != NULL) { result = slice->crs_ops->cro_prep(env, slice); if (result != 0) @@ -1538,14 +1534,14 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, struct cl_page *page; int i; - LASSERT(!cfs_list_empty(&req->crq_pages)); + LASSERT(!list_empty(&req->crq_pages)); ENTRY; /* Take any page to use as a model. 
*/ - page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight); + page = list_entry(req->crq_pages.next, struct cl_page, cp_flight); for (i = 0; i < req->crq_nrobjs; ++i) { - cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) { + list_for_each_entry(slice, &req->crq_layers, crs_linkage) { const struct cl_page_slice *scan; const struct cl_object *obj; diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c index 1f8b0e8..6aaf676 100644 --- a/lustre/obdclass/cl_lock.c +++ b/lustre/obdclass/cl_lock.c @@ -204,12 +204,12 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, struct cl_object *obj, const struct cl_lock_operations *ops) { - ENTRY; - slice->cls_lock = lock; - cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers); - slice->cls_obj = obj; - slice->cls_ops = ops; - EXIT; + ENTRY; + slice->cls_lock = lock; + list_add_tail(&slice->cls_linkage, &lock->cll_layers); + slice->cls_obj = obj; + slice->cls_ops = ops; + EXIT; } EXPORT_SYMBOL(cl_lock_slice_add); @@ -269,12 +269,12 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock) ENTRY; cl_lock_trace(D_DLMTRACE, env, "free lock", lock); - while (!cfs_list_empty(&lock->cll_layers)) { + while (!list_empty(&lock->cll_layers)) { struct cl_lock_slice *slice; - slice = cfs_list_entry(lock->cll_layers.next, - struct cl_lock_slice, cls_linkage); - cfs_list_del_init(lock->cll_layers.next); + slice = list_entry(lock->cll_layers.next, + struct cl_lock_slice, cls_linkage); + list_del_init(lock->cll_layers.next); slice->cls_ops->clo_fini(env, slice); } CS_LOCK_DEC(obj, total); @@ -311,7 +311,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock) if (atomic_dec_and_test(&lock->cll_ref)) { if (lock->cll_state == CLS_FREEING) { - LASSERT(cfs_list_empty(&lock->cll_linkage)); + LASSERT(list_empty(&lock->cll_linkage)); cl_lock_free(env, lock); } CS_LOCK_DEC(obj, busy); @@ -387,9 +387,9 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, cl_object_get(obj); lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock); - CFS_INIT_LIST_HEAD(&lock->cll_layers); - CFS_INIT_LIST_HEAD(&lock->cll_linkage); - CFS_INIT_LIST_HEAD(&lock->cll_inclosure); + INIT_LIST_HEAD(&lock->cll_layers); + INIT_LIST_HEAD(&lock->cll_linkage); + INIT_LIST_HEAD(&lock->cll_inclosure); lu_ref_init(&lock->cll_reference); lu_ref_init(&lock->cll_holders); mutex_init(&lock->cll_guard); @@ -400,20 +400,19 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, CS_LOCK_INC(obj, total); CS_LOCK_INC(obj, create); cl_lock_lockdep_init(lock); - cfs_list_for_each_entry(obj, &head->loh_layers, - co_lu.lo_linkage) { - int err; - - err = obj->co_ops->coo_lock_init(env, obj, lock, io); - if (err != 0) { - cl_lock_finish(env, lock); - lock = ERR_PTR(err); - break; - } - } - } else - lock = ERR_PTR(-ENOMEM); - RETURN(lock); + list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) { + int err; + + err = obj->co_ops->coo_lock_init(env, obj, lock, io); + if (err != 0) { + cl_lock_finish(env, lock); + lock = ERR_PTR(err); + break; + } + } + } else + lock = ERR_PTR(-ENOMEM); + RETURN(lock); } /** @@ -481,7 +480,7 @@ static int cl_lock_fits_into(const struct lu_env *env, LINVRNT(cl_lock_invariant_trusted(env, lock)); ENTRY; - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_fits_into != NULL && !slice->cls_ops->clo_fits_into(env, slice, need, io)) RETURN(0); @@ -502,7 +501,7 
@@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env, head = cl_object_header(obj); assert_spin_locked(&head->coh_lock_guard); CS_LOCK_INC(obj, lookup); - cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) { + list_for_each_entry(lock, &head->coh_locks, cll_linkage) { int matched; matched = cl_lock_ext_match(&lock->cll_descr, need) && @@ -645,7 +644,7 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, LINVRNT(cl_lock_invariant_trusted(NULL, lock)); ENTRY; - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype) RETURN(slice); } @@ -802,8 +801,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock) const struct cl_lock_slice *slice; lock->cll_flags |= CLF_CANCELLED; - cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { + list_for_each_entry_reverse(slice, &lock->cll_layers, + cls_linkage) { if (slice->cls_ops->clo_cancel != NULL) slice->cls_ops->clo_cancel(env, slice); } @@ -837,26 +836,26 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock) if (in_cache) /* coh_locks cache holds a refcount. */ cl_lock_put(env, lock); - /* - * From now on, no new references to this lock can be acquired - * by cl_lock_lookup(). - */ - cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_delete != NULL) - slice->cls_ops->clo_delete(env, slice); - } - /* - * From now on, no new references to this lock can be acquired - * by layer-specific means (like a pointer from struct - * ldlm_lock in osc, or a pointer from top-lock to sub-lock in - * lov). - * - * Lock will be finally freed in cl_lock_put() when last of - * existing references goes away. - */ - } - EXIT; + /* + * From now on, no new references to this lock can be acquired + * by cl_lock_lookup(). + */ + list_for_each_entry_reverse(slice, &lock->cll_layers, + cls_linkage) { + if (slice->cls_ops->clo_delete != NULL) + slice->cls_ops->clo_delete(env, slice); + } + /* + * From now on, no new references to this lock can be acquired + * by layer-specific means (like a pointer from struct + * ldlm_lock in osc, or a pointer from top-lock to sub-lock in + * lov). + * + * Lock will be finally freed in cl_lock_put() when last of + * existing references goes away. 
+ */ + } + EXIT; } /** @@ -1009,7 +1008,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_is_mutexed(lock)); LINVRNT(cl_lock_invariant(env, lock)); - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) if (slice->cls_ops->clo_state != NULL) slice->cls_ops->clo_state(env, slice, state); wake_up_all(&lock->cll_wq); @@ -1067,29 +1066,29 @@ EXPORT_SYMBOL(cl_lock_state_set); static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) { - const struct cl_lock_slice *slice; - int result; - - do { - result = 0; + const struct cl_lock_slice *slice; + int result; - LINVRNT(cl_lock_is_mutexed(lock)); - LINVRNT(cl_lock_invariant(env, lock)); - LASSERT(lock->cll_state == CLS_INTRANSIT); + do { + result = 0; - result = -ENOSYS; - cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_unuse != NULL) { - result = slice->cls_ops->clo_unuse(env, slice); - if (result != 0) - break; - } - } - LASSERT(result != -ENOSYS); - } while (result == CLO_REPEAT); + LINVRNT(cl_lock_is_mutexed(lock)); + LINVRNT(cl_lock_invariant(env, lock)); + LASSERT(lock->cll_state == CLS_INTRANSIT); + + result = -ENOSYS; + list_for_each_entry_reverse(slice, &lock->cll_layers, + cls_linkage) { + if (slice->cls_ops->clo_unuse != NULL) { + result = slice->cls_ops->clo_unuse(env, slice); + if (result != 0) + break; + } + } + LASSERT(result != -ENOSYS); + } while (result == CLO_REPEAT); - return result; + return result; } /** @@ -1113,7 +1112,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) result = -ENOSYS; state = cl_lock_intransit(env, lock); - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_use != NULL) { result = slice->cls_ops->clo_use(env, slice); if (result != 0) @@ -1166,7 +1165,7 @@ static int cl_enqueue_kick(const struct lu_env *env, ENTRY; result = -ENOSYS; - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_enqueue != NULL) { result = slice->cls_ops->clo_enqueue(env, slice, io, flags); @@ -1496,7 +1495,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock) break; result = -ENOSYS; - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_wait != NULL) { result = slice->cls_ops->clo_wait(env, slice); if (result != 0) @@ -1569,7 +1568,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); pound = 0; - cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_weigh != NULL) { ounce = slice->cls_ops->clo_weigh(env, slice); pound += ounce; @@ -1606,7 +1605,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_is_mutexed(lock)); LINVRNT(cl_lock_invariant(env, lock)); - cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_modify != NULL) { result = slice->cls_ops->clo_modify(env, slice, desc); if (result != 0) @@ -1639,7 +1638,7 @@ void cl_lock_closure_init(const struct lu_env *env, 
LINVRNT(cl_lock_is_mutexed(origin)); LINVRNT(cl_lock_invariant(env, origin)); - CFS_INIT_LIST_HEAD(&closure->clc_list); + INIT_LIST_HEAD(&closure->clc_list); closure->clc_origin = origin; closure->clc_wait = wait; closure->clc_nr = 0; @@ -1668,7 +1667,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, result = cl_lock_enclosure(env, lock, closure); if (result == 0) { - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { if (slice->cls_ops->clo_closure != NULL) { result = slice->cls_ops->clo_closure(env, slice, closure); @@ -1701,10 +1700,10 @@ int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, * If lock->cll_inclosure is not empty, lock is already in * this closure. */ - if (cfs_list_empty(&lock->cll_inclosure)) { + if (list_empty(&lock->cll_inclosure)) { cl_lock_get_trust(lock); lu_ref_add(&lock->cll_reference, "closure", closure); - cfs_list_add(&lock->cll_inclosure, &closure->clc_list); + list_add(&lock->cll_inclosure, &closure->clc_list); closure->clc_nr++; } else cl_lock_mutex_put(env, lock); @@ -1734,19 +1733,19 @@ EXPORT_SYMBOL(cl_lock_enclosure); void cl_lock_disclosure(const struct lu_env *env, struct cl_lock_closure *closure) { - struct cl_lock *scan; - struct cl_lock *temp; - - cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); - cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list, - cll_inclosure){ - cfs_list_del_init(&scan->cll_inclosure); - cl_lock_mutex_put(env, scan); - lu_ref_del(&scan->cll_reference, "closure", closure); - cl_lock_put(env, scan); - closure->clc_nr--; - } - LASSERT(closure->clc_nr == 0); + struct cl_lock *scan; + struct cl_lock *temp; + + cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); + list_for_each_entry_safe(scan, temp, &closure->clc_list, + cll_inclosure){ + list_del_init(&scan->cll_inclosure); + cl_lock_mutex_put(env, scan); + lu_ref_del(&scan->cll_reference, "closure", closure); + cl_lock_put(env, scan); + closure->clc_nr--; + } + LASSERT(closure->clc_nr == 0); } EXPORT_SYMBOL(cl_lock_disclosure); @@ -1754,7 +1753,7 @@ EXPORT_SYMBOL(cl_lock_disclosure); void cl_lock_closure_fini(struct cl_lock_closure *closure) { LASSERT(closure->clc_nr == 0); - LASSERT(cfs_list_empty(&closure->clc_list)); + LASSERT(list_empty(&closure->clc_list)); } EXPORT_SYMBOL(cl_lock_closure_fini); @@ -1877,7 +1876,7 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, spin_lock(&head->coh_lock_guard); /* It is fine to match any group lock since there could be only one * with a uniq gid and it conflicts with all other lock modes too */ - cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) { + list_for_each_entry(scan, &head->coh_locks, cll_linkage) { if (scan != except && (scan->cll_descr.cld_mode == CLM_GROUP || cl_lock_ext_match(&scan->cll_descr, need)) && @@ -1919,7 +1918,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel) head = cl_object_header(obj); spin_lock(&head->coh_lock_guard); - while (!cfs_list_empty(&head->coh_locks)) { + while (!list_empty(&head->coh_locks)) { lock = container_of(head->coh_locks.next, struct cl_lock, cll_linkage); cl_lock_get_trust(lock); @@ -2170,7 +2169,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie, cl_lock_descr_print(env, cookie, printer, &lock->cll_descr); (*printer)(env, cookie, " {\n"); - cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { + list_for_each_entry(slice, &lock->cll_layers, 
cls_linkage) { (*printer)(env, cookie, " %s@%p: ", slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name, slice); diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c index ac46d07..357e73f 100644 --- a/lustre/obdclass/cl_object.c +++ b/lustre/obdclass/cl_object.c @@ -84,7 +84,7 @@ int cl_object_header_init(struct cl_object_header *h) spin_lock_init(&h->coh_attr_guard); lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class); lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class); - CFS_INIT_LIST_HEAD(&h->coh_locks); + INIT_LIST_HEAD(&h->coh_locks); h->coh_page_bufsize = 0; } RETURN(result); @@ -96,7 +96,7 @@ EXPORT_SYMBOL(cl_object_header_init); */ void cl_object_header_fini(struct cl_object_header *h) { - LASSERT(cfs_list_empty(&h->coh_locks)); + LASSERT(list_empty(&h->coh_locks)); lu_object_header_fini(&h->coh_lu); } EXPORT_SYMBOL(cl_object_header_fini); @@ -219,7 +219,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { if (obj->co_ops->coo_attr_get != NULL) { result = obj->co_ops->coo_attr_get(env, obj, attr); if (result != 0) { @@ -249,20 +249,19 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, assert_spin_locked(cl_object_attr_guard(obj)); ENTRY; - top = obj->co_lu.lo_header; - result = 0; - cfs_list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_set != NULL) { - result = obj->co_ops->coo_attr_set(env, obj, attr, v); - if (result != 0) { - if (result > 0) - result = 0; - break; - } - } - } - RETURN(result); + top = obj->co_lu.lo_header; + result = 0; + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_attr_set != NULL) { + result = obj->co_ops->coo_attr_set(env, obj, attr, v); + if (result != 0) { + if (result > 0) + result = 0; + break; + } + } + } + RETURN(result); } EXPORT_SYMBOL(cl_object_attr_set); @@ -283,8 +282,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, ENTRY; top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { if (obj->co_ops->coo_glimpse != NULL) { result = obj->co_ops->coo_glimpse(env, obj, lvb); if (result != 0) @@ -312,7 +310,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj, ENTRY; top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { + list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { if (obj->co_ops->coo_conf_set != NULL) { result = obj->co_ops->coo_conf_set(env, obj, conf); if (result != 0) @@ -335,7 +333,7 @@ void cl_object_prune(const struct lu_env *env, struct cl_object *obj) top = obj->co_lu.lo_header; result = 0; - cfs_list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) { + list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) { if (o->co_ops->coo_prune != NULL) { result = o->co_ops->coo_prune(env, o); if (result != 0) @@ -384,7 +382,7 @@ int cl_object_has_locks(struct cl_object *obj) int has; spin_lock(&head->coh_lock_guard); - has = cfs_list_empty(&head->coh_locks); + has = list_empty(&head->coh_locks); spin_unlock(&head->coh_lock_guard); return (has == 0); @@ -537,7 +535,7 @@ EXPORT_SYMBOL(cl_site_stats_print); * bz20044, bz22683. 
*/ -static CFS_LIST_HEAD(cl_envs); +static struct list_head cl_envs; static unsigned cl_envs_cached_nr = 0; static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit * for now. */ @@ -555,7 +553,7 @@ struct cl_env { * This allows cl_env to be entered into cl_env_hash which implements * the current thread -> client environment lookup. */ - cfs_hlist_node_t ce_node; + struct hlist_node ce_node; #endif /** * Owner for the current cl_env. @@ -574,7 +572,7 @@ struct cl_env { * Linkage into global list of all client environments. Used for * garbage collection. */ - cfs_list_t ce_linkage; + struct list_head ce_linkage; /* * */ @@ -627,14 +625,15 @@ static unsigned cl_env_hops_hash(cfs_hash_t *lh, #endif } -static void *cl_env_hops_obj(cfs_hlist_node_t *hn) +static void *cl_env_hops_obj(struct hlist_node *hn) { - struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node); - LASSERT(cle->ce_magic == &cl_env_init0); - return (void *)cle; + struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node); + + LASSERT(cle->ce_magic == &cl_env_init0); + return (void *)cle; } -static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn) +static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn) { struct cl_env *cle = cl_env_hops_obj(hn); @@ -642,9 +641,9 @@ static int cl_env_hops_keycmp(const void *key, cfs_hlist_node_t *hn) return (key == cle->ce_owner); } -static void cl_env_hops_noop(cfs_hash_t *hs, cfs_hlist_node_t *hn) +static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn) { - struct cl_env *cle = cfs_hlist_entry(hn, struct cl_env, ce_node); + struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node); LASSERT(cle->ce_magic == &cl_env_init0); } @@ -763,7 +762,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) if (cle != NULL) { int rc; - CFS_INIT_LIST_HEAD(&cle->ce_linkage); + INIT_LIST_HEAD(&cle->ce_linkage); cle->ce_magic = &cl_env_init0; env = &cle->ce_lu; rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags); @@ -804,12 +803,12 @@ static struct lu_env *cl_env_obtain(void *debug) ENTRY; spin_lock(&cl_envs_guard); - LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs))); + LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); if (cl_envs_cached_nr > 0) { int rc; cle = container_of(cl_envs.next, struct cl_env, ce_linkage); - cfs_list_del_init(&cle->ce_linkage); + list_del_init(&cle->ce_linkage); cl_envs_cached_nr--; spin_unlock(&cl_envs_guard); @@ -931,9 +930,9 @@ unsigned cl_env_cache_purge(unsigned nr) ENTRY; spin_lock(&cl_envs_guard); - for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) { + for (; !list_empty(&cl_envs) && nr > 0; --nr) { cle = container_of(cl_envs.next, struct cl_env, ce_linkage); - cfs_list_del_init(&cle->ce_linkage); + list_del_init(&cle->ce_linkage); LASSERT(cl_envs_cached_nr > 0); cl_envs_cached_nr--; spin_unlock(&cl_envs_guard); @@ -941,7 +940,7 @@ unsigned cl_env_cache_purge(unsigned nr) cl_env_fini(cle); spin_lock(&cl_envs_guard); } - LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs))); + LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs))); spin_unlock(&cl_envs_guard); RETURN(nr); } @@ -979,7 +978,7 @@ void cl_env_put(struct lu_env *env, int *refcheck) (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD && (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) { spin_lock(&cl_envs_guard); - cfs_list_add(&cle->ce_linkage, &cl_envs); + list_add(&cle->ce_linkage, &cl_envs); cl_envs_cached_nr++; spin_unlock(&cl_envs_guard); } else @@ -1125,7 +1124,7 @@ 
static int cl_env_percpu_init(void) cle = &cl_env_percpu[i]; env = &cle->ce_lu; - CFS_INIT_LIST_HEAD(&cle->ce_linkage); + INIT_LIST_HEAD(&cle->ce_linkage); cle->ce_magic = &cl_env_init0; rc = lu_env_init(env, LCT_CL_THREAD | tags); if (rc == 0) { @@ -1339,11 +1338,13 @@ static struct lu_kmem_descr cl_object_caches[] = { */ int cl_global_init(void) { - int result; + int result; - result = cl_env_store_init(); - if (result) - return result; + INIT_LIST_HEAD(&cl_envs); + + result = cl_env_store_init(); + if (result) + return result; result = lu_kmem_init(cl_object_caches); if (result) diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c index 96c5295..6e70d46 100644 --- a/lustre/obdclass/cl_page.c +++ b/lustre/obdclass/cl_page.c @@ -123,7 +123,7 @@ cl_page_at_trusted(const struct cl_page *page, const struct cl_page_slice *slice; ENTRY; - cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { + list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype) RETURN(slice); } @@ -135,18 +135,18 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_object *obj = page->cp_obj; int pagesize = cl_object_header(obj)->coh_page_bufsize; - PASSERT(env, page, cfs_list_empty(&page->cp_batch)); + PASSERT(env, page, list_empty(&page->cp_batch)); PASSERT(env, page, page->cp_owner == NULL); PASSERT(env, page, page->cp_req == NULL); PASSERT(env, page, page->cp_state == CPS_FREEING); ENTRY; - while (!cfs_list_empty(&page->cp_layers)) { + while (!list_empty(&page->cp_layers)) { struct cl_page_slice *slice; - slice = cfs_list_entry(page->cp_layers.next, - struct cl_page_slice, cpl_linkage); - cfs_list_del_init(page->cp_layers.next); + slice = list_entry(page->cp_layers.next, + struct cl_page_slice, cpl_linkage); + list_del_init(page->cp_layers.next); if (unlikely(slice->cpl_ops->cpo_fini != NULL)) slice->cpl_ops->cpo_fini(env, slice); } @@ -190,13 +190,13 @@ struct cl_page *cl_page_alloc(const struct lu_env *env, page->cp_vmpage = vmpage; cl_page_state_set_trust(page, CPS_CACHED); page->cp_type = type; - CFS_INIT_LIST_HEAD(&page->cp_layers); - CFS_INIT_LIST_HEAD(&page->cp_batch); - CFS_INIT_LIST_HEAD(&page->cp_flight); + INIT_LIST_HEAD(&page->cp_layers); + INIT_LIST_HEAD(&page->cp_batch); + INIT_LIST_HEAD(&page->cp_flight); lu_ref_init(&page->cp_reference); head = o->co_lu.lo_header; - cfs_list_for_each_entry(o, &head->loh_layers, - co_lu.lo_linkage) { + list_for_each_entry(o, &head->loh_layers, + co_lu.lo_linkage) { if (o->co_ops->coo_page_init != NULL) { result = o->co_ops->coo_page_init(env, o, page, ind); @@ -388,7 +388,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page) LASSERT(atomic_read(&page->cp_ref) == 0); PASSERT(env, page, page->cp_owner == NULL); - PASSERT(env, page, cfs_list_empty(&page->cp_batch)); + PASSERT(env, page, list_empty(&page->cp_batch)); /* * Page is no longer reachable by other threads. Tear * it down. @@ -434,27 +434,27 @@ EXPORT_SYMBOL(cl_page_at); #define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname) -#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \ -({ \ - const struct lu_env *__env = (_env); \ - struct cl_page *__page = (_page); \ - const struct cl_page_slice *__scan; \ - int __result; \ - ptrdiff_t __op = (_op); \ - int (*__method)_proto; \ +#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) 
\ +({ \ + const struct lu_env *__env = (_env); \ + struct cl_page *__page = (_page); \ + const struct cl_page_slice *__scan; \ + int __result; \ + ptrdiff_t __op = (_op); \ + int (*__method)_proto; \ \ - __result = 0; \ - cfs_list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \ - __method = *(void **)((char *)__scan->cpl_ops + __op); \ - if (__method != NULL) { \ + __result = 0; \ + list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \ + __method = *(void **)((char *)__scan->cpl_ops + __op); \ + if (__method != NULL) { \ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \ - if (__result != 0) \ - break; \ - } \ - } \ - if (__result > 0) \ - __result = 0; \ - __result; \ + if (__result != 0) \ + break; \ + } \ + } \ + if (__result > 0) \ + __result = 0; \ + __result; \ }) #define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \ @@ -468,7 +468,7 @@ EXPORT_SYMBOL(cl_page_at); \ __result = 0; \ list_for_each_entry_reverse(__scan, &__page->cp_layers, \ - cpl_linkage) { \ + cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + __op); \ if (__method != NULL) { \ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \ @@ -489,8 +489,7 @@ do { \ ptrdiff_t __op = (_op); \ void (*__method)_proto; \ \ - cfs_list_for_each_entry(__scan, &__page->cp_layers, \ - cpl_linkage) { \ + list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + __op); \ if (__method != NULL) \ (*__method)(__env, __scan, ## __VA_ARGS__); \ @@ -506,8 +505,8 @@ do { \ void (*__method)_proto; \ \ /* get to the bottom page. */ \ - cfs_list_for_each_entry_reverse(__scan, &__page->cp_layers, \ - cpl_linkage) { \ + list_for_each_entry_reverse(__scan, &__page->cp_layers, \ + cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + __op); \ if (__method != NULL) \ (*__method)(__env, __scan, ## __VA_ARGS__); \ diff --git a/lustre/obdclass/class_obd.c b/lustre/obdclass/class_obd.c index ed02f4a..da38c9a 100644 --- a/lustre/obdclass/class_obd.c +++ b/lustre/obdclass/class_obd.c @@ -63,7 +63,7 @@ atomic_t libcfs_kmemory = {0}; struct obd_device *obd_devs[MAX_OBD_DEVICES]; EXPORT_SYMBOL(obd_devs); -cfs_list_t obd_types; +struct list_head obd_types; DEFINE_RWLOCK(obd_dev_lock); __u64 obd_max_pages = 0; @@ -514,7 +514,6 @@ int obd_init_checks(void) #define obd_init_checks() do {} while(0) #endif -extern spinlock_t obd_types_lock; extern int class_procfs_init(void); extern int class_procfs_clean(void); @@ -529,7 +528,7 @@ int init_obdclass(void) int lustre_register_fs(void); for (i = CAPA_SITE_CLIENT; i < CAPA_SITE_MAX; i++) - CFS_INIT_LIST_HEAD(&capa_list[i]); + INIT_LIST_HEAD(&capa_list[i]); #endif LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n"); @@ -561,7 +560,7 @@ int init_obdclass(void) if (err) return err; - CFS_INIT_LIST_HEAD(&obd_types); + INIT_LIST_HEAD(&obd_types); err = misc_register(&obd_psdev); if (err) { diff --git a/lustre/obdclass/dt_object.c b/lustre/obdclass/dt_object.c index 75f44f3..088c1cb 100644 --- a/lustre/obdclass/dt_object.c +++ b/lustre/obdclass/dt_object.c @@ -67,13 +67,13 @@ EXPORT_SYMBOL(dt_key); */ void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb) { - cfs_list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks); + list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks); } EXPORT_SYMBOL(dt_txn_callback_add); void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb) { - cfs_list_del_init(&cb->dtc_linkage); + list_del_init(&cb->dtc_linkage); } 
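
The dt_txn_callback_add()/dt_txn_callback_del() pair above shows why this substitution is behaviour-preserving: on Linux the cfs_ names were thin aliases, and list_del_init() in particular leaves the unlinked node pointing at itself, so callers may still test it with list_empty(). A short sketch, assuming a hypothetical helper not present in the tree:

	#include <linux/list.h>
	#include <linux/types.h>

	/* Returns true if the node was on a list and has been removed.
	 * Because list_del_init() re-initializes the node, this helper
	 * is safe to call repeatedly on the same (initialized) node. */
	static bool demo_remove(struct list_head *node)
	{
		if (list_empty(node))	/* self-pointing: already unlinked */
			return false;
		list_del_init(node);
		return true;
	}
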
EXPORT_SYMBOL(dt_txn_callback_del); @@ -86,7 +86,7 @@ int dt_txn_hook_start(const struct lu_env *env, if (th->th_local) return 0; - cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) { + list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) { if (cb->dtc_txn_start == NULL || !(cb->dtc_tag & env->le_ctx.lc_tags)) continue; @@ -107,7 +107,7 @@ int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn) if (txn->th_local) return 0; - cfs_list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) { + list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) { if (cb->dtc_txn_stop == NULL || !(cb->dtc_tag & env->le_ctx.lc_tags)) continue; @@ -121,24 +121,23 @@ EXPORT_SYMBOL(dt_txn_hook_stop); void dt_txn_hook_commit(struct thandle *txn) { - struct dt_txn_callback *cb; + struct dt_txn_callback *cb; - if (txn->th_local) - return; + if (txn->th_local) + return; - cfs_list_for_each_entry(cb, &txn->th_dev->dd_txn_callbacks, - dtc_linkage) { - if (cb->dtc_txn_commit) - cb->dtc_txn_commit(txn, cb->dtc_cookie); - } + list_for_each_entry(cb, &txn->th_dev->dd_txn_callbacks, + dtc_linkage) { + if (cb->dtc_txn_commit) + cb->dtc_txn_commit(txn, cb->dtc_cookie); + } } EXPORT_SYMBOL(dt_txn_hook_commit); int dt_device_init(struct dt_device *dev, struct lu_device_type *t) { - - CFS_INIT_LIST_HEAD(&dev->dd_txn_callbacks); - return lu_device_init(&dev->dd_lu_dev, t); + INIT_LIST_HEAD(&dev->dd_txn_callbacks); + return lu_device_init(&dev->dd_lu_dev, t); } EXPORT_SYMBOL(dt_device_init); diff --git a/lustre/obdclass/genops.c b/lustre/obdclass/genops.c index 109394c..37757a8 100644 --- a/lustre/obdclass/genops.c +++ b/lustre/obdclass/genops.c @@ -46,7 +46,6 @@ #include #include -extern cfs_list_t obd_types; spinlock_t obd_types_lock; struct kmem_cache *obd_device_cachep; @@ -54,8 +53,8 @@ struct kmem_cache *obdo_cachep; EXPORT_SYMBOL(obdo_cachep); struct kmem_cache *import_cachep; -cfs_list_t obd_zombie_imports; -cfs_list_t obd_zombie_exports; +struct list_head obd_zombie_imports; +struct list_head obd_zombie_exports; spinlock_t obd_zombie_impexp_lock; static void obd_zombie_impexp_notify(void); static void obd_zombie_export_add(struct obd_export *exp); @@ -97,12 +96,12 @@ static void obd_device_free(struct obd_device *obd) struct obd_type *class_search_type(const char *name) { - cfs_list_t *tmp; + struct list_head *tmp; struct obd_type *type; spin_lock(&obd_types_lock); - cfs_list_for_each(tmp, &obd_types) { - type = cfs_list_entry(tmp, struct obd_type, typ_chain); + list_for_each(tmp, &obd_types) { + type = list_entry(tmp, struct obd_type, typ_chain); if (strcmp(type->typ_name, name) == 0) { spin_unlock(&obd_types_lock); return type; @@ -230,7 +229,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops, } spin_lock(&obd_types_lock); - cfs_list_add(&type->typ_chain, &obd_types); + list_add(&type->typ_chain, &obd_types); spin_unlock(&obd_types_lock); RETURN (0); @@ -297,7 +296,7 @@ int class_unregister_type(const char *name) lu_device_type_fini(type->typ_lu); spin_lock(&obd_types_lock); - cfs_list_del(&type->typ_chain); + list_del(&type->typ_chain); spin_unlock(&obd_types_lock); OBD_FREE(type->typ_name, strlen(name) + 1); if (type->typ_dt_ops != NULL) @@ -811,10 +810,10 @@ static void class_export_destroy(struct obd_export *exp) if (exp->exp_connection) ptlrpc_put_connection_superhack(exp->exp_connection); - LASSERT(cfs_list_empty(&exp->exp_outstanding_replies)); - LASSERT(cfs_list_empty(&exp->exp_uncommitted_replies)); - 
LASSERT(cfs_list_empty(&exp->exp_req_replay_queue)); - LASSERT(cfs_list_empty(&exp->exp_hp_rpcs)); + LASSERT(list_empty(&exp->exp_outstanding_replies)); + LASSERT(list_empty(&exp->exp_uncommitted_replies)); + LASSERT(list_empty(&exp->exp_req_replay_queue)); + LASSERT(list_empty(&exp->exp_hp_rpcs)); obd_destroy_export(exp); class_decref(obd, "export", exp); @@ -849,7 +848,7 @@ void class_export_put(struct obd_export *exp) atomic_read(&exp->exp_refcount) - 1); if (atomic_dec_and_test(&exp->exp_refcount)) { - LASSERT(!cfs_list_empty(&exp->exp_obd_chain)); + LASSERT(!list_empty(&exp->exp_obd_chain)); CDEBUG(D_IOCTL, "final put %p/%s\n", exp, exp->exp_client_uuid.uuid); @@ -884,26 +883,26 @@ struct obd_export *class_new_export(struct obd_device *obd, atomic_set(&export->exp_cb_count, 0); atomic_set(&export->exp_locks_count, 0); #if LUSTRE_TRACKS_LOCK_EXP_REFS - CFS_INIT_LIST_HEAD(&export->exp_locks_list); + INIT_LIST_HEAD(&export->exp_locks_list); spin_lock_init(&export->exp_locks_list_guard); #endif atomic_set(&export->exp_replay_count, 0); export->exp_obd = obd; - CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies); + INIT_LIST_HEAD(&export->exp_outstanding_replies); spin_lock_init(&export->exp_uncommitted_replies_lock); - CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies); - CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue); - CFS_INIT_LIST_HEAD(&export->exp_handle.h_link); - CFS_INIT_LIST_HEAD(&export->exp_hp_rpcs); - CFS_INIT_LIST_HEAD(&export->exp_reg_rpcs); + INIT_LIST_HEAD(&export->exp_uncommitted_replies); + INIT_LIST_HEAD(&export->exp_req_replay_queue); + INIT_LIST_HEAD(&export->exp_handle.h_link); + INIT_LIST_HEAD(&export->exp_hp_rpcs); + INIT_LIST_HEAD(&export->exp_reg_rpcs); class_handle_hash(&export->exp_handle, &export_handle_ops); export->exp_last_request_time = cfs_time_current_sec(); spin_lock_init(&export->exp_lock); spin_lock_init(&export->exp_rpc_lock); - CFS_INIT_HLIST_NODE(&export->exp_uuid_hash); - CFS_INIT_HLIST_NODE(&export->exp_nid_hash); + INIT_HLIST_NODE(&export->exp_uuid_hash); + INIT_HLIST_NODE(&export->exp_nid_hash); spin_lock_init(&export->exp_bl_list_lock); - CFS_INIT_LIST_HEAD(&export->exp_bl_list); + INIT_LIST_HEAD(&export->exp_bl_list); export->exp_sp_peer = LUSTRE_SP_ANY; export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID; @@ -936,9 +935,9 @@ struct obd_export *class_new_export(struct obd_device *obd, } class_incref(obd, "export", export); - cfs_list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports); - cfs_list_add_tail(&export->exp_obd_chain_timed, - &export->exp_obd->obd_exports_timed); + list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports); + list_add_tail(&export->exp_obd_chain_timed, + &export->exp_obd->obd_exports_timed); export->exp_obd->obd_num_exports++; spin_unlock(&obd->obd_dev_lock); cfs_hash_putref(hash); @@ -950,7 +949,7 @@ exit_err: if (hash) cfs_hash_putref(hash); class_handle_unhash(&export->exp_handle); - LASSERT(cfs_hlist_unhashed(&export->exp_uuid_hash)); + LASSERT(hlist_unhashed(&export->exp_uuid_hash)); obd_destroy_export(export); OBD_FREE_PTR(export); return ERR_PTR(rc); @@ -963,13 +962,13 @@ void class_unlink_export(struct obd_export *exp) spin_lock(&exp->exp_obd->obd_dev_lock); /* delete an uuid-export hashitem from hashtables */ - if (!cfs_hlist_unhashed(&exp->exp_uuid_hash)) + if (!hlist_unhashed(&exp->exp_uuid_hash)) cfs_hash_del(exp->exp_obd->obd_uuid_hash, &exp->exp_client_uuid, &exp->exp_uuid_hash); - cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports); - 
cfs_list_del_init(&exp->exp_obd_chain_timed); + list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports); + list_del_init(&exp->exp_obd_chain_timed); exp->exp_obd->obd_num_exports--; spin_unlock(&exp->exp_obd->obd_dev_lock); class_export_put(exp); @@ -988,12 +987,12 @@ void class_import_destroy(struct obd_import *imp) ptlrpc_put_connection_superhack(imp->imp_connection); - while (!cfs_list_empty(&imp->imp_conn_list)) { - struct obd_import_conn *imp_conn; + while (!list_empty(&imp->imp_conn_list)) { + struct obd_import_conn *imp_conn; - imp_conn = cfs_list_entry(imp->imp_conn_list.next, - struct obd_import_conn, oic_item); - cfs_list_del_init(&imp_conn->oic_item); + imp_conn = list_entry(imp->imp_conn_list.next, + struct obd_import_conn, oic_item); + list_del_init(&imp_conn->oic_item); ptlrpc_put_connection_superhack(imp_conn->oic_conn); OBD_FREE(imp_conn, sizeof(*imp_conn)); } @@ -1026,9 +1025,9 @@ EXPORT_SYMBOL(class_import_get); void class_import_put(struct obd_import *imp) { - ENTRY; + ENTRY; - LASSERT(cfs_list_empty(&imp->imp_zombie_chain)); + LASSERT(list_empty(&imp->imp_zombie_chain)); LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON); CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp, @@ -1066,12 +1065,12 @@ struct obd_import *class_new_import(struct obd_device *obd) if (imp == NULL) return NULL; - CFS_INIT_LIST_HEAD(&imp->imp_pinger_chain); - CFS_INIT_LIST_HEAD(&imp->imp_zombie_chain); - CFS_INIT_LIST_HEAD(&imp->imp_replay_list); - CFS_INIT_LIST_HEAD(&imp->imp_sending_list); - CFS_INIT_LIST_HEAD(&imp->imp_delayed_list); - CFS_INIT_LIST_HEAD(&imp->imp_committed_list); + INIT_LIST_HEAD(&imp->imp_pinger_chain); + INIT_LIST_HEAD(&imp->imp_zombie_chain); + INIT_LIST_HEAD(&imp->imp_replay_list); + INIT_LIST_HEAD(&imp->imp_sending_list); + INIT_LIST_HEAD(&imp->imp_delayed_list); + INIT_LIST_HEAD(&imp->imp_committed_list); imp->imp_replay_cursor = &imp->imp_committed_list; spin_lock_init(&imp->imp_lock); imp->imp_last_success_conn = 0; @@ -1085,8 +1084,8 @@ struct obd_import *class_new_import(struct obd_device *obd) atomic_set(&imp->imp_inflight, 0); atomic_set(&imp->imp_replay_inflight, 0); atomic_set(&imp->imp_inval_count, 0); - CFS_INIT_LIST_HEAD(&imp->imp_conn_list); - CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link); + INIT_LIST_HEAD(&imp->imp_conn_list); + INIT_LIST_HEAD(&imp->imp_handle.h_link); class_handle_hash(&imp->imp_handle, &import_handle_ops); init_imp_at(&imp->imp_at); @@ -1126,7 +1125,7 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) exp, lock, lock->l_exp_refs_target); } if ((lock->l_exp_refs_nr ++) == 0) { - cfs_list_add(&lock->l_exp_refs_link, &exp->exp_locks_list); + list_add(&lock->l_exp_refs_link, &exp->exp_locks_list); lock->l_exp_refs_target = exp; } CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n", @@ -1145,7 +1144,7 @@ void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) lock, lock->l_exp_refs_target, exp); } if (-- lock->l_exp_refs_nr == 0) { - cfs_list_del_init(&lock->l_exp_refs_link); + list_del_init(&lock->l_exp_refs_link); lock->l_exp_refs_target = NULL; } CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n", @@ -1248,14 +1247,14 @@ int class_disconnect(struct obd_export *export) * all end up in here, and if any of them race we shouldn't * call extra class_export_puts(). 
*/ if (already_disconnected) { - LASSERT(cfs_hlist_unhashed(&export->exp_nid_hash)); + LASSERT(hlist_unhashed(&export->exp_nid_hash)); GOTO(no_disconn, already_disconnected); } CDEBUG(D_IOCTL, "disconnect: cookie "LPX64"\n", export->exp_handle.h_cookie); - if (!cfs_hlist_unhashed(&export->exp_nid_hash)) + if (!hlist_unhashed(&export->exp_nid_hash)) cfs_hash_del(export->exp_obd->obd_nid_hash, &export->exp_connection->c_peer.nid, &export->exp_nid_hash); @@ -1282,7 +1281,7 @@ int class_connected_export(struct obd_export *exp) } EXPORT_SYMBOL(class_connected_export); -static void class_disconnect_export_list(cfs_list_t *list, +static void class_disconnect_export_list(struct list_head *list, enum obd_option flags) { int rc; @@ -1291,11 +1290,11 @@ static void class_disconnect_export_list(cfs_list_t *list, /* It's possible that an export may disconnect itself, but * nothing else will be added to this list. */ - while (!cfs_list_empty(list)) { - exp = cfs_list_entry(list->next, struct obd_export, - exp_obd_chain); - /* need for safe call CDEBUG after obd_disconnect */ - class_export_get(exp); + while (!list_empty(list)) { + exp = list_entry(list->next, struct obd_export, + exp_obd_chain); + /* need for safe call CDEBUG after obd_disconnect */ + class_export_get(exp); spin_lock(&exp->exp_lock); exp->exp_flags = flags; @@ -1308,7 +1307,7 @@ static void class_disconnect_export_list(cfs_list_t *list, exp); /* Need to delete this now so we don't end up pointing * to work_list later when this export is cleaned up. */ - cfs_list_del_init(&exp->exp_obd_chain); + list_del_init(&exp->exp_obd_chain); class_export_put(exp); continue; } @@ -1330,17 +1329,17 @@ static void class_disconnect_export_list(cfs_list_t *list, void class_disconnect_exports(struct obd_device *obd) { - cfs_list_t work_list; + struct list_head work_list; ENTRY; /* Move all of the exports from obd_exports to a work list, en masse. 
*/ - CFS_INIT_LIST_HEAD(&work_list); + INIT_LIST_HEAD(&work_list); spin_lock(&obd->obd_dev_lock); - cfs_list_splice_init(&obd->obd_exports, &work_list); - cfs_list_splice_init(&obd->obd_delayed_exports, &work_list); + list_splice_init(&obd->obd_exports, &work_list); + list_splice_init(&obd->obd_delayed_exports, &work_list); spin_unlock(&obd->obd_dev_lock); - if (!cfs_list_empty(&work_list)) { + if (!list_empty(&work_list)) { CDEBUG(D_HA, "OBD device %d (%p) has exports, " "disconnecting them\n", obd->obd_minor, obd); class_disconnect_export_list(&work_list, @@ -1357,15 +1356,15 @@ EXPORT_SYMBOL(class_disconnect_exports); void class_disconnect_stale_exports(struct obd_device *obd, int (*test_export)(struct obd_export *)) { - cfs_list_t work_list; + struct list_head work_list; struct obd_export *exp, *n; int evicted = 0; ENTRY; - CFS_INIT_LIST_HEAD(&work_list); + INIT_LIST_HEAD(&work_list); spin_lock(&obd->obd_dev_lock); - cfs_list_for_each_entry_safe(exp, n, &obd->obd_exports, - exp_obd_chain) { + list_for_each_entry_safe(exp, n, &obd->obd_exports, + exp_obd_chain) { /* don't count self-export as client */ if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid)) @@ -1384,7 +1383,7 @@ void class_disconnect_stale_exports(struct obd_device *obd, exp->exp_failed = 1; spin_unlock(&exp->exp_lock); - cfs_list_move(&exp->exp_obd_chain, &work_list); + list_move(&exp->exp_obd_chain, &work_list); evicted++; CDEBUG(D_HA, "%s: disconnect stale client %s@%s\n", obd->obd_name, exp->exp_client_uuid.uuid, @@ -1553,8 +1552,8 @@ static void print_export_data(struct obd_export *exp, const char *status, int nreplies = 0; spin_lock(&exp->exp_lock); - cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies, - rs_exp_list) { + list_for_each_entry(rs, &exp->exp_outstanding_replies, + rs_exp_list) { if (nreplies == 0) first_reply = rs; nreplies++; @@ -1581,15 +1580,15 @@ void dump_exports(struct obd_device *obd, int locks) struct obd_export *exp; spin_lock(&obd->obd_dev_lock); - cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) + list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) print_export_data(exp, "ACTIVE", locks); - cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain) + list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain) print_export_data(exp, "UNLINKED", locks); - cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain) + list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain) print_export_data(exp, "DELAYED", locks); spin_unlock(&obd->obd_dev_lock); spin_lock(&obd_zombie_impexp_lock); - cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain) + list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain) print_export_data(exp, "ZOMBIE", locks); spin_unlock(&obd_zombie_impexp_lock); } @@ -1598,9 +1597,9 @@ EXPORT_SYMBOL(dump_exports); void obd_exports_barrier(struct obd_device *obd) { int waited = 2; - LASSERT(cfs_list_empty(&obd->obd_exports)); + LASSERT(list_empty(&obd->obd_exports)); spin_lock(&obd->obd_dev_lock); - while (!cfs_list_empty(&obd->obd_unlinked_exports)) { + while (!list_empty(&obd->obd_unlinked_exports)) { spin_unlock(&obd->obd_dev_lock); schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(waited)); @@ -1634,21 +1633,21 @@ void obd_zombie_impexp_cull(void) do { spin_lock(&obd_zombie_impexp_lock); - import = NULL; - if (!cfs_list_empty(&obd_zombie_imports)) { - import = cfs_list_entry(obd_zombie_imports.next, - struct obd_import, - imp_zombie_chain); - 
cfs_list_del_init(&import->imp_zombie_chain); - } + import = NULL; + if (!list_empty(&obd_zombie_imports)) { + import = list_entry(obd_zombie_imports.next, + struct obd_import, + imp_zombie_chain); + list_del_init(&import->imp_zombie_chain); + } - export = NULL; - if (!cfs_list_empty(&obd_zombie_exports)) { - export = cfs_list_entry(obd_zombie_exports.next, - struct obd_export, - exp_obd_chain); - cfs_list_del_init(&export->exp_obd_chain); - } + export = NULL; + if (!list_empty(&obd_zombie_exports)) { + export = list_entry(obd_zombie_exports.next, + struct obd_export, + exp_obd_chain); + list_del_init(&export->exp_obd_chain); + } spin_unlock(&obd_zombie_impexp_lock); @@ -1701,12 +1700,12 @@ static int obd_zombie_impexp_check(void *arg) */ static void obd_zombie_export_add(struct obd_export *exp) { spin_lock(&exp->exp_obd->obd_dev_lock); - LASSERT(!cfs_list_empty(&exp->exp_obd_chain)); - cfs_list_del_init(&exp->exp_obd_chain); + LASSERT(!list_empty(&exp->exp_obd_chain)); + list_del_init(&exp->exp_obd_chain); spin_unlock(&exp->exp_obd->obd_dev_lock); spin_lock(&obd_zombie_impexp_lock); zombies_count++; - cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports); + list_add(&exp->exp_obd_chain, &obd_zombie_exports); spin_unlock(&obd_zombie_impexp_lock); obd_zombie_impexp_notify(); @@ -1719,9 +1718,9 @@ static void obd_zombie_import_add(struct obd_import *imp) { LASSERT(imp->imp_sec == NULL); LASSERT(imp->imp_rq_pool == NULL); spin_lock(&obd_zombie_impexp_lock); - LASSERT(cfs_list_empty(&imp->imp_zombie_chain)); + LASSERT(list_empty(&imp->imp_zombie_chain)); zombies_count++; - cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports); + list_add(&imp->imp_zombie_chain, &obd_zombie_imports); spin_unlock(&obd_zombie_impexp_lock); obd_zombie_impexp_notify(); @@ -1828,8 +1827,9 @@ int obd_zombie_impexp_init(void) struct task_struct *task; #endif - CFS_INIT_LIST_HEAD(&obd_zombie_imports); - CFS_INIT_LIST_HEAD(&obd_zombie_exports); + INIT_LIST_HEAD(&obd_zombie_imports); + + INIT_LIST_HEAD(&obd_zombie_exports); spin_lock_init(&obd_zombie_impexp_lock); init_completion(&obd_zombie_start); init_completion(&obd_zombie_stop); diff --git a/lustre/obdclass/idmap.c b/lustre/obdclass/idmap.c index 146b40e..df6c57b 100644 --- a/lustre/obdclass/idmap.c +++ b/lustre/obdclass/idmap.c @@ -168,10 +168,10 @@ int lustre_in_group_p(struct lu_ucred *mu, gid_t grp) EXPORT_SYMBOL(lustre_in_group_p); struct lustre_idmap_entry { - cfs_list_t lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */ - cfs_list_t lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */ - cfs_list_t lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */ - cfs_list_t lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */ + struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */ + struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */ + struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */ + struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */ uid_t lie_rmt_uid; /* remote uid */ uid_t lie_lcl_uid; /* local uid */ gid_t lie_rmt_gid; /* remote gid */ @@ -193,10 +193,10 @@ struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid, if (e == NULL) return NULL; - CFS_INIT_LIST_HEAD(&e->lie_rmt_uid_hash); - CFS_INIT_LIST_HEAD(&e->lie_lcl_uid_hash); - CFS_INIT_LIST_HEAD(&e->lie_rmt_gid_hash); - CFS_INIT_LIST_HEAD(&e->lie_lcl_gid_hash); + INIT_LIST_HEAD(&e->lie_rmt_uid_hash); + INIT_LIST_HEAD(&e->lie_lcl_uid_hash); + INIT_LIST_HEAD(&e->lie_rmt_gid_hash); + INIT_LIST_HEAD(&e->lie_lcl_gid_hash); e->lie_rmt_uid = rmt_uid; e->lie_lcl_uid = 
lcl_uid; e->lie_rmt_gid = rmt_gid; @@ -207,15 +207,11 @@ struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid, static void idmap_entry_free(struct lustre_idmap_entry *e) { - if (!cfs_list_empty(&e->lie_rmt_uid_hash)) - cfs_list_del(&e->lie_rmt_uid_hash); - if (!cfs_list_empty(&e->lie_lcl_uid_hash)) - cfs_list_del(&e->lie_lcl_uid_hash); - if (!cfs_list_empty(&e->lie_rmt_gid_hash)) - cfs_list_del(&e->lie_rmt_gid_hash); - if (!cfs_list_empty(&e->lie_lcl_gid_hash)) - cfs_list_del(&e->lie_lcl_gid_hash); - OBD_FREE_PTR(e); + list_del(&e->lie_rmt_uid_hash); + list_del(&e->lie_lcl_uid_hash); + list_del(&e->lie_rmt_gid_hash); + list_del(&e->lie_lcl_gid_hash); + OBD_FREE_PTR(e); } /* @@ -229,11 +225,11 @@ struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t, uid_t rmt_uid, uid_t lcl_uid, gid_t rmt_gid, gid_t lcl_gid) { - cfs_list_t *head; + struct list_head *head; struct lustre_idmap_entry *e; head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)]; - cfs_list_for_each_entry(e, head, lie_rmt_uid_hash) + list_for_each_entry(e, head, lie_rmt_uid_hash) if (e->lie_rmt_uid == rmt_uid) { if (e->lie_lcl_uid == lcl_uid) { if (e->lie_rmt_gid == rmt_gid && @@ -250,7 +246,7 @@ struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t, } head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)]; - cfs_list_for_each_entry(e, head, lie_rmt_gid_hash) + list_for_each_entry(e, head, lie_rmt_gid_hash) if (e->lie_rmt_gid == rmt_gid) { if (e->lie_lcl_gid == lcl_gid) { if (unlikely(e->lie_rmt_uid == rmt_uid && @@ -270,36 +266,36 @@ struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t, return NULL; } -static __u32 idmap_lookup_uid(cfs_list_t *hash, int reverse, +static __u32 idmap_lookup_uid(struct list_head *hash, int reverse, __u32 uid) { - cfs_list_t *head = &hash[lustre_idmap_hashfunc(uid)]; - struct lustre_idmap_entry *e; + struct list_head *head = &hash[lustre_idmap_hashfunc(uid)]; + struct lustre_idmap_entry *e; - if (!reverse) { - cfs_list_for_each_entry(e, head, lie_rmt_uid_hash) - if (e->lie_rmt_uid == uid) - return e->lie_lcl_uid; - } else { - cfs_list_for_each_entry(e, head, lie_lcl_uid_hash) - if (e->lie_lcl_uid == uid) - return e->lie_rmt_uid; - } + if (!reverse) { + list_for_each_entry(e, head, lie_rmt_uid_hash) + if (e->lie_rmt_uid == uid) + return e->lie_lcl_uid; + } else { + list_for_each_entry(e, head, lie_lcl_uid_hash) + if (e->lie_lcl_uid == uid) + return e->lie_rmt_uid; + } - return CFS_IDMAP_NOTFOUND; + return CFS_IDMAP_NOTFOUND; } -static __u32 idmap_lookup_gid(cfs_list_t *hash, int reverse, __u32 gid) +static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid) { - cfs_list_t *head = &hash[lustre_idmap_hashfunc(gid)]; + struct list_head *head = &hash[lustre_idmap_hashfunc(gid)]; struct lustre_idmap_entry *e; if (!reverse) { - cfs_list_for_each_entry(e, head, lie_rmt_gid_hash) + list_for_each_entry(e, head, lie_rmt_gid_hash) if (e->lie_rmt_gid == gid) return e->lie_lcl_gid; } else { - cfs_list_for_each_entry(e, head, lie_lcl_gid_hash) + list_for_each_entry(e, head, lie_lcl_gid_hash) if (e->lie_lcl_gid == gid) return e->lie_rmt_gid; } @@ -324,32 +320,32 @@ int lustre_idmap_add(struct lustre_idmap_table *t, return -ENOMEM; spin_lock(&t->lit_lock); - e1 = idmap_search_entry(t, ruid, luid, rgid, lgid); - if (e1 == NULL) { - cfs_list_add_tail(&e0->lie_rmt_uid_hash, - &t->lit_idmaps[RMT_UIDMAP_IDX] - [lustre_idmap_hashfunc(ruid)]); - cfs_list_add_tail(&e0->lie_lcl_uid_hash, - 
&t->lit_idmaps[LCL_UIDMAP_IDX] - [lustre_idmap_hashfunc(luid)]); - cfs_list_add_tail(&e0->lie_rmt_gid_hash, - &t->lit_idmaps[RMT_GIDMAP_IDX] - [lustre_idmap_hashfunc(rgid)]); - cfs_list_add_tail(&e0->lie_lcl_gid_hash, - &t->lit_idmaps[LCL_GIDMAP_IDX] - [lustre_idmap_hashfunc(lgid)]); - } + e1 = idmap_search_entry(t, ruid, luid, rgid, lgid); + if (e1 == NULL) { + list_add_tail(&e0->lie_rmt_uid_hash, + &t->lit_idmaps[RMT_UIDMAP_IDX] + [lustre_idmap_hashfunc(ruid)]); + list_add_tail(&e0->lie_lcl_uid_hash, + &t->lit_idmaps[LCL_UIDMAP_IDX] + [lustre_idmap_hashfunc(luid)]); + list_add_tail(&e0->lie_rmt_gid_hash, + &t->lit_idmaps[RMT_GIDMAP_IDX] + [lustre_idmap_hashfunc(rgid)]); + list_add_tail(&e0->lie_lcl_gid_hash, + &t->lit_idmaps[LCL_GIDMAP_IDX] + [lustre_idmap_hashfunc(lgid)]); + } spin_unlock(&t->lit_lock); - if (e1 != NULL) { - idmap_entry_free(e0); - if (IS_ERR(e1)) - return PTR_ERR(e1); - } - } else if (IS_ERR(e0)) { - return PTR_ERR(e0); - } + if (e1 != NULL) { + idmap_entry_free(e0); + if (IS_ERR(e1)) + return PTR_ERR(e1); + } + } else if (IS_ERR(e0)) { + return PTR_ERR(e0); + } - return 0; + return 0; } EXPORT_SYMBOL(lustre_idmap_add); @@ -378,7 +374,7 @@ int lustre_idmap_lookup_uid(struct lu_ucred *mu, struct lustre_idmap_table *t, int reverse, uid_t uid) { - cfs_list_t *hash; + struct list_head *hash; if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) { if (!reverse) { @@ -410,7 +406,7 @@ EXPORT_SYMBOL(lustre_idmap_lookup_uid); int lustre_idmap_lookup_gid(struct lu_ucred *mu, struct lustre_idmap_table *t, int reverse, gid_t gid) { - cfs_list_t *hash; + struct list_head *hash; if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) { if (!reverse) { @@ -451,7 +447,7 @@ struct lustre_idmap_table *lustre_idmap_init(void) spin_lock_init(&t->lit_lock); for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++) for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++) - CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]); + INIT_LIST_HEAD(&t->lit_idmaps[i][j]); return t; } @@ -459,7 +455,7 @@ EXPORT_SYMBOL(lustre_idmap_init); void lustre_idmap_fini(struct lustre_idmap_table *t) { - cfs_list_t *list; + struct list_head *list; struct lustre_idmap_entry *e; int i; LASSERT(t); @@ -467,10 +463,10 @@ void lustre_idmap_fini(struct lustre_idmap_table *t) list = t->lit_idmaps[RMT_UIDMAP_IDX]; spin_lock(&t->lit_lock); for (i = 0; i < CFS_IDMAP_HASHSIZE; i++) - while (!cfs_list_empty(&list[i])) { - e = cfs_list_entry(list[i].next, - struct lustre_idmap_entry, - lie_rmt_uid_hash); + while (!list_empty(&list[i])) { + e = list_entry(list[i].next, + struct lustre_idmap_entry, + lie_rmt_uid_hash); idmap_entry_free(e); } spin_unlock(&t->lit_lock); diff --git a/lustre/obdclass/llog.c b/lustre/obdclass/llog.c index 8202b41..cd2166d 100644 --- a/lustre/obdclass/llog.c +++ b/lustre/obdclass/llog.c @@ -69,7 +69,7 @@ struct llog_handle *llog_alloc_handle(void) init_rwsem(&loghandle->lgh_lock); spin_lock_init(&loghandle->lgh_hdr_lock); - CFS_INIT_LIST_HEAD(&loghandle->u.phd.phd_entry); + INIT_LIST_HEAD(&loghandle->u.phd.phd_entry); atomic_set(&loghandle->lgh_refcount, 1); return loghandle; @@ -87,9 +87,9 @@ void llog_free_handle(struct llog_handle *loghandle) goto out; if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) - LASSERT(cfs_list_empty(&loghandle->u.phd.phd_entry)); + LASSERT(list_empty(&loghandle->u.phd.phd_entry)); else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) - LASSERT(cfs_list_empty(&loghandle->u.chd.chd_head)); + LASSERT(list_empty(&loghandle->u.chd.chd_head)); 
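The list_empty() assertions here and the !cfs_list_empty() guards dropped from idmap_entry_free() above both lean on the same invariant: a node set up with INIT_LIST_HEAD() points at itself, and list_del() on a self-linked node is harmless, since it only rewrites the node's own pointers before poisoning them. A minimal sketch of why the now-unconditional list_del() is safe:

	struct list_head node;

	INIT_LIST_HEAD(&node);		/* node.next == node.prev == &node  */
	BUG_ON(!list_empty(&node));	/* "empty" here means self-linked   */
	list_del(&node);		/* unlinks from itself, then poisons */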
LASSERT(sizeof(*(loghandle->lgh_hdr)) == LLOG_CHUNK_SIZE); OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE); out: @@ -252,8 +252,8 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, } } if (flags & LLOG_F_IS_CAT) { - LASSERT(cfs_list_empty(&handle->u.chd.chd_head)); - CFS_INIT_LIST_HEAD(&handle->u.chd.chd_head); + LASSERT(list_empty(&handle->u.chd.chd_head)); + INIT_LIST_HEAD(&handle->u.chd.chd_head); llh->llh_size = sizeof(struct llog_logid_rec); } else if (!(flags & LLOG_F_IS_PLAIN)) { CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n", diff --git a/lustre/obdclass/llog_cat.c b/lustre/obdclass/llog_cat.c index 6541d79..b34d5d3 100644 --- a/lustre/obdclass/llog_cat.c +++ b/lustre/obdclass/llog_cat.c @@ -136,8 +136,8 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle, RETURN(-EBADF); down_write(&cathandle->lgh_lock); - cfs_list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, + u.phd.phd_entry) { struct llog_logid *cgl = &loghandle->lgh_id; if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) && @@ -173,7 +173,7 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle, } down_write(&cathandle->lgh_lock); - cfs_list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head); + list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head); up_write(&cathandle->lgh_lock); loghandle->u.phd.phd_cat_handle = cathandle; @@ -194,13 +194,13 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle) ENTRY; - cfs_list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head, + u.phd.phd_entry) { struct llog_log_hdr *llh = loghandle->lgh_hdr; int index; /* unlink open-not-created llogs */ - cfs_list_del_init(&loghandle->u.phd.phd_entry); + list_del_init(&loghandle->u.phd.phd_entry); llh = loghandle->lgh_hdr; if (loghandle->lgh_obj != NULL && llh != NULL && (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) && @@ -370,8 +370,8 @@ int llog_cat_declare_add_rec(const struct lu_env *env, NULL, NULL, LLOG_OPEN_NEW); if (rc == 0) { cathandle->u.chd.chd_current_log = loghandle; - cfs_list_add_tail(&loghandle->u.phd.phd_entry, - &cathandle->u.chd.chd_head); + list_add_tail(&loghandle->u.phd.phd_entry, + &cathandle->u.chd.chd_head); } } up_write(&cathandle->lgh_lock); @@ -383,8 +383,8 @@ int llog_cat_declare_add_rec(const struct lu_env *env, NULL, NULL, LLOG_OPEN_NEW); if (rc == 0) { cathandle->u.chd.chd_next_log = loghandle; - cfs_list_add_tail(&loghandle->u.phd.phd_entry, - &cathandle->u.chd.chd_head); + list_add_tail(&loghandle->u.phd.phd_entry, + &cathandle->u.chd.chd_head); } } up_write(&cathandle->lgh_lock); @@ -763,7 +763,7 @@ int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle, down_write(&cathandle->lgh_lock); if (cathandle->u.chd.chd_current_log == loghandle) cathandle->u.chd.chd_current_log = NULL; - cfs_list_del_init(&loghandle->u.phd.phd_entry); + list_del_init(&loghandle->u.phd.phd_entry); up_write(&cathandle->lgh_lock); LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index); /* llog was opened and keep in a list, close it now */ diff --git a/lustre/obdclass/local_storage.c b/lustre/obdclass/local_storage.c index 89fc5df..5e43e16 100644 --- a/lustre/obdclass/local_storage.c +++ b/lustre/obdclass/local_storage.c @@ -35,7 +35,7 @@ #include "local_storage.h" /* all initialized local storages on 
this node are linked on this */ -static CFS_LIST_HEAD(ls_list_head); +static struct list_head ls_list_head = LIST_HEAD_INIT(ls_list_head); static DEFINE_MUTEX(ls_list_mutex); static int ls_object_init(const struct lu_env *env, struct lu_object *o, @@ -108,7 +108,7 @@ static struct ls_device *__ls_find_dev(struct dt_device *dev) { struct ls_device *ls, *ret = NULL; - cfs_list_for_each_entry(ls, &ls_list_head, ls_linkage) { + list_for_each_entry(ls, &ls_list_head, ls_linkage) { if (ls->ls_osd == dev) { atomic_inc(&ls->ls_refcount); ret = ls; @@ -156,7 +156,7 @@ struct ls_device *ls_device_get(struct dt_device *dev) GOTO(out_ls, ls = ERR_PTR(-ENOMEM)); atomic_set(&ls->ls_refcount, 1); - CFS_INIT_LIST_HEAD(&ls->ls_los_list); + INIT_LIST_HEAD(&ls->ls_los_list); mutex_init(&ls->ls_los_mutex); ls->ls_osd = dev; @@ -167,7 +167,7 @@ struct ls_device *ls_device_get(struct dt_device *dev) ls->ls_top_dev.dd_lu_dev.ld_site = dev->dd_lu_dev.ld_site; /* finally add ls to the list */ - cfs_list_add(&ls->ls_linkage, &ls_list_head); + list_add(&ls->ls_linkage, &ls_list_head); out_ls: mutex_unlock(&ls_list_mutex); RETURN(ls); @@ -181,8 +181,8 @@ void ls_device_put(const struct lu_env *env, struct ls_device *ls) mutex_lock(&ls_list_mutex); if (atomic_read(&ls->ls_refcount) == 0) { - LASSERT(cfs_list_empty(&ls->ls_los_list)); - cfs_list_del(&ls->ls_linkage); + LASSERT(list_empty(&ls->ls_los_list)); + list_del(&ls->ls_linkage); lu_site_purge(env, ls->ls_top_dev.dd_lu_dev.ld_site, ~0); lu_device_fini(&ls->ls_top_dev.dd_lu_dev); OBD_FREE_PTR(ls); @@ -664,7 +664,7 @@ struct local_oid_storage *dt_los_find(struct ls_device *ls, __u64 seq) { struct local_oid_storage *los, *ret = NULL; - cfs_list_for_each_entry(los, &ls->ls_los_list, los_list) { + list_for_each_entry(los, &ls->ls_los_list, los_list) { if (los->los_seq == seq) { atomic_inc(&los->los_refcount); ret = los; @@ -805,7 +805,7 @@ int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev, mutex_init(&(*los)->los_id_lock); (*los)->los_dev = &ls->ls_top_dev; atomic_inc(&ls->ls_refcount); - cfs_list_add(&(*los)->los_list, &ls->ls_los_list); + list_add(&(*los)->los_list, &ls->ls_los_list); /* Use {seq, 0, 0} to create the LAST_ID file for every * sequence. OIDs start at LUSTRE_FID_INIT_OID. 
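The ls_list_head conversion at the top of this file picks the static initializer form; across the patch three equivalent idioms appear, chosen by where the head lives. A sketch with illustrative names:

	#include <linux/init.h>
	#include <linux/list.h>

	static LIST_HEAD(a);				/* define + initialize in one macro */
	static struct list_head b = LIST_HEAD_INIT(b);	/* explicit form, as ls_list_head   */
	static struct list_head c;			/* zeroed bss, NOT yet a valid list */

	static int __init example_init(void)
	{
		/* Heads without a static initializer must be set up exactly
		 * once, before any use, the way cl_global_init() and
		 * lu_global_init() do; re-running INIT_LIST_HEAD() on a live
		 * head silently orphans everything already linked to it. */
		INIT_LIST_HEAD(&c);
		return 0;
	}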
@@ -882,7 +882,7 @@ out_trans: } out_los: if (rc != 0) { - cfs_list_del(&(*los)->los_list); + list_del(&(*los)->los_list); atomic_dec(&ls->ls_refcount); OBD_FREE_PTR(*los); *los = NULL; @@ -925,7 +925,7 @@ void local_oid_storage_fini(const struct lu_env *env, if (los->los_obj) lu_object_put_nocache(env, &los->los_obj->do_lu); - cfs_list_del(&los->los_list); + list_del(&los->los_list); OBD_FREE_PTR(los); mutex_unlock(&ls->ls_los_mutex); ls_device_put(env, ls); diff --git a/lustre/obdclass/local_storage.h b/lustre/obdclass/local_storage.h index 9a7244e..6717e49 100644 --- a/lustre/obdclass/local_storage.h +++ b/lustre/obdclass/local_storage.h @@ -40,13 +40,13 @@ struct ls_device { struct dt_device ls_top_dev; /* all initialized ls_devices on this node linked by this */ - cfs_list_t ls_linkage; + struct list_head ls_linkage; /* how many handle's reference this local storage */ atomic_t ls_refcount; /* underlaying OSD device */ struct dt_device *ls_osd; /* list of all local OID storages */ - cfs_list_t ls_los_list; + struct list_head ls_los_list; struct mutex ls_los_mutex; }; diff --git a/lustre/obdclass/lprocfs_jobstats.c b/lustre/obdclass/lprocfs_jobstats.c index 23e1a8b..05b378e 100644 --- a/lustre/obdclass/lprocfs_jobstats.c +++ b/lustre/obdclass/lprocfs_jobstats.c @@ -67,13 +67,13 @@ */ struct job_stat { - cfs_hlist_node_t js_hash; - cfs_list_t js_list; - atomic_t js_refcount; - char js_jobid[JOBSTATS_JOBID_SIZE]; - time_t js_timestamp; /* seconds */ - struct lprocfs_stats *js_stats; - struct obd_job_stats *js_jobstats; + struct hlist_node js_hash; + struct list_head js_list; + atomic_t js_refcount; + char js_jobid[JOBSTATS_JOBID_SIZE]; + time_t js_timestamp; /* seconds */ + struct lprocfs_stats *js_stats; + struct obd_job_stats *js_jobstats; }; static unsigned job_stat_hash(cfs_hash_t *hs, const void *key, unsigned mask) @@ -81,30 +81,30 @@ static unsigned job_stat_hash(cfs_hash_t *hs, const void *key, unsigned mask) return cfs_hash_djb2_hash(key, strlen(key), mask); } -static void *job_stat_key(cfs_hlist_node_t *hnode) +static void *job_stat_key(struct hlist_node *hnode) { struct job_stat *job; - job = cfs_hlist_entry(hnode, struct job_stat, js_hash); + job = hlist_entry(hnode, struct job_stat, js_hash); return job->js_jobid; } -static int job_stat_keycmp(const void *key, cfs_hlist_node_t *hnode) +static int job_stat_keycmp(const void *key, struct hlist_node *hnode) { struct job_stat *job; - job = cfs_hlist_entry(hnode, struct job_stat, js_hash); + job = hlist_entry(hnode, struct job_stat, js_hash); return (strlen(job->js_jobid) == strlen(key)) && !strncmp(job->js_jobid, key, strlen(key)); } -static void *job_stat_object(cfs_hlist_node_t *hnode) +static void *job_stat_object(struct hlist_node *hnode) { - return cfs_hlist_entry(hnode, struct job_stat, js_hash); + return hlist_entry(hnode, struct job_stat, js_hash); } -static void job_stat_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void job_stat_get(cfs_hash_t *hs, struct hlist_node *hnode) { struct job_stat *job; - job = cfs_hlist_entry(hnode, struct job_stat, js_hash); + job = hlist_entry(hnode, struct job_stat, js_hash); atomic_inc(&job->js_refcount); } @@ -114,7 +114,7 @@ static void job_free(struct job_stat *job) LASSERT(job->js_jobstats); write_lock(&job->js_jobstats->ojs_lock); - cfs_list_del_init(&job->js_list); + list_del_init(&job->js_list); write_unlock(&job->js_jobstats->ojs_lock); lprocfs_free_stats(&job->js_stats); @@ -128,14 +128,14 @@ static void job_putref(struct job_stat *job) job_free(job); } -static void 
job_stat_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void job_stat_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) { struct job_stat *job; - job = cfs_hlist_entry(hnode, struct job_stat, js_hash); + job = hlist_entry(hnode, struct job_stat, js_hash); job_putref(job); } -static void job_stat_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void job_stat_exit(cfs_hash_t *hs, struct hlist_node *hnode) { CERROR("should not have any items\n"); } @@ -151,12 +151,12 @@ static cfs_hash_ops_t job_stats_hash_ops = { }; static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) + struct hlist_node *hnode, void *data) { time_t oldest = *((time_t *)data); struct job_stat *job; - job = cfs_hlist_entry(hnode, struct job_stat, js_hash); + job = hlist_entry(hnode, struct job_stat, js_hash); if (!oldest || job->js_timestamp < oldest) cfs_hash_bd_del_locked(hs, bd, hnode); @@ -202,8 +202,8 @@ static struct job_stat *job_alloc(char *jobid, struct obd_job_stats *jobs) memcpy(job->js_jobid, jobid, JOBSTATS_JOBID_SIZE); job->js_timestamp = cfs_time_current_sec(); job->js_jobstats = jobs; - CFS_INIT_HLIST_NODE(&job->js_hash); - CFS_INIT_LIST_HEAD(&job->js_list); + INIT_HLIST_NODE(&job->js_hash); + INIT_LIST_HEAD(&job->js_list); atomic_set(&job->js_refcount, 1); return job; @@ -242,15 +242,15 @@ int lprocfs_job_stats_log(struct obd_device *obd, char *jobid, if (job2 != job) { job_putref(job); job = job2; - /* We cannot LASSERT(!cfs_list_empty(&job->js_list)) here, + /* We cannot LASSERT(!list_empty(&job->js_list)) here, * since we just lost the race for inserting "job" into the * ojs_list, and some other thread is doing it _right_now_. * Instead, be content the other thread is doing this, since * "job2" was initialized in job_alloc() already. LU-2163 */ } else { - LASSERT(cfs_list_empty(&job->js_list)); + LASSERT(list_empty(&job->js_list)); write_lock(&stats->ojs_lock); - cfs_list_add_tail(&job->js_list, &stats->ojs_list); + list_add_tail(&job->js_list, &stats->ojs_list); write_unlock(&stats->ojs_lock); } @@ -275,7 +275,7 @@ void lprocfs_job_stats_fini(struct obd_device *obd) cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback, &oldest); cfs_hash_putref(stats->ojs_hash); stats->ojs_hash = NULL; - LASSERT(cfs_list_empty(&stats->ojs_list)); + LASSERT(list_empty(&stats->ojs_list)); } EXPORT_SYMBOL(lprocfs_job_stats_fini); @@ -289,7 +289,7 @@ static void *lprocfs_jobstats_seq_start(struct seq_file *p, loff_t *pos) if (off == 0) return SEQ_START_TOKEN; off--; - cfs_list_for_each_entry(job, &stats->ojs_list, js_list) { + list_for_each_entry(job, &stats->ojs_list, js_list) { if (!off--) return job; } @@ -307,7 +307,7 @@ static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos) { struct obd_job_stats *stats = p->private; struct job_stat *job; - cfs_list_t *next; + struct list_head *next; ++*pos; if (v == SEQ_START_TOKEN) { @@ -318,7 +318,7 @@ static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos) } return next == &stats->ojs_list ? 
NULL : - cfs_list_entry(next, struct job_stat, js_list); + list_entry(next, struct job_stat, js_list); } /* @@ -513,7 +513,7 @@ int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num, if (stats->ojs_hash == NULL) RETURN(-ENOMEM); - CFS_INIT_LIST_HEAD(&stats->ojs_list); + INIT_LIST_HEAD(&stats->ojs_list); rwlock_init(&stats->ojs_lock); stats->ojs_cntr_num = cntr_num; stats->ojs_cntr_init_fn = init_fn; diff --git a/lustre/obdclass/lprocfs_status_server.c b/lustre/obdclass/lprocfs_status_server.c index 3ba8e70..5abee4e 100644 --- a/lustre/obdclass/lprocfs_status_server.c +++ b/lustre/obdclass/lprocfs_status_server.c @@ -217,7 +217,7 @@ void lprocfs_free_per_client_stats(struct obd_device *obd) /* not need locking because all clients is died */ while (!list_empty(&obd->obd_nid_stats)) { stat = list_entry(obd->obd_nid_stats.next, - struct nid_stat, nid_list); + struct nid_stat, nid_list); list_del_init(&stat->nid_list); cfs_hash_del(hash, &stat->nid, &stat->nid_hash); lprocfs_free_client_stats(stat); @@ -312,11 +312,12 @@ ssize_t lprocfs_nid_stats_clear_seq_write(struct file *file, const char *buffer, size_t count, loff_t *off) { - struct list_head free_list = LIST_HEAD_INIT(free_list); struct seq_file *m = file->private_data; struct obd_device *obd = m->private; struct nid_stat *client_stat; + struct list_head free_list; + INIT_LIST_HEAD(&free_list); cfs_hash_cond_del(obd->obd_nid_stats_hash, lprocfs_nid_stats_clear_write_cb, &free_list); @@ -446,16 +447,17 @@ EXPORT_SYMBOL(lprocfs_nid_stats_clear_read); int lprocfs_nid_stats_clear_write(struct file *file, const char *buffer, unsigned long count, void *data) { - struct list_head free_list = LIST_HEAD_INIT(free_list); struct obd_device *obd = (struct obd_device *)data; struct nid_stat *client_stat; + struct list_head free_list; + INIT_LIST_HEAD(&free_list); cfs_hash_cond_del(obd->obd_nid_stats_hash, lprocfs_nid_stats_clear_write_cb, &free_list); while (!list_empty(&free_list)) { client_stat = list_entry(free_list.next, struct nid_stat, - nid_list); + nid_list); list_del_init(&client_stat->nid_list); lprocfs_free_client_stats(client_stat); } diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index 86ccdc4..cc43d7c 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -60,8 +60,6 @@ #include #include -extern spinlock_t obd_types_lock; - enum { LU_CACHE_PERCENT_MAX = 50, LU_CACHE_PERCENT_DEFAULT = 20 @@ -121,10 +119,10 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) if (fid_is_zero(fid)) { LASSERT(top->loh_hash.next == NULL && top->loh_hash.pprev == NULL); - LASSERT(cfs_list_empty(&top->loh_lru)); + LASSERT(list_empty(&top->loh_lru)); if (!atomic_dec_and_test(&top->loh_ref)) return; - cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_release != NULL) o->lo_ops->loo_object_release(env, o); } @@ -153,14 +151,14 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) * When last reference is released, iterate over object * layers, and notify them that object is no longer busy. 
*/ - cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_release != NULL) o->lo_ops->loo_object_release(env, o); } if (!lu_object_is_dying(top)) { - LASSERT(cfs_list_empty(&top->loh_lru)); - cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru); + LASSERT(list_empty(&top->loh_lru)); + list_add_tail(&top->loh_lru, &bkt->lsb_lru); cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); return; } @@ -213,7 +211,7 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o) cfs_hash_bd_t bd; cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); - cfs_list_del_init(&top->loh_lru); + list_del_init(&top->loh_lru); cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); cfs_hash_bd_unlock(obj_hash, &bd, 1); } @@ -233,7 +231,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, { struct lu_object *scan; struct lu_object *top; - cfs_list_t *layers; + struct list_head *layers; unsigned int init_mask = 0; unsigned int init_flag; int clean; @@ -263,7 +261,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, */ clean = 1; init_flag = 1; - cfs_list_for_each_entry(scan, layers, lo_linkage) { + list_for_each_entry(scan, layers, lo_linkage) { if (init_mask & init_flag) goto next; clean = 0; @@ -279,7 +277,7 @@ next: } } while (!clean); - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_start != NULL) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { @@ -298,11 +296,11 @@ next: */ static void lu_object_free(const struct lu_env *env, struct lu_object *o) { - struct lu_site_bkt_data *bkt; - struct lu_site *site; - struct lu_object *scan; - cfs_list_t *layers; - cfs_list_t splice; + struct lu_site_bkt_data *bkt; + struct lu_site *site; + struct lu_object *scan; + struct list_head *layers; + struct list_head splice; site = o->lo_dev->ld_site; layers = &o->lo_header->loh_layers; @@ -310,7 +308,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) /* * First call ->loo_object_delete() method to release all resources. */ - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_delete != NULL) scan->lo_ops->loo_object_delete(env, scan); } @@ -321,16 +319,16 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * necessary, because lu_object_header is freed together with the * top-level slice. */ - CFS_INIT_LIST_HEAD(&splice); - cfs_list_splice_init(layers, &splice); - while (!cfs_list_empty(&splice)) { + INIT_LIST_HEAD(&splice); + list_splice_init(layers, &splice); + while (!list_empty(&splice)) { /* * Free layers in bottom-to-top order, so that object header * lives as long as possible and ->loo_object_free() methods * can look at its contents. 
*/ o = container_of0(splice.prev, struct lu_object, lo_linkage); - cfs_list_del_init(&o->lo_linkage); + list_del_init(&o->lo_linkage); LASSERT(o->lo_ops->loo_object_free != NULL); o->lo_ops->loo_object_free(env, o); } @@ -349,7 +347,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) struct lu_site_bkt_data *bkt; cfs_hash_bd_t bd; cfs_hash_bd_t bd2; - cfs_list_t dispose; + struct list_head dispose; int did_sth; int start; int count; @@ -359,7 +357,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) RETURN(0); - CFS_INIT_LIST_HEAD(&dispose); + INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. @@ -380,7 +378,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { + list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { LASSERT(atomic_read(&h->loh_ref) == 0); cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); @@ -388,7 +386,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) cfs_hash_bd_del_locked(s->ls_obj_hash, &bd2, &h->loh_hash); - cfs_list_move(&h->loh_lru, &dispose); + list_move(&h->loh_lru, &dispose); if (did_sth == 0) did_sth = 1; @@ -405,13 +403,13 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) * Free everything on the dispose list. This is safe against * races due to the reasons described in lu_object_put(). */ - while (!cfs_list_empty(&dispose)) { - h = container_of0(dispose.next, - struct lu_object_header, loh_lru); - cfs_list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); - } + while (!list_empty(&dispose)) { + h = container_of0(dispose.next, + struct lu_object_header, loh_lru); + list_del_init(&h->loh_lru); + lu_object_free(env, lu_object_top(h)); + lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); + } if (nr == 0) break; @@ -516,13 +514,13 @@ void lu_object_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct lu_object_header *hdr) { - (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", + (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), - PFID(&hdr->loh_fid), - cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash", - cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \ - "" : " lru", - hdr->loh_attr & LOHA_EXISTS ? " exist":""); + PFID(&hdr->loh_fid), + hlist_unhashed(&hdr->loh_hash) ? "" : " hash", + list_empty((struct list_head *)&hdr->loh_lru) ? \ + "" : " lru", + hdr->loh_attr & LOHA_EXISTS ? " exist" : ""); } EXPORT_SYMBOL(lu_object_header_print); @@ -540,7 +538,7 @@ void lu_object_print(const struct lu_env *env, void *cookie, lu_object_header_print(env, cookie, printer, top); (*printer)(env, cookie, "{\n"); - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { /* * print `.' 
\a depth times followed by type name and address */ @@ -565,7 +563,7 @@ int lu_object_invariant(const struct lu_object *o) struct lu_object_header *top; top = o->lo_header; - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_invariant != NULL && !o->lo_ops->loo_object_invariant(o)) return 0; @@ -580,10 +578,10 @@ static struct lu_object *htable_lookup(struct lu_site *s, wait_queue_t *waiter, __u64 *version) { - struct lu_site_bkt_data *bkt; - struct lu_object_header *h; - cfs_hlist_node_t *hnode; - __u64 ver = cfs_hash_bd_version_get(bd); + struct lu_site_bkt_data *bkt; + struct lu_object_header *h; + struct hlist_node *hnode; + __u64 ver = cfs_hash_bd_version_get(bd); if (*version == ver) return ERR_PTR(-ENOENT); @@ -602,7 +600,7 @@ static struct lu_object *htable_lookup(struct lu_site *s, if (likely(!lu_object_is_dying(h))) { cfs_hash_get(s->ls_obj_hash, hnode); lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); - cfs_list_del_init(&h->loh_lru); + list_del_init(&h->loh_lru); return lu_object_top(h); } @@ -623,7 +621,7 @@ static struct lu_object *htable_lookup_nowait(struct lu_site *s, cfs_hash_bd_t *bd, const struct lu_fid *f) { - cfs_hlist_node_t *hnode; + struct hlist_node *hnode; struct lu_object_header *h; /* cfs_hash_bd_peek_locked is a somehow "internal" function @@ -640,7 +638,7 @@ static struct lu_object *htable_lookup_nowait(struct lu_site *s, cfs_hash_get(s->ls_obj_hash, hnode); lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); - cfs_list_del_init(&h->loh_lru); + list_del_init(&h->loh_lru); return lu_object_top(h); } @@ -864,7 +862,7 @@ EXPORT_SYMBOL(lu_object_find_slice); /** * Global list of all device types. */ -static CFS_LIST_HEAD(lu_device_types); +static struct list_head lu_device_types; int lu_device_type_init(struct lu_device_type *ldt) { @@ -898,7 +896,7 @@ EXPORT_SYMBOL(lu_device_type_fini); /** * Global list of all sites on this node */ -static CFS_LIST_HEAD(lu_sites); +static struct list_head lu_sites; static DEFINE_MUTEX(lu_sites_guard); /** @@ -914,23 +912,23 @@ struct lu_site_print_arg { static int lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) + struct hlist_node *hnode, void *data) { - struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; - struct lu_object_header *h; + struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - if (!cfs_list_empty(&h->loh_layers)) { - const struct lu_object *o; + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (!list_empty(&h->loh_layers)) { + const struct lu_object *o; - o = lu_object_top(h); - lu_object_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, o); - } else { - lu_object_header_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, h); - } - return 0; + o = lu_object_top(h); + lu_object_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, o); + } else { + lu_object_header_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, h); + } + return 0; } /** @@ -1021,43 +1019,43 @@ static unsigned lu_obj_hop_hash(cfs_hash_t *hs, return hash & mask; } -static void *lu_obj_hop_object(cfs_hlist_node_t *hnode) +static void *lu_obj_hop_object(struct hlist_node *hnode) { - return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + return hlist_entry(hnode, struct lu_object_header, loh_hash); } -static void *lu_obj_hop_key(cfs_hlist_node_t 
*hnode) +static void *lu_obj_hop_key(struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - return &h->loh_fid; + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return &h->loh_fid; } -static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode) +static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); } -static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + h = hlist_entry(hnode, struct lu_object_header, loh_hash); if (atomic_add_return(1, &h->loh_ref) == 1) { - struct lu_site_bkt_data *bkt; - cfs_hash_bd_t bd; + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; - cfs_hash_bd_get(hs, &h->loh_fid, &bd); - bkt = cfs_hash_bd_extra_get(hs, &bd); - bkt->lsb_busy++; - } + cfs_hash_bd_get(hs, &h->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(hs, &bd); + bkt->lsb_busy++; + } } -static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) { LBUG(); /* we should never called it */ } @@ -1074,8 +1072,8 @@ cfs_hash_ops_t lu_site_hash_ops = { void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) { spin_lock(&s->ls_ld_lock); - if (cfs_list_empty(&d->ld_linkage)) - cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage); + if (list_empty(&d->ld_linkage)) + list_add(&d->ld_linkage, &s->ls_ld_linkage); spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_add_linkage); @@ -1083,7 +1081,7 @@ EXPORT_SYMBOL(lu_dev_add_linkage); void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d) { spin_lock(&s->ls_ld_lock); - cfs_list_del_init(&d->ld_linkage); + list_del_init(&d->ld_linkage); spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_del_linkage); @@ -1100,6 +1098,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) int i; ENTRY; + INIT_LIST_HEAD(&lu_sites); + memset(s, 0, sizeof *s); mutex_init(&s->ls_purge_mutex); bits = lu_htable_order(top); @@ -1126,7 +1126,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - CFS_INIT_LIST_HEAD(&bkt->lsb_lru); + INIT_LIST_HEAD(&bkt->lsb_lru); init_waitqueue_head(&bkt->lsb_marche_funebre); } @@ -1150,13 +1150,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, 0, "lru_purged", "lru_purged"); - CFS_INIT_LIST_HEAD(&s->ls_linkage); + INIT_LIST_HEAD(&s->ls_linkage); s->ls_top_dev = top; top->ld_site = s; lu_device_get(top); lu_ref_add(&top->ld_reference, "site-top", s); - CFS_INIT_LIST_HEAD(&s->ls_ld_linkage); + INIT_LIST_HEAD(&s->ls_ld_linkage); spin_lock_init(&s->ls_ld_lock); lu_dev_add_linkage(s, top); @@ -1171,7 +1171,7 @@ EXPORT_SYMBOL(lu_site_init); void lu_site_fini(struct lu_site *s) { mutex_lock(&lu_sites_guard); - cfs_list_del_init(&s->ls_linkage); + list_del_init(&s->ls_linkage); mutex_unlock(&lu_sites_guard); if (s->ls_obj_hash != NULL) { @@ 
-1200,7 +1200,7 @@ int lu_site_init_finish(struct lu_site *s) mutex_lock(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) - cfs_list_add(&s->ls_linkage, &lu_sites); + list_add(&s->ls_linkage, &lu_sites); mutex_unlock(&lu_sites_guard); return result; } @@ -1278,7 +1278,7 @@ int lu_object_init(struct lu_object *o, struct lu_object_header *h, o->lo_dev = d; lu_device_get(d); lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); - CFS_INIT_LIST_HEAD(&o->lo_linkage); + INIT_LIST_HEAD(&o->lo_linkage); return 0; } @@ -1291,7 +1291,7 @@ void lu_object_fini(struct lu_object *o) { struct lu_device *dev = o->lo_dev; - LASSERT(cfs_list_empty(&o->lo_linkage)); + LASSERT(list_empty(&o->lo_linkage)); if (dev != NULL) { lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, @@ -1310,7 +1310,7 @@ EXPORT_SYMBOL(lu_object_fini); */ void lu_object_add_top(struct lu_object_header *h, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &h->loh_layers); + list_move(&o->lo_linkage, &h->loh_layers); } EXPORT_SYMBOL(lu_object_add_top); @@ -1322,7 +1322,7 @@ EXPORT_SYMBOL(lu_object_add_top); */ void lu_object_add(struct lu_object *before, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &before->lo_linkage); + list_move(&o->lo_linkage, &before->lo_linkage); } EXPORT_SYMBOL(lu_object_add); @@ -1333,9 +1333,9 @@ int lu_object_header_init(struct lu_object_header *h) { memset(h, 0, sizeof *h); atomic_set(&h->loh_ref, 1); - CFS_INIT_HLIST_NODE(&h->loh_hash); - CFS_INIT_LIST_HEAD(&h->loh_lru); - CFS_INIT_LIST_HEAD(&h->loh_layers); + INIT_HLIST_NODE(&h->loh_hash); + INIT_LIST_HEAD(&h->loh_lru); + INIT_LIST_HEAD(&h->loh_layers); lu_ref_init(&h->loh_reference); return 0; } @@ -1346,9 +1346,9 @@ EXPORT_SYMBOL(lu_object_header_init); */ void lu_object_header_fini(struct lu_object_header *h) { - LASSERT(cfs_list_empty(&h->loh_layers)); - LASSERT(cfs_list_empty(&h->loh_lru)); - LASSERT(cfs_hlist_unhashed(&h->loh_hash)); + LASSERT(list_empty(&h->loh_layers)); + LASSERT(list_empty(&h->loh_lru)); + LASSERT(hlist_unhashed(&h->loh_hash)); lu_ref_fini(&h->loh_reference); } EXPORT_SYMBOL(lu_object_header_fini); @@ -1360,18 +1360,16 @@ EXPORT_SYMBOL(lu_object_header_fini); struct lu_object *lu_object_locate(struct lu_object_header *h, const struct lu_device_type *dtype) { - struct lu_object *o; + struct lu_object *o; - cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) { - if (o->lo_dev->ld_type == dtype) - return o; - } - return NULL; + list_for_each_entry(o, &h->loh_layers, lo_linkage) { + if (o->lo_dev->ld_type == dtype) + return o; + } + return NULL; } EXPORT_SYMBOL(lu_object_locate); - - /** * Finalize and free devices in the device stack. * @@ -1604,7 +1602,7 @@ EXPORT_SYMBOL(lu_context_key_get); /** * List of remembered contexts. XXX document me. */ -static CFS_LIST_HEAD(lu_context_remembered); +static struct list_head lu_context_remembered; /** * Destroy \a key in all remembered contexts. This is used to destroy key @@ -1626,8 +1624,8 @@ void lu_context_key_quiesce(struct lu_context_key *key) * XXX memory barrier has to go here. 
                  */
                 spin_lock(&lu_keys_guard);
-                cfs_list_for_each_entry(ctx, &lu_context_remembered,
-                                        lc_remember)
+                list_for_each_entry(ctx, &lu_context_remembered,
+                                    lc_remember)
                         key_fini(ctx, key->lct_index);
                 spin_unlock(&lu_keys_guard);
                 ++key_set_version;
@@ -1721,10 +1717,10 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
         ctx->lc_tags = tags;
         if (tags & LCT_REMEMBER) {
                 spin_lock(&lu_keys_guard);
-                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+                list_add(&ctx->lc_remember, &lu_context_remembered);
                 spin_unlock(&lu_keys_guard);
         } else {
-                CFS_INIT_LIST_HEAD(&ctx->lc_remember);
+                INIT_LIST_HEAD(&ctx->lc_remember);
         }
 
         rc = keys_init(ctx);
@@ -1744,13 +1740,13 @@ void lu_context_fini(struct lu_context *ctx)
         ctx->lc_state = LCS_FINALIZED;
 
         if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
-                LASSERT(cfs_list_empty(&ctx->lc_remember));
+                LASSERT(list_empty(&ctx->lc_remember));
                 keys_fini(ctx);
 
         } else { /* could race with key deregister */
                 spin_lock(&lu_keys_guard);
                 keys_fini(ctx);
-                cfs_list_del_init(&ctx->lc_remember);
+                list_del_init(&ctx->lc_remember);
                 spin_unlock(&lu_keys_guard);
         }
 }
@@ -1924,7 +1920,7 @@ static void lu_site_stats_get(cfs_hash_t *hs,
         cfs_hash_for_each_bucket(hs, &bd, i) {
                 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
-                cfs_hlist_head_t *hhead;
+                struct hlist_head *hhead;
 
                 cfs_hash_bd_lock(hs, &bd, 1);
                 stats->lss_busy += bkt->lsb_busy;
@@ -1937,7 +1933,7 @@ static void lu_site_stats_get(cfs_hash_t *hs,
                 }
 
                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
-                        if (!cfs_hlist_empty(hhead))
+                        if (!hlist_empty(hhead))
                                 stats->lss_populated++;
                 }
                 cfs_hash_bd_unlock(hs, &bd, 1);
@@ -2112,6 +2108,10 @@ int lu_global_init(void)
 
         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
 
+        INIT_LIST_HEAD(&lu_device_types);
+        INIT_LIST_HEAD(&lu_context_remembered);
+        INIT_LIST_HEAD(&lu_sites);
+
         result = lu_ref_global_init();
         if (result != 0)
                 return result;
diff --git a/lustre/obdclass/lu_ref.c b/lustre/obdclass/lu_ref.c
index 1541949..94ad740 100644
--- a/lustre/obdclass/lu_ref.c
+++ b/lustre/obdclass/lu_ref.c
@@ -89,12 +89,12 @@ static struct lu_kmem_descr lu_ref_caches[] = {
 *
 * Protected by lu_ref_refs_guard.
 */
-static CFS_LIST_HEAD(lu_ref_refs);
+static struct list_head lu_ref_refs;
 static spinlock_t lu_ref_refs_guard;
 static struct lu_ref lu_ref_marker = {
         .lf_guard   = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
-        .lf_list    = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
-        .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
+        .lf_list    = LIST_HEAD_INIT(lu_ref_marker.lf_list),
+        .lf_linkage = LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
 };
 
 void lu_ref_print(const struct lu_ref *ref)
@@ -103,7 +103,7 @@ void lu_ref_print(const struct lu_ref *ref)
         CERROR("lu_ref: %p %d %d %s:%d\n",
                ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
-        cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+        list_for_each_entry(link, &ref->lf_list, ll_linkage) {
                 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
         }
 }
@@ -119,7 +119,7 @@ void lu_ref_print_all(void)
         struct lu_ref *ref;
 
         spin_lock(&lu_ref_refs_guard);
-        cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+        list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
                 if (lu_ref_is_marker(ref))
                         continue;
 
@@ -137,19 +137,19 @@ void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
         ref->lf_func = func;
         ref->lf_line = line;
         spin_lock_init(&ref->lf_guard);
-        CFS_INIT_LIST_HEAD(&ref->lf_list);
+        INIT_LIST_HEAD(&ref->lf_list);
         spin_lock(&lu_ref_refs_guard);
-        cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
+        list_add(&ref->lf_linkage, &lu_ref_refs);
         spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_init_loc);
 
 void lu_ref_fini(struct lu_ref *ref)
 {
-        REFASSERT(ref, cfs_list_empty(&ref->lf_list));
+        REFASSERT(ref, list_empty(&ref->lf_list));
         REFASSERT(ref, ref->lf_refs == 0);
         spin_lock(&lu_ref_refs_guard);
-        cfs_list_del_init(&ref->lf_linkage);
+        list_del_init(&ref->lf_linkage);
         spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_fini);
@@ -169,7 +169,7 @@ static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
                 link->ll_scope = scope;
                 link->ll_source = source;
                 spin_lock(&ref->lf_guard);
-                cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
+                list_add_tail(&link->ll_linkage, &ref->lf_list);
                 ref->lf_refs++;
                 spin_unlock(&ref->lf_guard);
         }
@@ -199,7 +199,7 @@ void lu_ref_add_at(struct lu_ref *ref, struct lu_ref_link *link,
         link->ll_scope = scope;
         link->ll_source = source;
         spin_lock(&ref->lf_guard);
-        cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
+        list_add_tail(&link->ll_linkage, &ref->lf_list);
         ref->lf_refs++;
         spin_unlock(&ref->lf_guard);
 }
@@ -236,7 +236,7 @@ static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
         unsigned iterations;
 
         iterations = 0;
-        cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+        list_for_each_entry(link, &ref->lf_list, ll_linkage) {
                 ++iterations;
                 if (lu_ref_link_eq(link, scope, source)) {
                         if (iterations > lu_ref_chain_max_length) {
@@ -257,7 +257,7 @@ void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
         spin_lock(&ref->lf_guard);
         link = lu_ref_find(ref, scope, source);
         if (link != NULL) {
-                cfs_list_del(&link->ll_linkage);
+                list_del(&link->ll_linkage);
                 ref->lf_refs--;
                 spin_unlock(&ref->lf_guard);
                 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
@@ -290,7 +290,7 @@ void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
         spin_lock(&ref->lf_guard);
         REFASSERT(ref, link->ll_ref == ref);
         REFASSERT(ref, lu_ref_link_eq(link, scope, source));
-        cfs_list_del(&link->ll_linkage);
+        list_del(&link->ll_linkage);
         ref->lf_refs--;
         spin_unlock(&ref->lf_guard);
 }
@@ -303,7 +303,7 @@ static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
         struct lu_ref *ref = seq->private;
 
         spin_lock(&lu_ref_refs_guard);
-        if (cfs_list_empty(&ref->lf_linkage))
+        if (list_empty(&ref->lf_linkage))
                 ref = NULL;
         spin_unlock(&lu_ref_refs_guard);
 
@@ -316,15 +316,15 @@ static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
         struct lu_ref *next;
 
         LASSERT(seq->private == p);
-        LASSERT(!cfs_list_empty(&ref->lf_linkage));
+        LASSERT(!list_empty(&ref->lf_linkage));
 
         spin_lock(&lu_ref_refs_guard);
-        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+        next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
         if (&next->lf_linkage == &lu_ref_refs) {
                 p = NULL;
         } else {
                 (*pos)++;
-                cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
+                list_move(&ref->lf_linkage, &next->lf_linkage);
         }
         spin_unlock(&lu_ref_refs_guard);
         return p;
@@ -342,7 +342,7 @@ static int lu_ref_seq_show(struct seq_file *seq, void *p)
         struct lu_ref *next;
 
         spin_lock(&lu_ref_refs_guard);
-        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+        next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
         if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
                 spin_unlock(&lu_ref_refs_guard);
                 return 0;
@@ -359,7 +359,7 @@ static int lu_ref_seq_show(struct seq_file *seq, void *p)
                 struct lu_ref_link *link;
                 int i = 0;
 
-                cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
+                list_for_each_entry(link, &next->lf_list, ll_linkage)
                         seq_printf(seq, " #%d link: %s %p\n",
                                    i++, link->ll_scope, link->ll_source);
         }
@@ -384,10 +384,10 @@ static int lu_ref_seq_open(struct inode *inode, struct file *file)
         result = seq_open(file, &lu_ref_seq_ops);
         if (result == 0) {
                 spin_lock(&lu_ref_refs_guard);
-                if (!cfs_list_empty(&marker->lf_linkage))
+                if (!list_empty(&marker->lf_linkage))
                         result = -EAGAIN;
                 else
-                        cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
+                        list_add(&marker->lf_linkage, &lu_ref_refs);
                 spin_unlock(&lu_ref_refs_guard);
 
                 if (result == 0) {
@@ -406,7 +406,7 @@ static int lu_ref_seq_release(struct inode *inode, struct file *file)
         struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
 
         spin_lock(&lu_ref_refs_guard);
-        cfs_list_del_init(&ref->lf_linkage);
+        list_del_init(&ref->lf_linkage);
         spin_unlock(&lu_ref_refs_guard);
 
         return seq_release(inode, file);
@@ -429,6 +429,7 @@ int lu_ref_global_init(void)
 
         CDEBUG(D_CONSOLE,
                "lu_ref tracking is enabled. Performance isn't.\n");
+        INIT_LIST_HEAD(&lu_ref_refs);
         spin_lock_init(&lu_ref_refs_guard);
         result = lu_kmem_init(lu_ref_caches);
 
diff --git a/lustre/obdclass/lustre_handles.c b/lustre/obdclass/lustre_handles.c
index 4833256..e4dc355 100644
--- a/lustre/obdclass/lustre_handles.c
+++ b/lustre/obdclass/lustre_handles.c
@@ -48,11 +48,11 @@
 #include
 
 #ifndef __KERNEL__
-# define list_add_rcu            cfs_list_add
-# define list_del_rcu            cfs_list_del
-# define list_for_each_rcu       cfs_list_for_each
-# define list_for_each_safe_rcu  cfs_list_for_each_safe
-# define list_for_each_entry_rcu cfs_list_for_each_entry
+# define list_add_rcu            list_add
+# define list_del_rcu            list_del
+# define list_for_each_rcu       list_for_each
+# define list_for_each_safe_rcu  list_for_each_safe
+# define list_for_each_entry_rcu list_for_each_entry
 # define rcu_read_lock()         spin_lock(&bucket->lock)
 # define rcu_read_unlock()       spin_unlock(&bucket->lock)
 #endif /* !__KERNEL__ */
@@ -62,8 +62,8 @@ static __u64 handle_base;
 static spinlock_t handle_base_lock;
 
 static struct handle_bucket {
-        spinlock_t lock;
-        cfs_list_t head;
+        spinlock_t       lock;
+        struct list_head head;
 } *handle_hash;
 
 #define HANDLE_HASH_SIZE (1 << 16)
@@ -80,7 +80,7 @@ void class_handle_hash(struct portals_handle *h,
         ENTRY;
 
         LASSERT(h != NULL);
-        LASSERT(cfs_list_empty(&h->h_link));
+        LASSERT(list_empty(&h->h_link));
 
         /*
          * This is fast, but simplistic cookie generation algorithm, it will
@@ -118,7 +118,7 @@ EXPORT_SYMBOL(class_handle_hash);
 
 static void class_handle_unhash_nolock(struct portals_handle *h)
 {
-        if (cfs_list_empty(&h->h_link)) {
+        if (list_empty(&h->h_link)) {
                 CERROR("removing an already-removed handle ("LPX64")\n",
                        h->h_cookie);
                 return;
@@ -223,7 +223,7 @@ int class_handle_init(void)
         spin_lock_init(&handle_base_lock);
         for (bucket = handle_hash + HANDLE_HASH_SIZE - 1;
              bucket >= handle_hash; bucket--) {
-                CFS_INIT_LIST_HEAD(&bucket->head);
+                INIT_LIST_HEAD(&bucket->head);
                 spin_lock_init(&bucket->lock);
         }
 
diff --git a/lustre/obdclass/lustre_peer.c b/lustre/obdclass/lustre_peer.c
index 6ee43ab..46581a5 100644
--- a/lustre/obdclass/lustre_peer.c
+++ b/lustre/obdclass/lustre_peer.c
@@ -50,19 +50,19 @@
 #define NIDS_MAX 32
 
 struct uuid_nid_data {
-        cfs_list_t       un_list;
-        struct obd_uuid  un_uuid;
-        int              un_nid_count;
-        lnet_nid_t       un_nids[NIDS_MAX];
+        struct list_head un_list;
+        struct obd_uuid  un_uuid;
+        int              un_nid_count;
+        lnet_nid_t       un_nids[NIDS_MAX];
 };
 
 /* FIXME: This should probably become more elegant than a global linked list */
-static cfs_list_t g_uuid_list;
+static struct list_head g_uuid_list;
 static spinlock_t g_uuid_lock;
 
 void class_init_uuidlist(void)
 {
-        CFS_INIT_LIST_HEAD(&g_uuid_list);
+        INIT_LIST_HEAD(&g_uuid_list);
         spin_lock_init(&g_uuid_lock);
 }
 
@@ -80,7 +80,7 @@ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
         obd_str2uuid(&tmp, uuid);
 
         spin_lock(&g_uuid_lock);
-        cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
+        list_for_each_entry(data, &g_uuid_list, un_list) {
                 if (obd_uuid_equals(&data->un_uuid, &tmp)) {
                         if (index >= data->un_nid_count)
                                 break;
@@ -116,7 +116,7 @@ int class_add_uuid(const char *uuid, __u64 nid)
         data->un_nid_count = 1;
 
         spin_lock(&g_uuid_lock);
-        cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
+        list_for_each_entry(entry, &g_uuid_list, un_list) {
                 if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
                         int i;
 
@@ -133,7 +133,7 @@ int class_add_uuid(const char *uuid, __u64 nid)
                 }
         }
         if (!found)
-                cfs_list_add(&data->un_list, &g_uuid_list);
+                list_add(&data->un_list, &g_uuid_list);
         spin_unlock(&g_uuid_lock);
 
         if (found) {
@@ -150,43 +150,44 @@ EXPORT_SYMBOL(class_add_uuid);
 /* Delete the nids for one uuid if specified, otherwise delete all */
 int class_del_uuid(const char *uuid)
 {
-        CFS_LIST_HEAD(deathrow);
         struct uuid_nid_data *data;
+        struct list_head deathrow;
+
+        INIT_LIST_HEAD(&deathrow);
 
         spin_lock(&g_uuid_lock);
-        if (uuid != NULL) {
-                struct obd_uuid tmp;
-
-                obd_str2uuid(&tmp, uuid);
-                cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
-                        if (obd_uuid_equals(&data->un_uuid, &tmp)) {
-                                cfs_list_move(&data->un_list, &deathrow);
-                                break;
-                        }
-                }
-        } else
-                cfs_list_splice_init(&g_uuid_list, &deathrow);
+        if (uuid != NULL) {
+                struct obd_uuid tmp;
+
+                obd_str2uuid(&tmp, uuid);
+                list_for_each_entry(data, &g_uuid_list, un_list) {
+                        if (obd_uuid_equals(&data->un_uuid, &tmp)) {
+                                list_move(&data->un_list, &deathrow);
+                                break;
+                        }
+                }
+        } else
+                list_splice_init(&g_uuid_list, &deathrow);
         spin_unlock(&g_uuid_lock);
 
-        if (uuid != NULL && cfs_list_empty(&deathrow)) {
-                CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
-                return -EINVAL;
-        }
-
-        while (!cfs_list_empty(&deathrow)) {
-                data = cfs_list_entry(deathrow.next, struct uuid_nid_data,
-                                      un_list);
-                cfs_list_del(&data->un_list);
+        if (uuid != NULL && list_empty(&deathrow)) {
+                CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
+                return -EINVAL;
+        }
 
-                CDEBUG(D_INFO, "del uuid %s %s/%d\n",
-                       obd_uuid2str(&data->un_uuid),
-                       libcfs_nid2str(data->un_nids[0]),
-                       data->un_nid_count);
+        while (!list_empty(&deathrow)) {
+                data = list_entry(deathrow.next, struct uuid_nid_data,
+                                  un_list);
+                list_del(&data->un_list);
 
-                OBD_FREE(data, sizeof(*data));
-        }
+                CDEBUG(D_INFO, "del uuid %s %s/%d\n",
+                       obd_uuid2str(&data->un_uuid),
+                       libcfs_nid2str(data->un_nids[0]),
+                       data->un_nid_count);
 
-        return 0;
+                OBD_FREE(data, sizeof(*data));
+        }
+        return 0;
 }
 
 /* check if @nid exists in nid list of @uuid */
@@ -200,7 +201,7 @@ int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
                obd_uuid2str(uuid), libcfs_nid2str(nid));
 
         spin_lock(&g_uuid_lock);
-        cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
+        list_for_each_entry(entry, &g_uuid_list, un_list) {
                 int i;
 
                 if (!obd_uuid_equals(&entry->un_uuid, uuid))
diff --git a/lustre/obdclass/obd_config.c b/lustre/obdclass/obd_config.c
index 3914193..c080610 100644
--- a/lustre/obdclass/obd_config.c
+++ b/lustre/obdclass/obd_config.c
@@ -388,11 +388,11 @@ int class_attach(struct lustre_cfg *lcfg)
         obd->obd_pool_limit = 0;
         obd->obd_pool_slv = 0;
 
-        CFS_INIT_LIST_HEAD(&obd->obd_exports);
-        CFS_INIT_LIST_HEAD(&obd->obd_unlinked_exports);
-        CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
-        CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
-        CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
+        INIT_LIST_HEAD(&obd->obd_exports);
+        INIT_LIST_HEAD(&obd->obd_unlinked_exports);
+        INIT_LIST_HEAD(&obd->obd_delayed_exports);
+        INIT_LIST_HEAD(&obd->obd_exports_timed);
+        INIT_LIST_HEAD(&obd->obd_nid_stats);
         spin_lock_init(&obd->obd_nid_lock);
         spin_lock_init(&obd->obd_dev_lock);
         mutex_init(&obd->obd_dev_mutex);
@@ -408,10 +408,10 @@ int class_attach(struct lustre_cfg *lcfg)
         spin_lock_init(&obd->obd_recovery_task_lock);
         init_waitqueue_head(&obd->obd_next_transno_waitq);
         init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
-        CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
-        CFS_INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
-        CFS_INIT_LIST_HEAD(&obd->obd_final_req_queue);
-        CFS_INIT_LIST_HEAD(&obd->obd_evict_list);
+        INIT_LIST_HEAD(&obd->obd_req_replay_queue);
+        INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
+        INIT_LIST_HEAD(&obd->obd_final_req_queue);
+        INIT_LIST_HEAD(&obd->obd_evict_list);
         INIT_LIST_HEAD(&obd->obd_lwp_list);
 
         llog_group_init(&obd->obd_olg);
@@ -528,7 +528,7 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 GOTO(err_hash, err = PTR_ERR(exp));
 
         obd->obd_self_export = exp;
-        cfs_list_del_init(&exp->exp_obd_chain_timed);
+        list_del_init(&exp->exp_obd_chain_timed);
         class_export_put(exp);
 
         err = obd_setup(obd, lcfg);
@@ -823,14 +823,15 @@ int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
         RETURN(rc);
 }
 
-CFS_LIST_HEAD(lustre_profile_list);
+struct list_head lustre_profile_list =
+        LIST_HEAD_INIT(lustre_profile_list);
 
 struct lustre_profile *class_get_profile(const char * prof)
 {
         struct lustre_profile *lprof;
 
         ENTRY;
-        cfs_list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
+        list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
                 if (!strcmp(lprof->lp_profile, prof)) {
                         RETURN(lprof);
                 }
@@ -855,7 +856,7 @@ int class_add_profile(int proflen, char *prof, int osclen, char *osc,
         OBD_ALLOC(lprof, sizeof(*lprof));
         if (lprof == NULL)
                 RETURN(-ENOMEM);
-        CFS_INIT_LIST_HEAD(&lprof->lp_list);
+        INIT_LIST_HEAD(&lprof->lp_list);
 
         LASSERT(proflen == (strlen(prof) + 1));
         OBD_ALLOC(lprof->lp_profile, proflen);
@@ -877,7 +878,7 @@ int class_add_profile(int proflen, char *prof, int osclen, char *osc,
                 memcpy(lprof->lp_md, mdc, mdclen);
         }
 
-        cfs_list_add(&lprof->lp_list, &lustre_profile_list);
+        list_add(&lprof->lp_list, &lustre_profile_list);
         RETURN(err);
 
 out:
@@ -900,7 +901,7 @@ void class_del_profile(const char *prof)
 
         lprof = class_get_profile(prof);
         if (lprof) {
-                cfs_list_del(&lprof->lp_list);
+                list_del(&lprof->lp_list);
                 OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
                 OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
                 if (lprof->lp_md)
@@ -917,8 +918,8 @@ void class_del_profiles(void)
         struct lustre_profile *lprof, *n;
         ENTRY;
 
-        cfs_list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
-                cfs_list_del(&lprof->lp_list);
+        list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
+                list_del(&lprof->lp_list);
                 OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
                 OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
                 if (lprof->lp_md)
@@ -2033,13 +2034,13 @@ uuid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
 }
 
 static void *
-uuid_key(cfs_hlist_node_t *hnode)
+uuid_key(struct hlist_node *hnode)
 {
-        struct obd_export *exp;
+        struct obd_export *exp;
 
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 
-        return &exp->exp_client_uuid;
+        return &exp->exp_client_uuid;
 }
 
 /*
@@ -2047,38 +2048,38 @@ uuid_key(cfs_hlist_node_t *hnode)
 * state with this function
 */
 static int
-uuid_keycmp(const void *key, cfs_hlist_node_t *hnode)
+uuid_keycmp(const void *key, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
         LASSERT(key);
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 
         return obd_uuid_equals(key, &exp->exp_client_uuid) &&
                !exp->exp_failed;
 }
 
 static void *
-uuid_export_object(cfs_hlist_node_t *hnode)
+uuid_export_object(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        return hlist_entry(hnode, struct obd_export, exp_uuid_hash);
 }
 
 static void
-uuid_export_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
         class_export_get(exp);
 }
 
 static void
-uuid_export_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
         class_export_put(exp);
 }
 
@@ -2103,11 +2104,11 @@ nid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
 }
 
 static void *
-nid_key(cfs_hlist_node_t *hnode)
+nid_key(struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
 
         RETURN(&exp->exp_connection->c_peer.nid);
 }
 
@@ -2117,38 +2118,38 @@ nid_key(cfs_hlist_node_t *hnode)
 * state with this function
 */
 static int
-nid_kepcmp(const void *key, cfs_hlist_node_t *hnode)
+nid_kepcmp(const void *key, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
         LASSERT(key);
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
 
         RETURN(exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
                !exp->exp_failed);
 }
 
 static void *
-nid_export_object(cfs_hlist_node_t *hnode)
+nid_export_object(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        return hlist_entry(hnode, struct obd_export, exp_nid_hash);
 }
 
 static void
-nid_export_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
         class_export_get(exp);
 }
 
 static void
-nid_export_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct obd_export *exp;
 
-        exp = cfs_hlist_entry(hnode, struct obd_export, exp_nid_hash);
+        exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
         class_export_put(exp);
 }
 
@@ -2167,42 +2168,42 @@ static cfs_hash_ops_t nid_hash_ops = {
 */
 
 static void *
-nidstats_key(cfs_hlist_node_t *hnode)
+nidstats_key(struct hlist_node *hnode)
 {
         struct nid_stat *ns;
 
-        ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+        ns = hlist_entry(hnode, struct nid_stat, nid_hash);
 
         return &ns->nid;
 }
 
 static int
-nidstats_keycmp(const void *key, cfs_hlist_node_t *hnode)
+nidstats_keycmp(const void *key, struct hlist_node *hnode)
 {
         return *(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key;
 }
 
 static void *
-nidstats_object(cfs_hlist_node_t *hnode)
+nidstats_object(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+        return hlist_entry(hnode, struct nid_stat, nid_hash);
 }
 
 static void
-nidstats_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct nid_stat *ns;
 
-        ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+        ns = hlist_entry(hnode, struct nid_stat, nid_hash);
         nidstat_getref(ns);
 }
 
 static void
-nidstats_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct nid_stat *ns;
 
-        ns = cfs_hlist_entry(hnode, struct nid_stat, nid_hash);
+        ns = hlist_entry(hnode, struct nid_stat, nid_hash);
         nidstat_putref(ns);
 }
 
diff --git a/lustre/obdclass/obd_mount_server.c b/lustre/obdclass/obd_mount_server.c
index 0cefea2..fba448d 100644
--- a/lustre/obdclass/obd_mount_server.c
+++ b/lustre/obdclass/obd_mount_server.c
@@ -64,17 +64,18 @@
 /*********** mount lookup *********/
 
 DEFINE_MUTEX(lustre_mount_info_lock);
-static CFS_LIST_HEAD(server_mount_info_list);
+static struct list_head server_mount_info_list =
+        LIST_HEAD_INIT(server_mount_info_list);
 
 static struct lustre_mount_info *server_find_mount(const char *name)
 {
-        cfs_list_t *tmp;
+        struct list_head *tmp;
         struct lustre_mount_info *lmi;
         ENTRY;
 
-        cfs_list_for_each(tmp, &server_mount_info_list) {
-                lmi = cfs_list_entry(tmp, struct lustre_mount_info,
-                                     lmi_list_chain);
+        list_for_each(tmp, &server_mount_info_list) {
+                lmi = list_entry(tmp, struct lustre_mount_info,
+                                 lmi_list_chain);
                 if (strcmp(name, lmi->lmi_name) == 0)
                         RETURN(lmi);
         }
@@ -113,7 +114,7 @@ static int server_register_mount(const char *name, struct super_block *sb)
         }
         lmi->lmi_name = name_cp;
         lmi->lmi_sb = sb;
-        cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
+        list_add(&lmi->lmi_list_chain, &server_mount_info_list);
 
         mutex_unlock(&lustre_mount_info_lock);
 
@@ -139,7 +140,7 @@ static int server_deregister_mount(const char *name)
 
         CDEBUG(D_MOUNT, "deregister mount %p from %s\n", lmi->lmi_sb, name);
         OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
-        cfs_list_del(&lmi->lmi_list_chain);
+        list_del(&lmi->lmi_list_chain);
         OBD_FREE(lmi, sizeof(*lmi));
 
         mutex_unlock(&lustre_mount_info_lock);
@@ -374,7 +375,8 @@ cleanup:
 }
 EXPORT_SYMBOL(tgt_name2lwp_name);
 
-static CFS_LIST_HEAD(lwp_register_list);
+static struct list_head lwp_register_list =
+        LIST_HEAD_INIT(lwp_register_list);
 DEFINE_MUTEX(lwp_register_list_lock);
 
 int lustre_register_lwp_item(const char *lwpname, struct obd_export **exp,
@@ -413,8 +415,8 @@ int lustre_register_lwp_item(const char *lwpname, struct obd_export **exp,
         lri->lri_exp = exp;
         lri->lri_cb_func = cb_func;
         lri->lri_cb_data = cb_data;
-        CFS_INIT_LIST_HEAD(&lri->lri_list);
-        cfs_list_add(&lri->lri_list, &lwp_register_list);
+        INIT_LIST_HEAD(&lri->lri_list);
+        list_add(&lri->lri_list, &lwp_register_list);
         if (*exp != NULL && cb_func != NULL)
                 cb_func(cb_data);
 
@@ -429,11 +431,11 @@ void lustre_deregister_lwp_item(struct obd_export **exp)
         struct lwp_register_item *lri, *tmp;
 
         mutex_lock(&lwp_register_list_lock);
-        cfs_list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
+        list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
                 if (exp == lri->lri_exp) {
                         if (*exp)
                                 class_export_put(*exp);
-                        cfs_list_del(&lri->lri_list);
+                        list_del(&lri->lri_list);
                         OBD_FREE_PTR(lri);
                         break;
                 }
@@ -489,7 +491,7 @@ static void lustre_notify_lwp_list(struct obd_export *exp)
         LASSERT(exp != NULL);
 
         mutex_lock(&lwp_register_list_lock);
-        cfs_list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
+        list_for_each_entry_safe(lri, tmp, &lwp_register_list, lri_list) {
                 if (strcmp(exp->exp_obd->obd_name, lri->lri_name))
                         continue;
                 if (*lri->lri_exp != NULL)
-- 
1.8.3.1
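
A note for reviewers less familiar with the native API: the patch relies on
two different initialization idioms, and they are easy to mix up. Below is a
minimal, self-contained sketch (not part of the patch; the "foo" type and
list names are invented for illustration) contrasting compile-time
LIST_HEAD_INIT for a file-scope head with runtime INIT_LIST_HEAD for a head
embedded in a structure that is zeroed or allocated dynamically:

#include <linux/list.h>

/* Illustrative only: "foo" is a made-up example type, not a Lustre one. */
struct foo {
        int              foo_val;
        struct list_head foo_linkage;   /* replaces cfs_list_t */
};

/* File-scope head: initialized at compile time, no init call needed. */
static struct list_head foo_list = LIST_HEAD_INIT(foo_list);

/* A head or node embedded in a memset/allocated structure must be
 * initialized at runtime before use, as lu_site_init() does for
 * s->ls_linkage above. */
static void foo_setup(struct foo *f)
{
        INIT_LIST_HEAD(&f->foo_linkage);
        list_add(&f->foo_linkage, &foo_list);  /* was cfs_list_add() */
}

Either form works; the compile-time one is preferred for globals because it
removes any ordering dependency on module-init code.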
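The lu_ref seq_file code converted above also uses a subtler list idiom
worth noting: the open file owns a marker entry spliced into the global
list, and lu_ref_seq_next() advances by list_move()-ing that marker past
its successor, so iteration survives concurrent insertions and removals.
A condensed sketch of just the list mechanics, with invented names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative only: a reader-owned cursor ("marker") entry is advanced
 * through a mutable list, mirroring lu_ref_seq_next(). */
struct node {
        struct list_head linkage;
};

static LIST_HEAD(all_nodes);
static DEFINE_SPINLOCK(all_nodes_lock);

/* Returns false once the marker has reached the end of the list. */
static bool cursor_advance(struct node *marker)
{
        struct node *next;
        bool more = true;

        spin_lock(&all_nodes_lock);
        next = list_entry(marker->linkage.next, struct node, linkage);
        if (&next->linkage == &all_nodes)
                more = false;           /* ran back into the head: done */
        else
                /* hop the marker over its successor */
                list_move(&marker->linkage, &next->linkage);
        spin_unlock(&all_nodes_lock);
        return more;
}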
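class_del_uuid() above illustrates the standard teardown pattern under the
native API: splice the live list onto a private "deathrow" head while
holding the lock, then walk and free the entries after unlocking, using the
_safe iterator so list_del() inside the loop is legal. A hedged sketch with
invented names and kfree() standing in for OBD_FREE():

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative only: "foo" as in the first sketch. */
struct foo {
        struct list_head foo_linkage;
};

static void foo_del_all(spinlock_t *lock, struct list_head *live)
{
        struct list_head deathrow;
        struct foo *f, *tmp;

        INIT_LIST_HEAD(&deathrow);

        spin_lock(lock);
        list_splice_init(live, &deathrow);      /* "live" is now empty */
        spin_unlock(lock);

        /* _safe variant caches the next entry, so deleting is safe */
        list_for_each_entry_safe(f, tmp, &deathrow, foo_linkage) {
                list_del(&f->foo_linkage);
                kfree(f);
        }
}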
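Finally, the hash-table callbacks converted in obd_config.c recover the
enclosing object from its struct hlist_node with hlist_entry(), the hlist
analogue of list_entry(). An illustrative sketch (the "bar" type is
invented; only the hlist calls mirror the patch):

#include <linux/list.h>

struct bar {
        int               bar_key;
        struct hlist_node bar_hash;     /* replaces cfs_hlist_node_t */
};

/* Hash-ops style key accessor, as in uuid_key()/nid_key() above. */
static void *bar_key(struct hlist_node *hnode)
{
        struct bar *b = hlist_entry(hnode, struct bar, bar_hash);

        return &b->bar_key;
}

static void bar_init(struct bar *b)
{
        INIT_HLIST_NODE(&b->bar_hash);  /* node starts out unhashed */
}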