From 148a7fba3d289caf053db937c8a08f63528dada1 Mon Sep 17 00:00:00 2001 From: James Simmons Date: Wed, 27 Nov 2013 13:35:21 -0500 Subject: [PATCH] LU-3963 libcfs: cleanup list operations Cleanup list operations (libcfs/include/libcfs/list.h) All typed list operations for the libcfs core are converted to the kernel APIs. Signed-off-by: Liu Xuezhao Signed-off-by: Peng Tao Signed-off-by: James Simmons Change-Id: Ibcb92e5133312e8db4fdf2f9fa1b68a8c68a2e74 Reviewed-on: http://review.whamcloud.com/4780 Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Andreas Dilger Reviewed-by: Peng Tao --- contrib/scripts/libcfs_cleanup.sed | 96 +++++ libcfs/include/libcfs/darwin/darwin-sync.h | 2 +- libcfs/include/libcfs/libcfs_hash.h | 204 +++++----- libcfs/include/libcfs/libcfs_ioctl.h | 14 +- libcfs/include/libcfs/libcfs_private.h | 26 +- libcfs/include/libcfs/libcfs_string.h | 20 +- libcfs/include/libcfs/libcfs_workitem.h | 30 +- libcfs/include/libcfs/linux/kp30.h | 18 +- libcfs/include/libcfs/list.h | 486 ++++++++++------------ libcfs/include/libcfs/lucache.h | 14 +- libcfs/include/libcfs/posix/libcfs.h | 97 +++-- libcfs/include/libcfs/user-mem.h | 20 +- libcfs/include/libcfs/user-prim.h | 8 +- libcfs/include/libcfs/winnt/winnt-mem.h | 8 +- libcfs/include/libcfs/winnt/winnt-prim.h | 29 +- libcfs/include/libcfs/winnt/winnt-tcpip.h | 97 +++-- libcfs/libcfs/darwin/darwin-mem.c | 66 +-- libcfs/libcfs/darwin/darwin-prim.c | 98 +++-- libcfs/libcfs/darwin/darwin-sync.c | 4 +- libcfs/libcfs/darwin/darwin-tracefile.c | 10 +- libcfs/libcfs/hash.c | 621 ++++++++++++++--------------- libcfs/libcfs/kernel_user_comm.c | 50 +-- libcfs/libcfs/libcfs_string.c | 38 +- libcfs/libcfs/linux/linux-cpu.c | 2 +- libcfs/libcfs/lwt.c | 53 ++- libcfs/libcfs/module.c | 60 +-- libcfs/libcfs/nidstrings.c | 248 ++++++------ libcfs/libcfs/tracefile.c | 345 ++++++++-------- libcfs/libcfs/tracefile.h | 26 +- libcfs/libcfs/upcall_cache.c | 156 ++++---- libcfs/libcfs/user-prim.c | 4 +- libcfs/libcfs/watchdog.c | 80 ++-- libcfs/libcfs/winnt/winnt-mem.c | 14 +- libcfs/libcfs/winnt/winnt-prim.c | 42 +- libcfs/libcfs/winnt/winnt-proc.c | 26 +- libcfs/libcfs/winnt/winnt-sync.c | 15 +- libcfs/libcfs/winnt/winnt-tcpip.c | 101 +++-- libcfs/libcfs/workitem.c | 121 +++--- lustre/ptlrpc/nrs.c | 6 - lustre/ptlrpc/ptlrpc_internal.h | 7 - 40 files changed, 1680 insertions(+), 1682 deletions(-) diff --git a/contrib/scripts/libcfs_cleanup.sed b/contrib/scripts/libcfs_cleanup.sed index 556892c..7d5aa99 100644 --- a/contrib/scripts/libcfs_cleanup.sed +++ b/contrib/scripts/libcfs_cleanup.sed @@ -631,3 +631,99 @@ s/\bcfs_atomic_sub_return\b/atomic_sub_return/g /#[ \t]*define[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)/d s/\bCFS_ATOMIC_INIT\b/ATOMIC_INIT/g /#[ \t]*define[ \t]*\bATOMIC_INIT\b *( *\w* *)[ \t]*\bATOMIC_INIT\b *( *\w* *)/d + +################################################################################ +# list operations +s/\bcfs_list_t\b/struct list_head/g +s/\b__cfs_list_add\b/__list_add/g +/#[ \t]*define[ \t]*\b__list_add\b *(.*)[ \t]*\b__list_add\b *(.*)/d +s/\bcfs_list_add\b/list_add/g +/#[ \t]*define[ \t]*\blist_add\b *(.*)[ \t]*\blist_add\b *(.*)/d +s/\bcfs_list_add_tail\b/list_add_tail/g +/#[ \t]*define[ \t]*\blist_add_tail\b *(.*)[ \t]*\blist_add_tail\b *(.*)/d +s/\b__cfs_list_del\b/__list_del/g +/#[ \t]*define[ \t]*\b__list_del\b *(.*)[ \t]*\b__list_del\b *(.*)/d +s/\bcfs_list_del\b/list_del/g +/#[ \t]*define[ \t]*\blist_del\b *(.*)[ \t]*\blist_del\b *(.*)/d 
+s/\bcfs_list_del_init\b/list_del_init/g +/#[ \t]*define[ \t]*\blist_del_init\b *(.*)[ \t]*\blist_del_init\b *(.*)/d +s/\bcfs_list_move\b/list_move/g +/#[ \t]*define[ \t]*\blist_move\b *(.*)[ \t]*\blist_move\b *(.*)/d +s/\bcfs_list_move_tail\b/list_move_tail/g +/#[ \t]*define[ \t]*\blist_move_tail\b *(.*)[ \t]*\blist_move_tail\b *(.*)/d +s/\bcfs_list_empty\b/list_empty/g +/#[ \t]*define[ \t]*\blist_empty\b *(.*)[ \t]*\blist_empty\b *(.*)/d +s/\bcfs_list_empty_careful\b/list_empty_careful/g +/#[ \t]*define[ \t]*\blist_empty_careful\b *(.*)[ \t]*\blist_empty_careful\b *(.*)/d +s/\b__cfs_list_splice\b/__list_splice/g +/#[ \t]*define[ \t]*\b__list_splice\b *(.*)[ \t]*\b__list_splice\b *(.*)/d +s/\bcfs_list_splice\b/list_splice/g +/#[ \t]*define[ \t]*\blist_splice\b *(.*)[ \t]*\blist_splice\b *(.*)/d +s/\bcfs_list_splice_init\b/list_splice_init/g +/#[ \t]*define[ \t]*\blist_splice_init\b *(.*)[ \t]*\blist_splice_init\b *(.*)/d +s/\bcfs_list_entry\b/list_entry/g +/#[ \t]*define[ \t]*\blist_entry\b *(.*)[ \t]*\blist_entry\b *(.*)/d +s/\bcfs_list_for_each\b/list_for_each/g +/#[ \t]*define[ \t]*\blist_for_each\b *(.*)[ \t]*\blist_for_each\b *(.*)/d +s/\bcfs_list_for_each_safe\b/list_for_each_safe/g +/#[ \t]*define[ \t]*\blist_for_each_safe\b *(.*)[ \t]*\blist_for_each_safe\b *(.*)/d +s/\bcfs_list_for_each_prev\b/list_for_each_prev/g +/#[ \t]*define[ \t]*\blist_for_each_prev\b *(.*)[ \t]*\blist_for_each_prev\b *(.*)/d +s/\bcfs_list_for_each_entry\b/list_for_each_entry/g +/#[ \t]*define[ \t]*\blist_for_each_entry\b *(.*)[ \t]*\blist_for_each_entry\b *(.*)/d +s/\bcfs_list_for_each_entry_reverse\b/list_for_each_entry_reverse/g +/#[ \t]*define[ \t]*\blist_for_each_entry_reverse\b *(.*)[ \t]*\blist_for_each_entry_reverse\b *(.*)/d +s/\bcfs_list_for_each_entry_safe_reverse\b/list_for_each_entry_safe_reverse/g +/#[ \t]*define[ \t]*\blist_for_each_entry_safe_reverse\b *(.*)[ \t]*\blist_for_each_entry_safe_reverse\b *(.*)/d +s/\bcfs_list_for_each_entry_safe\b/list_for_each_entry_safe/g +/#[ \t]*define[ \t]*\blist_for_each_entry_safe\b *(.*)[ \t]*\blist_for_each_entry_safe\b *(.*)/d +s/\bcfs_list_for_each_entry_safe_from\b/list_for_each_entry_safe_from/g +/#[ \t]*define[ \t]*\blist_for_each_entry_safe_from\b *(.*)[ \t]*\blist_for_each_entry_safe_from\b *(.*)/d +s/\bcfs_list_for_each_entry_continue\b/list_for_each_entry_continue/g +/#[ \t]*define[ \t]*\blist_for_each_entry_continue\b *(.*)[ \t]*\blist_for_each_entry_continue\b *(.*)/d +# LIST_HEAD defined in /usr/include/sys/queue.h +s/\bCFS_LIST_HEAD_INIT\b/LIST_HEAD_INIT/g +/#[ \t]*define[ \t]*\bLIST_HEAD_INIT\b *(.*)[ \t]*\bLIST_HEAD_INIT\b *(.*)/d +s/\bCFS_LIST_HEAD\b/LIST_HEAD/g +/#[ \t]*define[ \t]*\bLIST_HEAD\b *(.*)[ \t]*\bLIST_HEAD\b *(.*)/d +s/\bCFS_INIT_LIST_HEAD\b/INIT_LIST_HEAD/g +/#[ \t]*define[ \t]*\bINIT_LIST_HEAD\b *(.*)[ \t]*\bINIT_LIST_HEAD\b *(.*)/d +s/\bcfs_hlist_head_t\b/struct hlist_head/g +s/\bcfs_hlist_node_t\b/struct hlist_node/g +s/\bcfs_hlist_unhashed\b/hlist_unhashed/g +/#[ \t]*define[ \t]*\bhlist_unhashed\b *(.*)[ \t]*\bhlist_unhashed\b *(.*)/d +s/\bcfs_hlist_empty\b/hlist_empty/g +/#[ \t]*define[ \t]*\bhlist_empty\b *(.*)[ \t]*\bhlist_empty\b *(.*)/d +s/\b__cfs_hlist_del\b/__hlist_del/g +/#[ \t]*define[ \t]*\b__hlist_del\b *(.*)[ \t]*\b__hlist_del\b *(.*)/d +s/\bcfs_hlist_del\b/hlist_del/g +/#[ \t]*define[ \t]*\bhlist_del\b *(.*)[ \t]*\bhlist_del\b *(.*)/d +s/\bcfs_hlist_del_init\b/hlist_del_init/g +/#[ \t]*define[ \t]*\bhlist_del_init\b *(.*)[ \t]*\bhlist_del_init\b *(.*)/d +s/\bcfs_hlist_add_head\b/hlist_add_head/g +/#[ 
\t]*define[ \t]*\bhlist_add_head\b *(.*)[ \t]*\bhlist_add_head\b *(.*)/d +s/\bcfs_hlist_add_before\b/hlist_add_before/g +/#[ \t]*define[ \t]*\bhlist_add_before\b *(.*)[ \t]*\bhlist_add_before\b *(.*)/d +s/\bcfs_hlist_add_after\b/hlist_add_after/g +/#[ \t]*define[ \t]*\bhlist_add_after\b *(.*)[ \t]*\bhlist_add_after\b *(.*)/d +s/\bcfs_hlist_entry\b/hlist_entry/g +/#[ \t]*define[ \t]*\bhlist_entry\b *(.*)[ \t]*\bhlist_entry\b *(.*)/d +s/\bcfs_hlist_for_each\b/hlist_for_each/g +/#[ \t]*define[ \t]*\bhlist_for_each\b *(.*)[ \t]*\bhlist_for_each\b *(.*)/d +s/\bcfs_hlist_for_each_safe\b/hlist_for_each_safe/g +/#[ \t]*define[ \t]*\bhlist_for_each_safe\b *(.*)[ \t]*\bhlist_for_each_safe\b *(.*)/d +s/\bcfs_hlist_for_each_entry_continue\b/hlist_for_each_entry_continue/g +/#[ \t]*define[ \t]*\bhlist_for_each_entry_continue\b *(.*)[ \t]*\bhlist_for_each_entry_continue\b *(.*)/d +s/\bcfs_hlist_for_each_entry_from\b/hlist_for_each_entry_from/g +/#[ \t]*define[ \t]*\bhlist_for_each_entry_from\b *(.*)[ \t]*\bhlist_for_each_entry_from\b *(.*)/d +s/\bCFS_HLIST_HEAD_INIT\b/HLIST_HEAD_INIT/g +/#[ \t]*define[ \t]*\bHLIST_HEAD_INIT\b[ \t]*\bHLIST_HEAD_INIT\b/d +s/\bCFS_HLIST_HEAD\b/HLIST_HEAD/g +/#[ \t]*define[ \t]*\bHLIST_HEAD\b *(.*)[ \t]*\bHLIST_HEAD\b *(.*)/d +s/\bCFS_INIT_HLIST_HEAD\b/INIT_HLIST_HEAD/g +/#[ \t]*define[ \t]*\bINIT_HLIST_HEAD\b *(.*)[ \t]*\bINIT_HLIST_HEAD\b *(.*)/d +s/\bCFS_INIT_HLIST_NODE\b/INIT_HLIST_NODE/g +/#[ \t]*define[ \t]*\bINIT_HLIST_NODE\b *(.*)[ \t]*\bINIT_HLIST_NODE\b *(.*)/d +s/\bcfs_list_for_each_entry_safe_from\b/list_for_each_entry_safe_from/g +/cfs_list_for_each_entry_typed/{;N;s/\(cfs_list_for_each_entry_typed\)\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*/list_for_each_entry\2 \3 /} +/cfs_list_for_each_entry_safe_typed/{;N;s/\(cfs_list_for_each_entry_safe_typed\)\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*/list_for_each_entry_safe\2 \3 \4 /} diff --git a/libcfs/include/libcfs/darwin/darwin-sync.h b/libcfs/include/libcfs/darwin/darwin-sync.h index bca8d72..10e471a 100644 --- a/libcfs/include/libcfs/darwin/darwin-sync.h +++ b/libcfs/include/libcfs/darwin/darwin-sync.h @@ -302,7 +302,7 @@ void ksleep_wake_nr(struct ksleep_chan *chan, int nr); .flags = 0, \ .event = 0, \ .hits = 0, \ - .linkage = CFS_LIST_HEAD(name.linkage), \ + .linkage = LIST_HEAD_INIT(name.linkage),\ .magic = KSLEEP_LINK_MAGIC \ } diff --git a/libcfs/include/libcfs/libcfs_hash.h b/libcfs/include/libcfs/libcfs_hash.h index 5e7c9de..e7d2dc8 100644 --- a/libcfs/include/libcfs/libcfs_hash.h +++ b/libcfs/include/libcfs/libcfs_hash.h @@ -337,40 +337,40 @@ typedef struct cfs_hash_lock_ops { } cfs_hash_lock_ops_t; typedef struct cfs_hash_hlist_ops { - /** return hlist_head of hash-head of @bd */ - cfs_hlist_head_t *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd); - /** return hash-head size */ - int (*hop_hhead_size)(cfs_hash_t *hs); - /** add @hnode to hash-head of @bd */ - int (*hop_hnode_add)(cfs_hash_t *hs, - cfs_hash_bd_t *bd, cfs_hlist_node_t *hnode); - /** remove @hnode from hash-head of @bd */ - int (*hop_hnode_del)(cfs_hash_t *hs, - cfs_hash_bd_t *bd, cfs_hlist_node_t *hnode); + /** return hlist_head of hash-head of @bd */ + struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd); + /** return hash-head size */ + int (*hop_hhead_size)(cfs_hash_t *hs); + /** add @hnode to hash-head of @bd */ + int (*hop_hnode_add)(cfs_hash_t *hs, cfs_hash_bd_t *bd, + struct hlist_node *hnode); + /** remove @hnode from hash-head of @bd */ + int 
(*hop_hnode_del)(cfs_hash_t *hs, cfs_hash_bd_t *bd, + struct hlist_node *hnode); } cfs_hash_hlist_ops_t; typedef struct cfs_hash_ops { - /** return hashed value from @key */ - unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask); - /** return key address of @hnode */ - void * (*hs_key)(cfs_hlist_node_t *hnode); - /** copy key from @hnode to @key */ - void (*hs_keycpy)(cfs_hlist_node_t *hnode, void *key); + /** return hashed value from @key */ + unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask); + /** return key address of @hnode */ + void * (*hs_key)(struct hlist_node *hnode); + /** copy key from @hnode to @key */ + void (*hs_keycpy)(struct hlist_node *hnode, void *key); /** * compare @key with key of @hnode * returns 1 on a match */ - int (*hs_keycmp)(const void *key, cfs_hlist_node_t *hnode); - /** return object address of @hnode, i.e: container_of(...hnode) */ - void * (*hs_object)(cfs_hlist_node_t *hnode); - /** get refcount of item, always called with holding bucket-lock */ - void (*hs_get)(cfs_hash_t *hs, cfs_hlist_node_t *hnode); - /** release refcount of item */ - void (*hs_put)(cfs_hash_t *hs, cfs_hlist_node_t *hnode); - /** release refcount of item, always called with holding bucket-lock */ - void (*hs_put_locked)(cfs_hash_t *hs, cfs_hlist_node_t *hnode); - /** it's called before removing of @hnode */ - void (*hs_exit)(cfs_hash_t *hs, cfs_hlist_node_t *hnode); + int (*hs_keycmp)(const void *key, struct hlist_node *hnode); + /** return object address of @hnode, i.e: container_of(...hnode) */ + void * (*hs_object)(struct hlist_node *hnode); + /** get refcount of item, always called with holding bucket-lock */ + void (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode); + /** release refcount of item */ + void (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode); + /** release refcount of item, always called with holding bucket-lock */ + void (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode); + /** it's called before removing of @hnode */ + void (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode); } cfs_hash_ops_t; /** total number of buckets in @hs */ @@ -512,64 +512,64 @@ cfs_hash_bkt_size(cfs_hash_t *hs) static inline unsigned cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask) { - return CFS_HOP(hs, hash)(hs, key, mask); + return CFS_HOP(hs, hash)(hs, key, mask); } static inline void * -cfs_hash_key(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode) { - return CFS_HOP(hs, key)(hnode); + return CFS_HOP(hs, key)(hnode); } static inline void -cfs_hash_keycpy(cfs_hash_t *hs, cfs_hlist_node_t *hnode, void *key) +cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key) { - if (CFS_HOP(hs, keycpy) != NULL) - CFS_HOP(hs, keycpy)(hnode, key); + if (CFS_HOP(hs, keycpy) != NULL) + CFS_HOP(hs, keycpy)(hnode, key); } /** * Returns 1 on a match, */ static inline int -cfs_hash_keycmp(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode) +cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) { - return CFS_HOP(hs, keycmp)(key, hnode); + return CFS_HOP(hs, keycmp)(key, hnode); } static inline void * -cfs_hash_object(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode) { - return CFS_HOP(hs, object)(hnode); + return CFS_HOP(hs, object)(hnode); } static inline void -cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode) { - return CFS_HOP(hs, get)(hs, hnode); + return 
CFS_HOP(hs, get)(hs, hnode); } static inline void -cfs_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) { - LASSERT(CFS_HOP(hs, put_locked) != NULL); + LASSERT(CFS_HOP(hs, put_locked) != NULL); - return CFS_HOP(hs, put_locked)(hs, hnode); + return CFS_HOP(hs, put_locked)(hs, hnode); } static inline void -cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode) { - LASSERT(CFS_HOP(hs, put) != NULL); + LASSERT(CFS_HOP(hs, put) != NULL); - return CFS_HOP(hs, put)(hs, hnode); + return CFS_HOP(hs, put)(hs, hnode); } static inline void -cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode) { - if (CFS_HOP(hs, exit)) - CFS_HOP(hs, exit)(hs, hnode); + if (CFS_HOP(hs, exit)) + CFS_HOP(hs, exit)(hs, hnode); } static inline void cfs_hash_lock(cfs_hash_t *hs, int excl) @@ -666,11 +666,11 @@ cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2) } void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode); + struct hlist_node *hnode); void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode); + struct hlist_node *hnode); void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old, - cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode); + cfs_hash_bd_t *bd_new, struct hlist_node *hnode); static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd, atomic_t *condition) @@ -679,23 +679,23 @@ static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd, return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin); } -static inline cfs_hlist_head_t *cfs_hash_bd_hhead(cfs_hash_t *hs, +static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd) { - return hs->hs_hops->hop_hhead(hs, bd); + return hs->hs_hops->hop_hhead(hs, bd); } -cfs_hlist_node_t *cfs_hash_bd_lookup_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bd, const void *key); -cfs_hlist_node_t *cfs_hash_bd_peek_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bd, const void *key); -cfs_hlist_node_t *cfs_hash_bd_findadd_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bd, const void *key, - cfs_hlist_node_t *hnode, - int insist_add); -cfs_hlist_node_t *cfs_hash_bd_finddel_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bd, const void *key, - cfs_hlist_node_t *hnode); +struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, + const void *key); +struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, + const void *key); +struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, + const void *key, + struct hlist_node *hnode, + int insist_add); +struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, + const void *key, + struct hlist_node *hnode); /** * operations on cfs_hash bucket (bd: bucket descriptor), @@ -706,65 +706,63 @@ void cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl); void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl); static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key, - cfs_hash_bd_t *bds, int excl) + cfs_hash_bd_t *bds, int excl) { - cfs_hash_dual_bd_get(hs, key, bds); - cfs_hash_dual_bd_lock(hs, bds, excl); + cfs_hash_dual_bd_get(hs, key, bds); + cfs_hash_dual_bd_lock(hs, bds, excl); } -cfs_hlist_node_t *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bds, - const void *key); 
-cfs_hlist_node_t *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bds, - const void *key, - cfs_hlist_node_t *hnode, - int insist_add); -cfs_hlist_node_t *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bds, - const void *key, - cfs_hlist_node_t *hnode); +struct hlist_node * +cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, + const void *key); +struct hlist_node * +cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, + const void *key, struct hlist_node *hnode, + int insist_add); +struct hlist_node * +cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, + const void *key, struct hlist_node *hnode); /* Hash init/cleanup functions */ cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, - unsigned bkt_bits, unsigned extra_bytes, - unsigned min_theta, unsigned max_theta, - cfs_hash_ops_t *ops, unsigned flags); + unsigned bkt_bits, unsigned extra_bytes, + unsigned min_theta, unsigned max_theta, + cfs_hash_ops_t *ops, unsigned flags); cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs); void cfs_hash_putref(cfs_hash_t *hs); /* Hash addition functions */ void cfs_hash_add(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode); + struct hlist_node *hnode); int cfs_hash_add_unique(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode); + struct hlist_node *hnode); void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode); + struct hlist_node *hnode); /* Hash deletion functions */ -void *cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode); +void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode); void *cfs_hash_del_key(cfs_hash_t *hs, const void *key); /* Hash lookup/for_each functions */ #define CFS_HASH_LOOP_HOG 1024 typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *node, void *data); + struct hlist_node *node, void *data); void *cfs_hash_lookup(cfs_hash_t *hs, const void *key); void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data); void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data); -int cfs_hash_for_each_nolock(cfs_hash_t *hs, - cfs_hash_for_each_cb_t, void *data); -int cfs_hash_for_each_empty(cfs_hash_t *hs, - cfs_hash_for_each_cb_t, void *data); +int cfs_hash_for_each_nolock(cfs_hash_t *hs, cfs_hash_for_each_cb_t, + void *data); +int cfs_hash_for_each_empty(cfs_hash_t *hs, cfs_hash_for_each_cb_t, + void *data); void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key, - cfs_hash_for_each_cb_t, void *data); + cfs_hash_for_each_cb_t, void *data); typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data); void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data); void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex, - cfs_hash_for_each_cb_t, void *data); + cfs_hash_for_each_cb_t, void *data); int cfs_hash_is_empty(cfs_hash_t *hs); __u64 cfs_hash_size_get(cfs_hash_t *hs); @@ -776,38 +774,38 @@ void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs); void cfs_hash_rehash_cancel(cfs_hash_t *hs); int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash); void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key, - void *new_key, cfs_hlist_node_t *hnode); + void *new_key, struct hlist_node *hnode); #if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 /* Validate hnode references the correct key */ static inline void cfs_hash_key_validate(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode) + struct hlist_node 
*hnode) { - LASSERT(cfs_hash_keycmp(hs, key, hnode)); + LASSERT(cfs_hash_keycmp(hs, key, hnode)); } /* Validate hnode is in the correct bucket */ static inline void cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - cfs_hash_bd_t bds[2]; + cfs_hash_bd_t bds[2]; - cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds); - LASSERT(bds[0].bd_bucket == bd->bd_bucket || - bds[1].bd_bucket == bd->bd_bucket); + cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds); + LASSERT(bds[0].bd_bucket == bd->bd_bucket || + bds[1].bd_bucket == bd->bd_bucket); } #else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */ static inline void cfs_hash_key_validate(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode) {} + struct hlist_node *hnode) {} static inline void cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) {} + struct hlist_node *hnode) {} #endif /* CFS_HASH_DEBUG_LEVEL */ diff --git a/libcfs/include/libcfs/libcfs_ioctl.h b/libcfs/include/libcfs/libcfs_ioctl.h index dcbae24..bc7991c 100644 --- a/libcfs/include/libcfs/libcfs_ioctl.h +++ b/libcfs/include/libcfs/libcfs_ioctl.h @@ -94,15 +94,15 @@ do { \ #ifdef __KERNEL__ struct libcfs_ioctl_handler { - cfs_list_t item; - int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data); + struct list_head item; + int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data); }; -#define DECLARE_IOCTL_HANDLER(ident, func) \ - struct libcfs_ioctl_handler ident = { \ - /* .item = */ CFS_LIST_HEAD_INIT(ident.item), \ - /* .handle_ioctl = */ func \ - } +#define DECLARE_IOCTL_HANDLER(ident, func) \ + struct libcfs_ioctl_handler ident = { \ + /* .item = */ LIST_HEAD_INIT(ident.item), \ + /* .handle_ioctl = */ func \ + } #endif diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h index 42f1bc5..83d40dc 100644 --- a/libcfs/include/libcfs/libcfs_private.h +++ b/libcfs/include/libcfs/libcfs_private.h @@ -537,19 +537,19 @@ int cfs_percpt_atomic_summary(atomic_t **refs); #define CLASSERT(cond) do {switch(42) {case (cond): case 0: break;}} while (0) /* support decl needed both by kernel and liblustre */ -int libcfs_isknown_lnd(int type); -char *libcfs_lnd2modname(int type); -char *libcfs_lnd2str(int type); -int libcfs_str2lnd(const char *str); -char *libcfs_net2str(__u32 net); -char *libcfs_nid2str(lnet_nid_t nid); -__u32 libcfs_str2net(const char *str); -lnet_nid_t libcfs_str2nid(const char *str); -int libcfs_str2anynid(lnet_nid_t *nid, const char *str); -char *libcfs_id2str(lnet_process_id_t id); -void cfs_free_nidlist(cfs_list_t *list); -int cfs_parse_nidlist(char *str, int len, cfs_list_t *list); -int cfs_match_nid(lnet_nid_t nid, cfs_list_t *list); +int libcfs_isknown_lnd(int type); +char *libcfs_lnd2modname(int type); +char *libcfs_lnd2str(int type); +int libcfs_str2lnd(const char *str); +char *libcfs_net2str(__u32 net); +char *libcfs_nid2str(lnet_nid_t nid); +__u32 libcfs_str2net(const char *str); +lnet_nid_t libcfs_str2nid(const char *str); +int libcfs_str2anynid(lnet_nid_t *nid, const char *str); +char *libcfs_id2str(lnet_process_id_t id); +void cfs_free_nidlist(struct list_head *list); +int cfs_parse_nidlist(char *str, int len, struct list_head *list); +int cfs_match_nid(lnet_nid_t nid, struct list_head *list); /** \addtogroup lnet_addr * @{ */ diff --git a/libcfs/include/libcfs/libcfs_string.h b/libcfs/include/libcfs/libcfs_string.h index e91396c..e402b55 100644 --- 
a/libcfs/include/libcfs/libcfs_string.h +++ b/libcfs/include/libcfs/libcfs_string.h @@ -79,15 +79,15 @@ struct cfs_range_expr { /* * Link to cfs_expr_list::el_exprs. */ - cfs_list_t re_link; - __u32 re_lo; - __u32 re_hi; - __u32 re_stride; + struct list_head re_link; + __u32 re_lo; + __u32 re_hi; + __u32 re_stride; }; struct cfs_expr_list { - cfs_list_t el_link; - cfs_list_t el_exprs; + struct list_head el_link; + struct list_head el_exprs; }; static inline int @@ -127,10 +127,10 @@ void cfs_expr_list_free(struct cfs_expr_list *expr_list); void cfs_expr_list_print(struct cfs_expr_list *expr_list); int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, struct cfs_expr_list **elpp); -void cfs_expr_list_free_list(cfs_list_t *list); -int cfs_ip_addr_parse(char *str, int len, cfs_list_t *list); -int cfs_ip_addr_match(__u32 addr, cfs_list_t *list); -void cfs_ip_addr_free(cfs_list_t *list); +void cfs_expr_list_free_list(struct list_head *list); +int cfs_ip_addr_parse(char *str, int len, struct list_head *list); +int cfs_ip_addr_match(__u32 addr, struct list_head *list); +void cfs_ip_addr_free(struct list_head *list); #ifdef __KERNEL__ #define strtoul(str, endp, base) simple_strtoul(str, endp, base) diff --git a/libcfs/include/libcfs/libcfs_workitem.h b/libcfs/include/libcfs/libcfs_workitem.h index 8b1ffa1..69c516c 100644 --- a/libcfs/include/libcfs/libcfs_workitem.h +++ b/libcfs/include/libcfs/libcfs_workitem.h @@ -74,27 +74,27 @@ struct cfs_workitem; typedef int (*cfs_wi_action_t) (struct cfs_workitem *); typedef struct cfs_workitem { - /** chain on runq or rerunq */ - cfs_list_t wi_list; - /** working function */ - cfs_wi_action_t wi_action; - /** arg for working function */ - void *wi_data; - /** in running */ - unsigned short wi_running:1; - /** scheduled */ - unsigned short wi_scheduled:1; + /** chain on runq or rerunq */ + struct list_head wi_list; + /** working function */ + cfs_wi_action_t wi_action; + /** arg for working function */ + void *wi_data; + /** in running */ + unsigned short wi_running:1; + /** scheduled */ + unsigned short wi_scheduled:1; } cfs_workitem_t; static inline void cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action) { - CFS_INIT_LIST_HEAD(&wi->wi_list); + INIT_LIST_HEAD(&wi->wi_list); - wi->wi_running = 0; - wi->wi_scheduled = 0; - wi->wi_data = data; - wi->wi_action = action; + wi->wi_running = 0; + wi->wi_scheduled = 0; + wi->wi_data = data; + wi->wi_action = action; } void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi); diff --git a/libcfs/include/libcfs/linux/kp30.h b/libcfs/include/libcfs/linux/kp30.h index 7989619..e57f10c 100644 --- a/libcfs/include/libcfs/linux/kp30.h +++ b/libcfs/include/libcfs/linux/kp30.h @@ -137,9 +137,9 @@ typedef struct { # if !KLWT_SUPPORT typedef struct _lwt_page { - cfs_list_t lwtp_list; - struct page *lwtp_page; - lwt_event_t *lwtp_events; + struct list_head lwtp_list; + struct page *lwtp_page; + lwt_event_t *lwtp_events; } lwt_page_t; typedef struct { @@ -172,12 +172,12 @@ do { \ p = cpu->lwtc_current_page; \ e = &p->lwtp_events[cpu->lwtc_current_index++]; \ \ - if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \ - cpu->lwtc_current_page = \ - cfs_list_entry (p->lwtp_list.next, \ - lwt_page_t, lwtp_list); \ - cpu->lwtc_current_index = 0; \ - } \ + if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \ + cpu->lwtc_current_page = \ + list_entry (p->lwtp_list.next, \ + lwt_page_t, lwtp_list); \ + cpu->lwtc_current_index = 0; \ + } \ \ e->lwte_when = get_cycles(); \ e->lwte_where 
= LWTWHERE(__FILE__,__LINE__); \ diff --git a/libcfs/include/libcfs/list.h b/libcfs/include/libcfs/list.h index 404d08a..f9652a1 100644 --- a/libcfs/include/libcfs/list.h +++ b/libcfs/include/libcfs/list.h @@ -27,95 +27,6 @@ #include -typedef struct list_head cfs_list_t; - -#define __cfs_list_add(new, prev, next) __list_add(new, prev, next) -#define cfs_list_add(new, head) list_add(new, head) - -#define cfs_list_add_tail(new, head) list_add_tail(new, head) - -#define __cfs_list_del(prev, next) __list_del(prev, next) -#define cfs_list_del(entry) list_del(entry) -#define cfs_list_del_init(entry) list_del_init(entry) - -#define cfs_list_move(list, head) list_move(list, head) -#define cfs_list_move_tail(list, head) list_move_tail(list, head) - -#define cfs_list_empty(head) list_empty(head) -#define cfs_list_empty_careful(head) list_empty_careful(head) - -#define __cfs_list_splice(list, head) __list_splice(list, head) -#define cfs_list_splice(list, head) list_splice(list, head) -#define cfs_list_splice_tail(list, head) list_splice_tail(list, head) - -#define cfs_list_splice_init(list, head) list_splice_init(list, head) - -#define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member) -#define cfs_list_for_each(pos, head) list_for_each(pos, head) -#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head) -#define cfs_list_for_each_prev(pos, head) list_for_each_prev(pos, head) -#define cfs_list_for_each_entry(pos, head, member) \ - list_for_each_entry(pos, head, member) -#define cfs_list_for_each_entry_reverse(pos, head, member) \ - list_for_each_entry_reverse(pos, head, member) -#define cfs_list_for_each_entry_safe_reverse(pos, n, head, member) \ - list_for_each_entry_safe_reverse(pos, n, head, member) -#define cfs_list_for_each_entry_safe(pos, n, head, member) \ - list_for_each_entry_safe(pos, n, head, member) -#ifdef list_for_each_entry_safe_from -#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \ - list_for_each_entry_safe_from(pos, n, head, member) -#endif /* list_for_each_entry_safe_from */ -#define cfs_list_for_each_entry_continue(pos, head, member) \ - list_for_each_entry_continue(pos, head, member) - -#define CFS_LIST_HEAD_INIT(n) LIST_HEAD_INIT(n) -#define CFS_LIST_HEAD(n) LIST_HEAD(n) -#define CFS_INIT_LIST_HEAD(p) INIT_LIST_HEAD(p) - -typedef struct hlist_head cfs_hlist_head_t; -typedef struct hlist_node cfs_hlist_node_t; - -#define cfs_hlist_unhashed(h) hlist_unhashed(h) - -#define cfs_hlist_empty(h) hlist_empty(h) - -#define __cfs_hlist_del(n) __hlist_del(n) -#define cfs_hlist_del(n) hlist_del(n) -#define cfs_hlist_del_init(n) hlist_del_init(n) - -#define cfs_hlist_add_head(n, next) hlist_add_head(n, next) -#define cfs_hlist_add_before(n, next) hlist_add_before(n, next) -#define cfs_hlist_add_after(n, next) hlist_add_after(n, next) - -#define cfs_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member) -#define cfs_hlist_for_each(pos, head) hlist_for_each(pos, head) -#define cfs_hlist_for_each_safe(pos, n, head) \ - hlist_for_each_safe(pos, n, head) -#ifdef HAVE_HLIST_FOR_EACH_3ARG -#define cfs_hlist_for_each_entry(tpos, pos, head, member) \ - pos = NULL; hlist_for_each_entry(tpos, head, member) -#else -#define cfs_hlist_for_each_entry(tpos, pos, head, member) \ - hlist_for_each_entry(tpos, pos, head, member) -#endif -#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \ - hlist_for_each_entry_continue(tpos, pos, member) -#define cfs_hlist_for_each_entry_from(tpos, pos, member) \ - hlist_for_each_entry_from(tpos, pos, member) 
-#ifdef HAVE_HLIST_FOR_EACH_3ARG -#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - pos = NULL; hlist_for_each_entry_safe(tpos, n, head, member) -#else -#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - hlist_for_each_entry_safe(tpos, pos, n, head, member) -#endif - -#define CFS_HLIST_HEAD_INIT HLIST_HEAD_INIT -#define CFS_HLIST_HEAD(n) HLIST_HEAD(n) -#define CFS_INIT_HLIST_HEAD(p) INIT_HLIST_HEAD(p) -#define CFS_INIT_HLIST_NODE(p) INIT_HLIST_NODE(p) - #else /* !defined (__linux__) || !defined(__KERNEL__) */ /* @@ -130,18 +41,13 @@ typedef struct hlist_node cfs_hlist_node_t; #define prefetch(a) ((void)a) -struct cfs_list_head { - struct cfs_list_head *next, *prev; +struct list_head { + struct list_head *next, *prev; }; -typedef struct cfs_list_head cfs_list_t; +#define LIST_HEAD_INIT(name) { &(name), &(name) } -#define CFS_LIST_HEAD_INIT(name) { &(name), &(name) } - -#define CFS_LIST_HEAD(name) \ - cfs_list_t name = CFS_LIST_HEAD_INIT(name) - -#define CFS_INIT_LIST_HEAD(ptr) do { \ +#define INIT_LIST_HEAD(ptr) do { \ (ptr)->next = (ptr); (ptr)->prev = (ptr); \ } while (0) @@ -151,9 +57,9 @@ typedef struct cfs_list_head cfs_list_t; * This is only for internal list manipulation where we know * the prev/next entries already! */ -static inline void __cfs_list_add(cfs_list_t * new, - cfs_list_t * prev, - cfs_list_t * next) +static inline void __list_add(struct list_head * new, + struct list_head * prev, + struct list_head * next) { next->prev = new; new->next = next; @@ -169,10 +75,10 @@ static inline void __cfs_list_add(cfs_list_t * new, * Insert a new entry after the specified head. * This is good for implementing stacks. */ -static inline void cfs_list_add(cfs_list_t *new, - cfs_list_t *head) +static inline void list_add(struct list_head *new, + struct list_head *head) { - __cfs_list_add(new, head, head->next); + __list_add(new, head, head->next); } /** @@ -183,10 +89,10 @@ static inline void cfs_list_add(cfs_list_t *new, * Insert a new entry before the specified head. * This is useful for implementing queues. */ -static inline void cfs_list_add_tail(cfs_list_t *new, - cfs_list_t *head) +static inline void list_add_tail(struct list_head *new, + struct list_head *head) { - __cfs_list_add(new, head->prev, head); + __list_add(new, head->prev, head); } /* @@ -196,8 +102,8 @@ static inline void cfs_list_add_tail(cfs_list_t *new, * This is only for internal list manipulation where we know * the prev/next entries already! */ -static inline void __cfs_list_del(cfs_list_t *prev, - cfs_list_t *next) +static inline void __list_del(struct list_head *prev, + struct list_head *next) { next->prev = prev; prev->next = next; @@ -209,19 +115,19 @@ static inline void __cfs_list_del(cfs_list_t *prev, * Note: list_empty(entry) does not return true after this, the entry is in an * undefined state. */ -static inline void cfs_list_del(cfs_list_t *entry) +static inline void list_del(struct list_head *entry) { - __cfs_list_del(entry->prev, entry->next); + __list_del(entry->prev, entry->next); } /** * Remove an entry from the list it is currently in and reinitialize it. * \param entry the entry to remove. 
*/ -static inline void cfs_list_del_init(cfs_list_t *entry) +static inline void list_del_init(struct list_head *entry) { - __cfs_list_del(entry->prev, entry->next); - CFS_INIT_LIST_HEAD(entry); + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); } /** @@ -230,11 +136,11 @@ static inline void cfs_list_del_init(cfs_list_t *entry) * \param list the entry to move * \param head the list to move it to */ -static inline void cfs_list_move(cfs_list_t *list, - cfs_list_t *head) +static inline void list_move(struct list_head *list, + struct list_head *head) { - __cfs_list_del(list->prev, list->next); - cfs_list_add(list, head); + __list_del(list->prev, list->next); + list_add(list, head); } /** @@ -243,18 +149,18 @@ static inline void cfs_list_move(cfs_list_t *list, * \param list the entry to move * \param head the list to move it to */ -static inline void cfs_list_move_tail(cfs_list_t *list, - cfs_list_t *head) +static inline void list_move_tail(struct list_head *list, + struct list_head *head) { - __cfs_list_del(list->prev, list->next); - cfs_list_add_tail(list, head); + __list_del(list->prev, list->next); + list_add_tail(list, head); } /** * Test whether a list is empty * \param head the list to test. */ -static inline int cfs_list_empty(cfs_list_t *head) +static inline int list_empty(struct list_head *head) { return head->next == head; } @@ -266,23 +172,23 @@ static inline int cfs_list_empty(cfs_list_t *head) * Tests whether a list is empty _and_ checks that no other CPU might be * in the process of modifying either member (next or prev) * - * NOTE: using cfs_list_empty_careful() without synchronization + * NOTE: using list_empty_careful() without synchronization * can only be safe if the only activity that can happen - * to the list entry is cfs_list_del_init(). Eg. it cannot be used + * to the list entry is list_del_init(). Eg. it cannot be used * if another CPU could re-list_add() it. */ -static inline int cfs_list_empty_careful(const cfs_list_t *head) +static inline int list_empty_careful(const struct list_head *head) { - cfs_list_t *next = head->next; - return (next == head) && (next == head->prev); + struct list_head *next = head->next; + return (next == head) && (next == head->prev); } -static inline void __cfs_list_splice(cfs_list_t *list, - cfs_list_t *head) +static inline void __list_splice(struct list_head *list, + struct list_head *head) { - cfs_list_t *first = list->next; - cfs_list_t *last = list->prev; - cfs_list_t *at = head->next; + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; first->prev = head; head->next = first; @@ -299,17 +205,17 @@ static inline void __cfs_list_splice(cfs_list_t *list, * The contents of \a list are added at the start of \a head. \a list is in an * undefined state on return. 
*/ -static inline void cfs_list_splice(cfs_list_t *list, - cfs_list_t *head) +static inline void list_splice(struct list_head *list, + struct list_head *head) { - if (!cfs_list_empty(list)) - __cfs_list_splice(list, head); + if (!list_empty(list)) + __list_splice(list, head); } -static inline void cfs_list_splice_tail(cfs_list_t *list, cfs_list_t *head) +static inline void list_splice_tail(struct list_head *list, struct list_head *head) { - if (!cfs_list_empty(list)) - __cfs_list_splice(list, head->prev); + if (!list_empty(list)) + __list_splice(list, head->prev); } /** @@ -320,12 +226,12 @@ static inline void cfs_list_splice_tail(cfs_list_t *list, cfs_list_t *head) * The contents of \a list are added at the start of \a head. \a list is empty * on return. */ -static inline void cfs_list_splice_init(cfs_list_t *list, - cfs_list_t *head) +static inline void list_splice_init(struct list_head *list, + struct list_head *head) { - if (!cfs_list_empty(list)) { - __cfs_list_splice(list, head); - CFS_INIT_LIST_HEAD(list); + if (!list_empty(list)) { + __list_splice(list, head); + INIT_LIST_HEAD(list); } } @@ -335,7 +241,7 @@ static inline void cfs_list_splice_init(cfs_list_t *list, * \param type the type of the struct this is embedded in. * \param member the member name of the list within the struct. */ -#define cfs_list_entry(ptr, type, member) \ +#define list_entry(ptr, type, member) \ ((type *)((char *)(ptr)-(char *)(&((type *)0)->member))) /** @@ -346,7 +252,7 @@ static inline void cfs_list_splice_init(cfs_list_t *list, * Behaviour is undefined if \a pos is removed from the list in the body of the * loop. */ -#define cfs_list_for_each(pos, head) \ +#define list_for_each(pos, head) \ for (pos = (head)->next, prefetch(pos->next); pos != (head); \ pos = pos->next, prefetch(pos->next)) @@ -359,7 +265,7 @@ static inline void cfs_list_splice_init(cfs_list_t *list, * This is safe to use if \a pos could be removed from the list in the body of * the loop. 
*/ -#define cfs_list_for_each_safe(pos, n, head) \ +#define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; pos != (head); \ pos = n, n = pos->next) @@ -367,12 +273,12 @@ static inline void cfs_list_splice_init(cfs_list_t *list, * Iterate over a list continuing after existing point * \param pos the type * to use as a loop counter * \param head the list head - * \param member the name of the list_struct within the struct + * \param member the name of the list_struct within the struct */ -#define cfs_list_for_each_entry_continue(pos, head, member) \ - for (pos = cfs_list_entry(pos->member.next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cfs_list_entry(pos->member.next, typeof(*pos), member)) +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) /** * \defgroup hlist Hash List @@ -382,13 +288,13 @@ static inline void cfs_list_splice_init(cfs_list_t *list, * @{ */ -typedef struct cfs_hlist_node { - struct cfs_hlist_node *next, **pprev; -} cfs_hlist_node_t; +struct hlist_node { + struct hlist_node *next, **pprev; +}; -typedef struct cfs_hlist_head { - cfs_hlist_node_t *first; -} cfs_hlist_head_t; +struct hlist_head { + struct hlist_node *first; +}; /* @} */ @@ -406,47 +312,47 @@ typedef struct cfs_hlist_head { * @{ */ -#define CFS_HLIST_HEAD_INIT { NULL_P } -#define CFS_HLIST_HEAD(name) cfs_hlist_head_t name = { NULL_P } -#define CFS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL_P) -#define CFS_INIT_HLIST_NODE(ptr) ((ptr)->next = NULL_P, (ptr)->pprev = NULL_P) +#define HLIST_HEAD_INIT { NULL_P } +#define HLIST_HEAD(name) struct hlist_head name = { NULL_P } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL_P) +#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL_P, (ptr)->pprev = NULL_P) -static inline int cfs_hlist_unhashed(const cfs_hlist_node_t *h) +static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } -static inline int cfs_hlist_empty(const cfs_hlist_head_t *h) +static inline int hlist_empty(const struct hlist_head *h) { return !h->first; } -static inline void __cfs_hlist_del(cfs_hlist_node_t *n) +static inline void __hlist_del(struct hlist_node *n) { - cfs_hlist_node_t *next = n->next; - cfs_hlist_node_t **pprev = n->pprev; + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; *pprev = next; if (next) next->pprev = pprev; } -static inline void cfs_hlist_del(cfs_hlist_node_t *n) +static inline void hlist_del(struct hlist_node *n) { - __cfs_hlist_del(n); + __hlist_del(n); } -static inline void cfs_hlist_del_init(cfs_hlist_node_t *n) +static inline void hlist_del_init(struct hlist_node *n) { if (n->pprev) { - __cfs_hlist_del(n); - CFS_INIT_HLIST_NODE(n); + __hlist_del(n); + INIT_HLIST_NODE(n); } } -static inline void cfs_hlist_add_head(cfs_hlist_node_t *n, - cfs_hlist_head_t *h) +static inline void hlist_add_head(struct hlist_node *n, + struct hlist_head *h) { - cfs_hlist_node_t *first = h->first; + struct hlist_node *first = h->first; n->next = first; if (first) first->pprev = &n->next; @@ -455,8 +361,8 @@ static inline void cfs_hlist_add_head(cfs_hlist_node_t *n, } /* next must be != NULL */ -static inline void cfs_hlist_add_before(cfs_hlist_node_t *n, - cfs_hlist_node_t *next) +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) { n->pprev = next->pprev; 
n->next = next; @@ -464,8 +370,8 @@ static inline void cfs_hlist_add_before(cfs_hlist_node_t *n, *(n->pprev) = n; } -static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, - cfs_hlist_node_t *next) +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) { next->next = n->next; n->next = next; @@ -475,13 +381,13 @@ static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, next->next->pprev = &next->next; } -#define cfs_hlist_entry(ptr, type, member) container_of(ptr,type,member) +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) -#define cfs_hlist_for_each(pos, head) \ +#define hlist_for_each(pos, head) \ for (pos = (head)->first; pos && (prefetch(pos->next), 1); \ pos = pos->next) -#define cfs_hlist_for_each_safe(pos, n, head) \ +#define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && (n = pos->next, 1); \ pos = n) @@ -492,10 +398,10 @@ static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, * \param head the head for your list. * \param member the name of the hlist_node within the struct. */ -#define cfs_hlist_for_each_entry(tpos, pos, head, member) \ +#define hlist_for_each_entry(tpos, pos, head, member) \ for (pos = (head)->first; \ pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** @@ -504,10 +410,10 @@ static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, * \param pos the &struct hlist_node to use as a loop counter. * \param member the name of the hlist_node within the struct. */ -#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \ +#define hlist_for_each_entry_continue(tpos, pos, member) \ for (pos = (pos)->next; \ pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** @@ -516,9 +422,9 @@ static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, * \param pos the &struct hlist_node to use as a loop counter. * \param member the name of the hlist_node within the struct. */ -#define cfs_hlist_for_each_entry_from(tpos, pos, member) \ +#define hlist_for_each_entry_from(tpos, pos, member) \ for (; pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** @@ -529,67 +435,57 @@ static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, * \param head the head for your list. * \param member the name of the hlist_node within the struct. */ -#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ for (pos = (head)->first; \ pos && ({ n = pos->next; 1; }) && \ - ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = n) /* @} */ #endif /* __linux__ && __KERNEL__ */ -#ifndef cfs_list_for_each_prev +#ifndef list_for_each_prev /** * Iterate over a list in reverse order * \param pos the &struct list_head to use as a loop counter. * \param head the head for your list. 
*/ -#define cfs_list_for_each_prev(pos, head) \ +#define list_for_each_prev(pos, head) \ for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \ pos = pos->prev, prefetch(pos->prev)) -#endif /* cfs_list_for_each_prev */ +#endif /* list_for_each_prev */ -#ifndef cfs_list_for_each_entry +#ifndef list_for_each_entry /** * Iterate over a list of given type * \param pos the type * to use as a loop counter. * \param head the head for your list. * \param member the name of the list_struct within the struct. */ -#define cfs_list_for_each_entry(pos, head, member) \ - for (pos = cfs_list_entry((head)->next, typeof(*pos), member), \ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ prefetch(pos->member.next); \ &pos->member != (head); \ - pos = cfs_list_entry(pos->member.next, typeof(*pos), member), \ + pos = list_entry(pos->member.next, typeof(*pos), member), \ prefetch(pos->member.next)) -#endif /* cfs_list_for_each_entry */ - -#ifndef cfs_list_for_each_entry_rcu -#define cfs_list_for_each_entry_rcu(pos, head, member) \ - list_for_each_entry(pos, head, member) -#endif +#endif /* list_for_each_entry */ -#ifndef cfs_list_for_each_entry_rcu -#define cfs_list_for_each_entry_rcu(pos, head, member) \ - list_for_each_entry(pos, head, member) -#endif - -#ifndef cfs_list_for_each_entry_reverse +#ifndef list_for_each_entry_reverse /** * Iterate backwards over a list of given type. * \param pos the type * to use as a loop counter. * \param head the head for your list. * \param member the name of the list_struct within the struct. */ -#define cfs_list_for_each_entry_reverse(pos, head, member) \ - for (pos = cfs_list_entry((head)->prev, typeof(*pos), member); \ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ prefetch(pos->member.prev), &pos->member != (head); \ - pos = cfs_list_entry(pos->member.prev, typeof(*pos), member)) -#endif /* cfs_list_for_each_entry_reverse */ + pos = list_entry(pos->member.prev, typeof(*pos), member)) +#endif /* list_for_each_entry_reverse */ -#ifndef cfs_list_for_each_entry_safe +#ifndef list_for_each_entry_safe /** * Iterate over a list of given type safe against removal of list entry * \param pos the type * to use as a loop counter. @@ -597,64 +493,120 @@ static inline void cfs_hlist_add_after(cfs_hlist_node_t *n, * \param head the head for your list. * \param member the name of the list_struct within the struct. */ -#define cfs_list_for_each_entry_safe(pos, n, head, member) \ - for (pos = cfs_list_entry((head)->next, typeof(*pos), member), \ - n = cfs_list_entry(pos->member.next, typeof(*pos), member); \ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ - pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member)) + pos = n, n = list_entry(n->member.next, typeof(*n), member)) -#endif /* cfs_list_for_each_entry_safe */ +#endif /* list_for_each_entry_safe */ -#ifndef cfs_list_for_each_entry_safe_from -/** - * Iterate over a list continuing from an existing point - * \param pos the type * to use as a loop cursor. - * \param n another type * to use as temporary storage - * \param head the head for your list. - * \param member the name of the list_struct within the struct. - * - * Iterate over list of given type from current point, safe against - * removal of list entry. 
- */ -#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \ - for (n = cfs_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member)) -#endif /* cfs_list_for_each_entry_safe_from */ - -#define cfs_list_for_each_entry_typed(pos, head, type, member) \ - for (pos = cfs_list_entry((head)->next, type, member), \ - prefetch(pos->member.next); \ - &pos->member != (head); \ - pos = cfs_list_entry(pos->member.next, type, member), \ - prefetch(pos->member.next)) +/* Temporary until everything is moved over to linux api */ +typedef struct list_head cfs_list_t; + +#define __cfs_list_add(new, prev, next) __list_add(new, prev, next) +#define cfs_list_add(new, head) list_add(new, head) + +#define cfs_list_add_tail(new, head) list_add_tail(new, head) + +#define __cfs_list_del(prev, next) __list_del(prev, next) +#define cfs_list_del(entry) list_del(entry) +#define cfs_list_del_init(entry) list_del_init(entry) + +#define cfs_list_move(list, head) list_move(list, head) +#define cfs_list_move_tail(list, head) list_move_tail(list, head) + +#define cfs_list_empty(head) list_empty(head) +#define cfs_list_empty_careful(head) list_empty_careful(head) + +#define __cfs_list_splice(list, head) __list_splice(list, head) +#define cfs_list_splice(list, head) list_splice(list, head) +#define cfs_list_splice_tail(list, head) list_splice_tail(list, head) + +#define cfs_list_splice_init(list, head) list_splice_init(list, head) + +#define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member) +#define cfs_list_for_each(pos, head) list_for_each(pos, head) +#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head) + +#define cfs_list_for_each_prev(pos, head) list_for_each_prev(pos, head) +#define cfs_list_for_each_entry(pos, head, member) \ + list_for_each_entry(pos, head, member) +#define cfs_list_for_each_entry_reverse(pos, head, member) \ + list_for_each_entry_reverse(pos, head, member) +#define cfs_list_for_each_entry_safe_reverse(pos, n, head, member) \ + list_for_each_entry_safe_reverse(pos, n, head, member) +#define cfs_list_for_each_entry_safe(pos, n, head, member) \ + list_for_each_entry_safe(pos, n, head, member) +#ifdef list_for_each_entry_safe_from +#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \ + list_for_each_entry_safe_from(pos, n, head, member) +#endif /* list_for_each_entry_safe_from */ +#define cfs_list_for_each_entry_continue(pos, head, member) \ + list_for_each_entry_continue(pos, head, member) + +#define CFS_LIST_HEAD_INIT(n) LIST_HEAD_INIT(n) +#define CFS_INIT_LIST_HEAD(p) INIT_LIST_HEAD(p) + +typedef struct hlist_head cfs_hlist_head_t; +typedef struct hlist_node cfs_hlist_node_t; + +#define cfs_hlist_unhashed(h) hlist_unhashed(h) + +#define cfs_hlist_empty(h) hlist_empty(h) -#define cfs_list_for_each_entry_reverse_typed(pos, head, type, member) \ - for (pos = cfs_list_entry((head)->prev, type, member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cfs_list_entry(pos->member.prev, type, member)) +#define __cfs_hlist_del(n) __hlist_del(n) +#define cfs_hlist_del(n) hlist_del(n) +#define cfs_hlist_del_init(n) hlist_del_init(n) + +#define cfs_hlist_add_head(n, next) hlist_add_head(n, next) +#define cfs_hlist_add_before(n, next) hlist_add_before(n, next) +#define cfs_hlist_add_after(n, next) hlist_add_after(n, next) + +#define cfs_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member) +#define cfs_hlist_for_each(pos, head) 
hlist_for_each(pos, head) +#define cfs_hlist_for_each_safe(pos, n, head) \ + hlist_for_each_safe(pos, n, head) +#ifdef HAVE_HLIST_FOR_EACH_3ARG +#define cfs_hlist_for_each_entry(tpos, pos, head, member) \ + pos = NULL; hlist_for_each_entry(tpos, head, member) +#else +#define cfs_hlist_for_each_entry(tpos, pos, head, member) \ + hlist_for_each_entry(tpos, pos, head, member) +#endif +#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \ + hlist_for_each_entry_continue(tpos, pos, member) +#define cfs_hlist_for_each_entry_from(tpos, pos, member) \ + hlist_for_each_entry_from(tpos, pos, member) +#ifdef HAVE_HLIST_FOR_EACH_3ARG +#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + pos = NULL; hlist_for_each_entry_safe(tpos, n, head, member) +#else +#define cfs_hlist_for_each_entry(tpos, pos, head, member) \ + hlist_for_each_entry(tpos, pos, head, member) +#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + hlist_for_each_entry_safe(tpos, pos, n, head, member) +#endif + +#define cfs_list_for_each_entry_typed(pos, head, type, member) \ + for (pos = list_entry((head)->next, type, member), \ + prefetch(pos->member.next); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, type, member), \ + prefetch(pos->member.next)) #define cfs_list_for_each_entry_safe_typed(pos, n, head, type, member) \ - for (pos = cfs_list_entry((head)->next, type, member), \ - n = cfs_list_entry(pos->member.next, type, member); \ - &pos->member != (head); \ - pos = n, n = cfs_list_entry(n->member.next, type, member)) - -#define cfs_list_for_each_entry_safe_from_typed(pos, n, head, type, member) \ - for (n = cfs_list_entry(pos->member.next, type, member); \ - &pos->member != (head); \ - pos = n, n = cfs_list_entry(n->member.next, type, member)) - -#define cfs_hlist_for_each_entry_typed(tpos, pos, head, type, member) \ - for (pos = (head)->first; \ - pos && (prefetch(pos->next), 1) && \ - (tpos = cfs_hlist_entry(pos, type, member), 1); \ - pos = pos->next) + for (pos = list_entry((head)->next, type, member), \ + n = list_entry(pos->member.next, type, member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, type, member)) -#define cfs_hlist_for_each_entry_safe_typed(tpos, pos, n, head, type, member) \ - for (pos = (head)->first; \ - pos && (n = pos->next, 1) && \ - (tpos = cfs_hlist_entry(pos, type, member), 1); \ - pos = n) +#define CFS_HLIST_HEAD_INIT HLIST_HEAD_INIT +#define CFS_HLIST_HEAD(n) HLIST_HEAD(n) +#define CFS_INIT_HLIST_HEAD(p) INIT_HLIST_HEAD(p) +#define CFS_INIT_HLIST_NODE(p) INIT_HLIST_NODE(p) + +#define CFS_LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) #endif /* __LIBCFS_LUSTRE_LIST_H__ */ diff --git a/libcfs/include/libcfs/lucache.h b/libcfs/include/libcfs/lucache.h index 3446378..4a5cbeb 100644 --- a/libcfs/include/libcfs/lucache.h +++ b/libcfs/include/libcfs/lucache.h @@ -83,15 +83,15 @@ struct md_identity { }; struct upcall_cache_entry { - cfs_list_t ue_hash; - __u64 ue_key; + struct list_head ue_hash; + __u64 ue_key; atomic_t ue_refcount; - int ue_flags; + int ue_flags; wait_queue_head_t ue_waitq; - cfs_time_t ue_acquire_expire; - cfs_time_t ue_expire; + cfs_time_t ue_acquire_expire; + cfs_time_t ue_expire; union { - struct md_identity identity; + struct md_identity identity; } u; }; @@ -118,7 +118,7 @@ struct upcall_cache_ops { }; struct upcall_cache { - cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE]; + struct list_head uc_hashtable[UC_CACHE_HASH_SIZE]; spinlock_t uc_lock; rwlock_t uc_upcall_rwlock; diff --git 
a/libcfs/include/libcfs/posix/libcfs.h b/libcfs/include/libcfs/posix/libcfs.h index f49fecd..180d18e 100644 --- a/libcfs/include/libcfs/posix/libcfs.h +++ b/libcfs/include/libcfs/posix/libcfs.h @@ -303,88 +303,87 @@ static inline void remove_shrinker(struct shrinker *shrinker) ***************************************************************************/ struct radix_tree_root { - cfs_list_t list; - void *rnode; + struct list_head list; + void *rnode; }; struct radix_tree_node { - cfs_list_t _node; - unsigned long index; - void *item; + struct list_head _node; + unsigned long index; + void *item; }; -#define RADIX_TREE_INIT(mask) { \ - NOT_IMPLEMENTED \ +#define RADIX_TREE_INIT(mask) { \ + NOT_IMPLEMENTED \ } #define RADIX_TREE(name, mask) \ struct radix_tree_root name = RADIX_TREE_INIT(mask) -#define INIT_RADIX_TREE(root, mask) \ -do { \ - CFS_INIT_LIST_HEAD(&((struct radix_tree_root *)root)->list); \ - ((struct radix_tree_root *)root)->rnode = NULL; \ +#define INIT_RADIX_TREE(root, mask) \ +do { \ + INIT_LIST_HEAD(&((struct radix_tree_root *)root)->list); \ + ((struct radix_tree_root *)root)->rnode = NULL; \ } while (0) static inline int radix_tree_insert(struct radix_tree_root *root, - unsigned long idx, void *item) + unsigned long idx, void *item) { - struct radix_tree_node *node; - node = malloc(sizeof(*node)); - if (!node) - return -ENOMEM; - - CFS_INIT_LIST_HEAD(&node->_node); - node->index = idx; - node->item = item; - cfs_list_add_tail(&node->_node, &root->list); - root->rnode = (void *)1001; - return 0; + struct radix_tree_node *node; + node = malloc(sizeof(*node)); + if (!node) + return -ENOMEM; + + INIT_LIST_HEAD(&node->_node); + node->index = idx; + node->item = item; + list_add_tail(&node->_node, &root->list); + root->rnode = (void *)1001; + return 0; } -static inline struct radix_tree_node *radix_tree_lookup0(struct radix_tree_root *root, - unsigned long idx) +static inline struct radix_tree_node * +radix_tree_lookup0(struct radix_tree_root *root, unsigned long idx) { - struct radix_tree_node *node; + struct radix_tree_node *node; - if (cfs_list_empty(&root->list)) - return NULL; + if (list_empty(&root->list)) + return NULL; - cfs_list_for_each_entry_typed(node, &root->list, - struct radix_tree_node, _node) - if (node->index == idx) - return node; + list_for_each_entry(node, &root->list, _node) + if (node->index == idx) + return node; - return NULL; + return NULL; } static inline void *radix_tree_lookup(struct radix_tree_root *root, - unsigned long idx) + unsigned long idx) { - struct radix_tree_node *node = radix_tree_lookup0(root, idx); + struct radix_tree_node *node = radix_tree_lookup0(root, idx); - if (node) - return node->item; - return node; + if (node) + return node->item; + return node; } static inline void *radix_tree_delete(struct radix_tree_root *root, - unsigned long idx) + unsigned long idx) { - struct radix_tree_node *p = radix_tree_lookup0(root, idx); - void *item; + struct radix_tree_node *p = radix_tree_lookup0(root, idx); + void *item; - if (p == NULL) - return NULL; + if (p == NULL) + return NULL; - cfs_list_del_init(&p->_node); - item = p->item; - free(p); - if (cfs_list_empty(&root->list)) - root->rnode = NULL; + list_del_init(&p->_node); + item = p->item; + free(p); + if (list_empty(&root->list)) + root->rnode = NULL; - return item; + return item; } static inline unsigned int diff --git a/libcfs/include/libcfs/user-mem.h b/libcfs/include/libcfs/user-mem.h index 2ad7fce..af0e4b0 100644 --- a/libcfs/include/libcfs/user-mem.h +++ 
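
Editor's note: the userspace radix_tree emulation above is really just a keyed list: insert allocates a node and appends it with list_add_tail(), lookup is a linear list_for_each_entry() scan, and delete unlinks and frees the node, clearing rnode once the list is empty. A short usage sketch under that shim (cache_root and demo() are illustrative names):

#include <errno.h>

static struct radix_tree_root cache_root;	/* shim type defined above */

static int demo(void *item)
{
	void *hit;

	INIT_RADIX_TREE(&cache_root, 0);	/* mask is ignored by the shim */

	if (radix_tree_insert(&cache_root, 42, item) != 0)
		return -ENOMEM;

	hit = radix_tree_lookup(&cache_root, 42);	/* returns item */
	if (hit != item)
		return -EINVAL;

	/* delete unlinks the node, frees it, and resets rnode to NULL
	 * when the backing list becomes empty again */
	return radix_tree_delete(&cache_root, 42) == item ? 0 : -EINVAL;
}
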
b/libcfs/include/libcfs/user-mem.h @@ -38,18 +38,18 @@ #define LIBLUSTRE_HANDLE_UNALIGNED_PAGE struct page { - void *addr; - unsigned long index; - cfs_list_t list; - unsigned long private; - - /* internally used by liblustre file i/o */ - int _offset; - int _count; + void *addr; + unsigned long index; + struct list_head list; + unsigned long private; + + /* internally used by liblustre file i/o */ + int _offset; + int _count; #ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE - int _managed; + int _managed; #endif - cfs_list_t _node; + struct list_head _node; }; diff --git a/libcfs/include/libcfs/user-prim.h b/libcfs/include/libcfs/user-prim.h index ab2671e..6053eb4 100644 --- a/libcfs/include/libcfs/user-prim.h +++ b/libcfs/include/libcfs/user-prim.h @@ -89,12 +89,12 @@ typedef struct proc_dir_entry cfs_proc_dir_entry_t; */ typedef struct cfs_waitlink { - cfs_list_t sleeping; - void *process; + struct list_head sleeping; + void *process; } wait_queue_t; typedef struct cfs_waitq { - cfs_list_t sleepers; + struct list_head sleepers; } wait_queue_head_t; #define CFS_DECL_WAITQ(wq) wait_queue_head_t wq @@ -144,7 +144,7 @@ typedef int (write_proc_t)(struct file *file, const char *buffer, */ struct timer_list { - cfs_list_t tl_list; + struct list_head tl_list; void (*function)(ulong_ptr_t unused); ulong_ptr_t data; long expires; diff --git a/libcfs/include/libcfs/winnt/winnt-mem.h b/libcfs/include/libcfs/winnt/winnt-mem.h index 205fc2c..cf923f9 100644 --- a/libcfs/include/libcfs/winnt/winnt-mem.h +++ b/libcfs/include/libcfs/winnt/winnt-mem.h @@ -253,15 +253,15 @@ extern void *kmem_cache_alloc(struct kmem_cache *, int); extern void kmem_cache_free(struct kmem_cache *, void *); /* - * shrinker + * shrinker */ typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask); struct shrinker { - shrink_callback cb; + shrink_callback cb; int seeks; /* seeks to recreate an obj */ /* These are for internal use */ - cfs_list_t list; + struct list_head list; long nr; /* objs pending delete */ }; @@ -272,7 +272,7 @@ int start_shrinker_timer(); void stop_shrinker_timer(); /* - * Page allocator slabs + * Page allocator slabs */ extern struct kmem_cache *cfs_page_t_slab; diff --git a/libcfs/include/libcfs/winnt/winnt-prim.h b/libcfs/include/libcfs/winnt/winnt-prim.h index 21a475c..ffb604a 100644 --- a/libcfs/include/libcfs/winnt/winnt-prim.h +++ b/libcfs/include/libcfs/winnt/winnt-prim.h @@ -100,10 +100,10 @@ void cfs_enter_debugger(void); #define CFS_SYMBOL_LEN 64 struct cfs_symbol { - char name[CFS_SYMBOL_LEN]; - void *value; - int ref; - cfs_list_t sym_list; + char name[CFS_SYMBOL_LEN]; + void *value; + int ref; + struct list_head sym_list; }; extern int cfs_symbol_register(const char *, const void *); @@ -254,8 +254,8 @@ struct ctl_table /* the mantaner of the cfs_sysctl_table trees */ struct ctl_table_header { - struct ctl_table * ctl_table; - cfs_list_t ctl_entry; + struct ctl_table *ctl_table; + struct list_head ctl_entry; }; /* proc root entries, support routines */ @@ -346,9 +346,9 @@ int seq_release_private(struct inode *, struct file *); * Helpers for iteration over list_head-s in seq_files */ -extern cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos); -extern cfs_list_t *seq_list_start_head(cfs_list_t *head, loff_t pos); -extern cfs_list_t *seq_list_next(void *v, cfs_list_t *head, loff_t *ppos); +extern struct list_head *seq_list_start(struct list_head *head, loff_t pos); +extern struct list_head *seq_list_start_head(struct list_head *head, loff_t pos); +extern struct list_head *seq_list_next(void *v, 
struct list_head *head, loff_t *ppos); /* * declaration of proc kernel process routines @@ -384,8 +384,7 @@ typedef struct cfs_waitq { unsigned int flags; spinlock_t guard; - cfs_list_t waiters; - + struct list_head waiters; } wait_queue_head_t; @@ -396,12 +395,10 @@ typedef struct cfs_waitlink wait_queue_t; #define CFS_WAITQ_CHAN_NORMAL (0) #define CFS_WAITQ_CHAN_FORWARD (1) - - typedef struct cfs_waitlink_channel { - cfs_list_t link; - wait_queue_head_t * waitq; - wait_queue_t * waitl; + struct list_head link; + wait_queue_head_t *waitq; + wait_queue_t *waitl; } cfs_waitlink_channel_t; struct cfs_waitlink { diff --git a/libcfs/include/libcfs/winnt/winnt-tcpip.h b/libcfs/include/libcfs/winnt/winnt-tcpip.h index 8da448a7..16953de 100644 --- a/libcfs/include/libcfs/winnt/winnt-tcpip.h +++ b/libcfs/include/libcfs/winnt/winnt-tcpip.h @@ -48,7 +48,7 @@ // ks definitions // -// iovec is defined in libcfs: winnt_prim.h +// iovec is defined in libcfs: winnt_prim.h // lnetkiov_t is defined in lnet/types.h typedef struct socket ks_tconn_t, cfs_socket_t; @@ -98,10 +98,10 @@ typedef VOID (*ks_schedule_cb)(struct socket*, int); #define TCP_SOCKET_WINDOW 6 -/* Flags we can use with send/ and recv. +/* Flags we can use with send/ and recv. Added those for 1003.1g not all are supported yet */ - + #define MSG_OOB 1 #define MSG_PEEK 2 #define MSG_DONTROUTE 4 @@ -166,14 +166,14 @@ typedef VOID (*ks_schedule_cb)(struct socket*, int); typedef struct _KS_TSDU { - ULONG Magic; /* magic */ - ULONG Flags; /* flags */ + ULONG Magic; /* magic */ + ULONG Flags; /* flags */ - cfs_list_t Link; /* link list */ + struct list_head Link; /* link list */ - ULONG TotalLength; /* total size of KS_TSDU */ - ULONG StartOffset; /* offset of the first Tsdu unit */ - ULONG LastOffset; /* end offset of the last Tsdu unit */ + ULONG TotalLength; /* total size of KS_TSDU */ + ULONG StartOffset; /* offset of the first Tsdu unit */ + ULONG LastOffset; /* end offset of the last Tsdu unit */ /* union { @@ -234,23 +234,23 @@ typedef struct _KS_TSDU_MDL { } KS_TSDU_MDL, *PKS_TSDU_MDL; typedef struct ks_engine_mgr { - spinlock_t lock; - int stop; - event_t exit; - event_t start; - cfs_list_t list; + spinlock_t lock; + int stop; + event_t exit; + event_t start; + struct list_head list; } ks_engine_mgr_t; typedef struct ks_engine_slot { - ks_tconn_t * tconn; - void * tsdumgr; - cfs_list_t link; - int queued; - ks_engine_mgr_t * emgr; + ks_tconn_t *tconn; + void *tsdumgr; + struct list_head link; + int queued; + ks_engine_mgr_t *emgr; } ks_engine_slot_t; typedef struct _KS_TSDUMGR { - cfs_list_t TsduList; + struct list_head TsduList; ULONG NumOfTsdu; ULONG TotalBytes; KEVENT Event; @@ -352,20 +352,17 @@ typedef KS_DISCONNECT_WORKITEM ks_disconnect_t; // typedef struct ks_backlogs { - - cfs_list_t list; /* list to link the backlog connections */ - int num; /* number of backlogs in the list */ - + struct list_head list; /* list to link the backlog connections */ + int num; /* number of backlogs in the list */ } ks_backlogs_t; typedef struct ks_daemon { - - ks_tconn_t * tconn; /* the listener connection object */ - unsigned short nbacklogs; /* number of listening backlog conns */ - unsigned short port; /* listening port number */ - int shutdown; /* daemon threads is to exit */ - cfs_list_t list; /* to be attached into ks_nal_data_t */ + ks_tconn_t *tconn; /* the listener connection object */ + unsigned short nbacklogs; /* number of listening backlog conns */ + unsigned short port; /* listening port number */ + int shutdown; /* daemon threads is 
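
Editor's note: the seq_list_start()/seq_list_start_head()/seq_list_next() prototypes above mirror the Linux seq_file helpers of the same name, letting winnt /proc code iterate a plain struct list_head by file position. A typical pairing in seq_file start/next callbacks (a sketch; my_list and the my_seq_* names are hypothetical):

#include <linux/list.h>
#include <linux/seq_file.h>

static LIST_HEAD(my_list);		/* whatever list the proc file dumps */

static void *my_seq_start(struct seq_file *s, loff_t *pos)
{
	/* returns the pos'th entry of the list, or NULL once past the end */
	return seq_list_start(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	/* advances *pos and hands back the following entry (or NULL) */
	return seq_list_next(v, &my_list, pos);
}
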
to exit */ + struct list_head list; /* to be attached into ks_nal_data_t */ } ks_daemon_t; @@ -437,7 +434,7 @@ struct socket { atomic_t kstc_refcount; /* reference count of ks_tconn_t */ - cfs_list_t kstc_list; /* linked to global ksocknal_data */ + struct list_head kstc_list; /* linked to global ksocknal_data */ union { @@ -462,9 +459,9 @@ struct socket { int kstc_busy; /* referred by ConnectEventCallback ? */ int kstc_accepted; /* the connection is built ready ? */ - cfs_list_t kstc_link; /* linked to parent tdi connection */ - ks_tconn_t * kstc_parent; /* pointers to it's listener parent */ - } child; + struct list_head kstc_link; /* linked to parent tdi connection */ + ks_tconn_t *kstc_parent; /* pointers to it's listener parent */ + } child; struct { ks_tconn_info_t kstc_info; /* Connection Info if Connected */ @@ -624,36 +621,34 @@ typedef struct { HANDLE ksnd_pnp_handle; /* the handle for pnp changes */ spinlock_t ksnd_addrs_lock; /* serialize ip address list */ - LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */ - int ksnd_naddrs; /* number of the ip addresses */ + LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */ + int ksnd_naddrs; /* number of the ip addresses */ - /* - * Tdilnd internal defintions - */ - - int ksnd_init; /* initialisation state */ + /* + * Tdilnd internal defintions + */ + int ksnd_init; /* initialisation state */ - TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */ + TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */ spinlock_t ksnd_tconn_lock; /* tdi connections access lock*/ int ksnd_ntconns; /* number of tconns in list */ - cfs_list_t ksnd_tconns; /* tdi connections list */ + struct list_head ksnd_tconns; /* tdi connections list */ struct kmem_cache *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/ event_t ksnd_tconn_exit; /* event signal by last tconn */ spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */ - int ksnd_ntsdus; /* number of tsdu buffers allocated */ - ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */ - struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */ - - int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */ - cfs_list_t ksnd_freetsdus; /* List of the freed Tsdu buffer. */ + int ksnd_ntsdus; /* number of tsdu buffers allocated */ + ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */ + struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */ - int ksnd_engine_nums; /* number of tcp sending engine threads */ - ks_engine_mgr_t *ksnd_engine_mgr; /* tcp sending engine structure */ + int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */ + struct list_head ksnd_freetsdus; /* List of the freed Tsdu buffer. 
*/ + int ksnd_engine_nums; /* number of tcp sending engine threads */ + ks_engine_mgr_t *ksnd_engine_mgr; /* tcp sending engine structure */ } ks_tdi_data_t; int diff --git a/libcfs/libcfs/darwin/darwin-mem.c b/libcfs/libcfs/darwin/darwin-mem.c index 333e010..4a30bec 100644 --- a/libcfs/libcfs/darwin/darwin-mem.c +++ b/libcfs/libcfs/darwin/darwin-mem.c @@ -54,8 +54,8 @@ extern void *zalloc_noblock(zone_t zone); extern void zfree(zone_t zone, void *addr); struct cfs_zone_nob { - struct list_head *z_nob; /* Pointer to z_link */ - struct list_head z_link; /* Do NOT access it directly */ + struct list_head *z_nob; /* Pointer to z_link */ + struct list_head z_link; /* Do NOT access it directly */ }; static struct cfs_zone_nob cfs_zone_nob; @@ -96,25 +96,25 @@ struct kmem_cache *mem_cache_create(vm_size_t objsize, const char *name) return NULL; } - cname = _MALLOC(strlen(name) + 1, M_TEMP, M_WAITOK); - LASSERT(cname != NULL); - mc->mc_cache = zinit(objsize, (KMEM_MAX_ZONE * objsize), 0, strcpy(cname, name)); - mc->mc_size = objsize; - CFS_INIT_LIST_HEAD(&mc->mc_link); - strncpy(mc->mc_name, name, 1 + strlen(name)); - return mc; + cname = _MALLOC(strlen(name) + 1, M_TEMP, M_WAITOK); + LASSERT(cname != NULL); + mc->mc_cache = zinit(objsize, (KMEM_MAX_ZONE * objsize), 0, strcpy(cname, name)); + mc->mc_size = objsize; + INIT_LIST_HEAD(&mc->mc_link); + strncpy(mc->mc_name, name, 1 + strlen(name)); + return mc; } void mem_cache_destroy(struct kmem_cache *mc) { - /* - * zone can NOT be destroyed after creating, - * so just keep it in list. - * - * We will not lost a zone after we unload - * libcfs, it can be found by from libcfs.zone - */ - return; + /* + * zone can NOT be destroyed after creating, + * so just keep it in list. + * + * We will not lost a zone after we unload + * libcfs, it can be found by from libcfs.zone + */ + return; } #define mem_cache_alloc(mc) zalloc((mc)->mc_cache) @@ -459,26 +459,26 @@ int cfs_mem_init(void) assert(cfs_sysctl_isvalid()); - nob = _MALLOC(sizeof(struct cfs_zone_nob), - M_TEMP, M_WAITOK | M_ZERO); - CFS_INIT_LIST_HEAD(&nob->z_link); - nob->z_nob = &nob->z_link; - oid = cfs_alloc_sysctl_struct(NULL, OID_AUTO, CTLFLAG_RD | CTLFLAG_KERN, - "zone", nob, sizeof(struct cfs_zone_nob)); - if (oid == NULL) { - _FREE(nob, M_TEMP); - return -ENOMEM; - } - sysctl_register_oid(oid); - - cfs_zone_nob.z_nob = nob->z_nob; - } + nob = _MALLOC(sizeof(struct cfs_zone_nob), + M_TEMP, M_WAITOK | M_ZERO); + INIT_LIST_HEAD(&nob->z_link); + nob->z_nob = &nob->z_link; + oid = cfs_alloc_sysctl_struct(NULL, OID_AUTO, CTLFLAG_RD | CTLFLAG_KERN, + "zone", nob, sizeof(struct cfs_zone_nob)); + if (oid == NULL) { + _FREE(nob, M_TEMP); + return -ENOMEM; + } + sysctl_register_oid(oid); + + cfs_zone_nob.z_nob = nob->z_nob; + } spin_lock_init(&cfs_zone_guard); #endif - CFS_INIT_LIST_HEAD(&page_death_row); + INIT_LIST_HEAD(&page_death_row); spin_lock_init(&page_death_row_phylax); raw_page_cache = kmem_cache_create("raw-page", PAGE_CACHE_SIZE, - 0, 0, NULL); + 0, 0, NULL); return 0; } diff --git a/libcfs/libcfs/darwin/darwin-prim.c b/libcfs/libcfs/darwin/darwin-prim.c index d9b95ff..22ff5a9 100644 --- a/libcfs/libcfs/darwin/darwin-prim.c +++ b/libcfs/libcfs/darwin/darwin-prim.c @@ -132,74 +132,72 @@ cfs_symbol_register(const char *name, const void *value) struct cfs_symbol *sym = NULL; struct cfs_symbol *new = NULL; - MALLOC(new, struct cfs_symbol *, sizeof(struct cfs_symbol), M_TEMP, M_WAITOK|M_ZERO); - strncpy(new->name, name, CFS_SYMBOL_LEN); - new->value = (void *)value; - new->ref = 0; - 
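
Editor's note: the darwin changes above only swap the runtime initialiser CFS_INIT_LIST_HEAD() for INIT_LIST_HEAD(); statically allocated heads use the LIST_HEAD() declarator instead. Both leave the head pointing at itself, which is why list_empty() is immediately true. For reference (a sketch assuming <linux/list.h>; the names are illustrative):

#include <linux/list.h>

static LIST_HEAD(death_row_demo);	/* compile-time self-linked head */

struct zone_nob_demo {
	struct list_head z_link;
};

static void zone_nob_demo_setup(struct zone_nob_demo *nob)
{
	/* run-time init for a head embedded in another object;
	 * list_empty(&nob->z_link) is true right after this */
	INIT_LIST_HEAD(&nob->z_link);
}
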
CFS_INIT_LIST_HEAD(&new->sym_list); - - down_write(&cfs_symbol_lock); - list_for_each(walker, &cfs_symbol_list) { - sym = list_entry (walker, struct cfs_symbol, sym_list); - if (!strcmp(sym->name, name)) { - up_write(&cfs_symbol_lock); - FREE(new, M_TEMP); - return KERN_NAME_EXISTS; - } - - } - list_add_tail(&new->sym_list, &cfs_symbol_list); - up_write(&cfs_symbol_lock); - - return KERN_SUCCESS; + MALLOC(new, struct cfs_symbol *, sizeof(struct cfs_symbol), M_TEMP, M_WAITOK|M_ZERO); + strncpy(new->name, name, CFS_SYMBOL_LEN); + new->value = (void *)value; + new->ref = 0; + INIT_LIST_HEAD(&new->sym_list); + + down_write(&cfs_symbol_lock); + list_for_each(walker, &cfs_symbol_list) { + sym = list_entry (walker, struct cfs_symbol, sym_list); + if (!strcmp(sym->name, name)) { + up_write(&cfs_symbol_lock); + FREE(new, M_TEMP); + return KERN_NAME_EXISTS; + } + } + list_add_tail(&new->sym_list, &cfs_symbol_list); + up_write(&cfs_symbol_lock); + return KERN_SUCCESS; } kern_return_t cfs_symbol_unregister(const char *name) { - struct list_head *walker; - struct list_head *nxt; - struct cfs_symbol *sym = NULL; - - down_write(&cfs_symbol_lock); - list_for_each_safe(walker, nxt, &cfs_symbol_list) { - sym = list_entry (walker, struct cfs_symbol, sym_list); - if (!strcmp(sym->name, name)) { - LASSERT(sym->ref == 0); - list_del (&sym->sym_list); - FREE(sym, M_TEMP); - break; - } - } - up_write(&cfs_symbol_lock); + struct list_head *walker; + struct list_head *nxt; + struct cfs_symbol *sym = NULL; + + down_write(&cfs_symbol_lock); + list_for_each_safe(walker, nxt, &cfs_symbol_list) { + sym = list_entry(walker, struct cfs_symbol, sym_list); + if (!strcmp(sym->name, name)) { + LASSERT(sym->ref == 0); + list_del(&sym->sym_list); + FREE(sym, M_TEMP); + break; + } + } + up_write(&cfs_symbol_lock); - return KERN_SUCCESS; + return KERN_SUCCESS; } void cfs_symbol_init() { - CFS_INIT_LIST_HEAD(&cfs_symbol_list); - init_rwsem(&cfs_symbol_lock); + INIT_LIST_HEAD(&cfs_symbol_list); + init_rwsem(&cfs_symbol_lock); } void cfs_symbol_fini() { - struct list_head *walker; - struct cfs_symbol *sym = NULL; + struct list_head *walker; + struct cfs_symbol *sym = NULL; - down_write(&cfs_symbol_lock); - list_for_each(walker, &cfs_symbol_list) { - sym = list_entry (walker, struct cfs_symbol, sym_list); - LASSERT(sym->ref == 0); - list_del (&sym->sym_list); - FREE(sym, M_TEMP); - } - up_write(&cfs_symbol_lock); + down_write(&cfs_symbol_lock); + list_for_each(walker, &cfs_symbol_list) { + sym = list_entry(walker, struct cfs_symbol, sym_list); + LASSERT(sym->ref == 0); + list_del(&sym->sym_list); + FREE(sym, M_TEMP); + } + up_write(&cfs_symbol_lock); - fini_rwsem(&cfs_symbol_lock); - return; + fini_rwsem(&cfs_symbol_lock); + return; } struct kernel_thread_arg diff --git a/libcfs/libcfs/darwin/darwin-sync.c b/libcfs/libcfs/darwin/darwin-sync.c index 81110c7..0aaabbb 100644 --- a/libcfs/libcfs/darwin/darwin-sync.c +++ b/libcfs/libcfs/darwin/darwin-sync.c @@ -661,7 +661,7 @@ void ksleep_chan_init(struct ksleep_chan *chan) SLASSERT(chan != NULL); kspin_init(&chan->guard); - CFS_INIT_LIST_HEAD(&chan->waiters); + INIT_LIST_HEAD(&chan->waiters); ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC); } @@ -677,7 +677,7 @@ void ksleep_link_init(struct ksleep_link *link) { SLASSERT(link != NULL); - CFS_INIT_LIST_HEAD(&link->linkage); + INIT_LIST_HEAD(&link->linkage); link->flags = 0; link->event = current_thread(); link->hits = 0; diff --git a/libcfs/libcfs/darwin/darwin-tracefile.c b/libcfs/libcfs/darwin/darwin-tracefile.c index 2eb8b64..7ef1cad 
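
Editor's note: cfs_symbol_register()/unregister() above keep the open-coded list_for_each() + list_entry() walk; the same loop can be written with list_for_each_entry_safe(), which is the variant to use whenever the current entry may be unlinked mid-walk. An equivalent sketch of the unregister loop (field names taken from cfs_symbol above, the rewrite itself is illustrative and reuses the darwin FREE() macro):

static void symbol_unregister_sketch(struct list_head *symbols,
				     const char *name)
{
	struct cfs_symbol *sym, *tmp;

	/* the _safe form caches the next pointer up front, so list_del()
	 * on the current entry does not break the iteration */
	list_for_each_entry_safe(sym, tmp, symbols, sym_list) {
		if (strcmp(sym->name, name) == 0) {
			list_del(&sym->sym_list);
			FREE(sym, M_TEMP);	/* darwin allocator, as above */
			break;
		}
	}
}
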
100644 --- a/libcfs/libcfs/darwin/darwin-tracefile.c +++ b/libcfs/libcfs/darwin/darwin-tracefile.c @@ -110,17 +110,17 @@ struct trace_cpu_data *trace_get_tcd(void) * debugging check for recursive call to libcfs_debug_msg() */ if (trace_owner == current_thread()) { - /* - * Cannot assert here. - */ + /* + * Cannot assert here. + */ printk(KERN_EMERG "recursive call to %s", __FUNCTION__); /* - * "The death of God left the angels in a strange position." + * "The death of God left the angels in a strange position." */ cfs_enter_debugger(); } tcd = &trace_data[0].tcd; - CFS_INIT_LIST_HEAD(&pages); + INIT_LIST_HEAD(&pages); if (get_preemption_level() == 0) nr_pages = trace_refill_stock(tcd, GFP_IOFS, &pages); else diff --git a/libcfs/libcfs/hash.c b/libcfs/libcfs/hash.c index bb3ab35..e112955 100644 --- a/libcfs/libcfs/hash.c +++ b/libcfs/libcfs/hash.c @@ -241,37 +241,37 @@ cfs_hash_lock_setup(cfs_hash_t *hs) * new element is always added to head of hlist */ typedef struct { - cfs_hlist_head_t hh_head; /**< entries list */ + struct hlist_head hh_head; /**< entries list */ } cfs_hash_head_t; static int cfs_hash_hh_hhead_size(cfs_hash_t *hs) { - return sizeof(cfs_hash_head_t); + return sizeof(cfs_hash_head_t); } -static cfs_hlist_head_t * +static struct hlist_head * cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd) { - cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0]; + cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hh_head; + return &head[bd->bd_offset].hh_head; } static int cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - cfs_hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); - return -1; /* unknown depth */ + hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); + return -1; /* unknown depth */ } static int cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - cfs_hlist_del_init(hnode); - return -1; /* unknown depth */ + hlist_del_init(hnode); + return -1; /* unknown depth */ } /** @@ -279,43 +279,43 @@ cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, * new element is always added to head of hlist */ typedef struct { - cfs_hlist_head_t hd_head; /**< entries list */ - unsigned int hd_depth; /**< list length */ + struct hlist_head hd_head; /**< entries list */ + unsigned int hd_depth; /**< list length */ } cfs_hash_head_dep_t; static int cfs_hash_hd_hhead_size(cfs_hash_t *hs) { - return sizeof(cfs_hash_head_dep_t); + return sizeof(cfs_hash_head_dep_t); } -static cfs_hlist_head_t * +static struct hlist_head * cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd) { - cfs_hash_head_dep_t *head; + cfs_hash_head_dep_t *head; - head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hd_head; + head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].hd_head; } static int cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd), - cfs_hash_head_dep_t, hd_head); - cfs_hlist_add_head(hnode, &hh->hd_head); - return ++hh->hd_depth; + cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd), + cfs_hash_head_dep_t, hd_head); + hlist_add_head(hnode, &hh->hd_head); + return ++hh->hd_depth; } static int cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node 
*hnode) { - cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd), - cfs_hash_head_dep_t, hd_head); - cfs_hlist_del_init(hnode); - return --hh->hd_depth; + cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd), + cfs_hash_head_dep_t, hd_head); + hlist_del_init(hnode); + return --hh->hd_depth; } /** @@ -323,53 +323,53 @@ cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, * new element is always added to tail of hlist */ typedef struct { - cfs_hlist_head_t dh_head; /**< entries list */ - cfs_hlist_node_t *dh_tail; /**< the last entry */ + struct hlist_head dh_head; /**< entries list */ + struct hlist_node *dh_tail; /**< the last entry */ } cfs_hash_dhead_t; static int cfs_hash_dh_hhead_size(cfs_hash_t *hs) { - return sizeof(cfs_hash_dhead_t); + return sizeof(cfs_hash_dhead_t); } -static cfs_hlist_head_t * +static struct hlist_head * cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd) { - cfs_hash_dhead_t *head; + cfs_hash_dhead_t *head; - head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dh_head; + head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].dh_head; } static int cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd), - cfs_hash_dhead_t, dh_head); + cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd), + cfs_hash_dhead_t, dh_head); - if (dh->dh_tail != NULL) /* not empty */ - cfs_hlist_add_after(dh->dh_tail, hnode); - else /* empty list */ - cfs_hlist_add_head(hnode, &dh->dh_head); - dh->dh_tail = hnode; - return -1; /* unknown depth */ + if (dh->dh_tail != NULL) /* not empty */ + hlist_add_after(dh->dh_tail, hnode); + else /* empty list */ + hlist_add_head(hnode, &dh->dh_head); + dh->dh_tail = hnode; + return -1; /* unknown depth */ } static int cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnd) + struct hlist_node *hnd) { - cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd), - cfs_hash_dhead_t, dh_head); + cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd), + cfs_hash_dhead_t, dh_head); - if (hnd->next == NULL) { /* it's the tail */ - dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL : - container_of(hnd->pprev, cfs_hlist_node_t, next); - } - cfs_hlist_del_init(hnd); - return -1; /* unknown depth */ + if (hnd->next == NULL) { /* it's the tail */ + dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? 
NULL : + container_of(hnd->pprev, struct hlist_node, next); + } + hlist_del_init(hnd); + return -1; /* unknown depth */ } /** @@ -377,54 +377,54 @@ cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, * new element is always added to tail of hlist */ typedef struct { - cfs_hlist_head_t dd_head; /**< entries list */ - cfs_hlist_node_t *dd_tail; /**< the last entry */ - unsigned int dd_depth; /**< list length */ + struct hlist_head dd_head; /**< entries list */ + struct hlist_node *dd_tail; /**< the last entry */ + unsigned int dd_depth; /**< list length */ } cfs_hash_dhead_dep_t; static int cfs_hash_dd_hhead_size(cfs_hash_t *hs) { - return sizeof(cfs_hash_dhead_dep_t); + return sizeof(cfs_hash_dhead_dep_t); } -static cfs_hlist_head_t * +static struct hlist_head * cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd) { - cfs_hash_dhead_dep_t *head; + cfs_hash_dhead_dep_t *head; - head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dd_head; + head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].dd_head; } static int cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd), - cfs_hash_dhead_dep_t, dd_head); + cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd), + cfs_hash_dhead_dep_t, dd_head); - if (dh->dd_tail != NULL) /* not empty */ - cfs_hlist_add_after(dh->dd_tail, hnode); - else /* empty list */ - cfs_hlist_add_head(hnode, &dh->dd_head); - dh->dd_tail = hnode; - return ++dh->dd_depth; + if (dh->dd_tail != NULL) /* not empty */ + hlist_add_after(dh->dd_tail, hnode); + else /* empty list */ + hlist_add_head(hnode, &dh->dd_head); + dh->dd_tail = hnode; + return ++dh->dd_depth; } static int cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnd) + struct hlist_node *hnd) { - cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd), - cfs_hash_dhead_dep_t, dd_head); + cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd), + cfs_hash_dhead_dep_t, dd_head); - if (hnd->next == NULL) { /* it's the tail */ - dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL : - container_of(hnd->pprev, cfs_hlist_node_t, next); - } - cfs_hlist_del_init(hnd); - return --dh->dd_depth; + if (hnd->next == NULL) { /* it's the tail */ + dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? 
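
Editor's note: the "double links with tail" bucket variants above keep a dh_tail/dd_tail pointer so new entries append to an hlist in O(1); on delete, the previous tail is recovered from pprev, which in an hlist points at the previous node's next field (or at the head's first pointer when the node is the first one). That is exactly what the container_of(hnd->pprev, struct hlist_node, next) expression exploits. The same trick in isolation (a sketch assuming <linux/list.h>; tailq is an illustrative name, and newer kernels spell hlist_add_after() as hlist_add_behind()):

#include <linux/kernel.h>	/* container_of */
#include <linux/list.h>

struct tailq {
	struct hlist_head	head;
	struct hlist_node	*tail;	/* last node, NULL when empty */
};

static void tailq_add(struct tailq *q, struct hlist_node *hnd)
{
	if (q->tail != NULL)
		hlist_add_after(q->tail, hnd);	/* append behind old tail */
	else
		hlist_add_head(hnd, &q->head);	/* first element */
	q->tail = hnd;
}

static void tailq_del(struct tailq *q, struct hlist_node *hnd)
{
	if (hnd->next == NULL) {		/* removing the tail */
		q->tail = (hnd->pprev == &q->head.first) ? NULL :
			  container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
}
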
NULL : + container_of(hnd->pprev, struct hlist_node, next); + } + hlist_del_init(hnd); + return --dh->dd_depth; } static cfs_hash_hlist_ops_t cfs_hash_hh_hops = { @@ -519,9 +519,9 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur) void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - int rc; + int rc; rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); cfs_hash_bd_dep_record(hs, bd, rc); @@ -539,7 +539,7 @@ EXPORT_SYMBOL(cfs_hash_bd_add_locked); void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { hs->hs_hops->hop_hnode_del(hs, bd, hnode); @@ -560,7 +560,7 @@ EXPORT_SYMBOL(cfs_hash_bd_del_locked); void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old, - cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode) + cfs_hash_bd_t *bd_new, struct hlist_node *hnode) { cfs_hash_bucket_t *obkt = bd_old->bd_bucket; cfs_hash_bucket_t *nbkt = bd_new->bd_bucket; @@ -615,23 +615,23 @@ typedef enum cfs_hash_lookup_intent { CFS_HS_LOOKUP_MASK_DEL) } cfs_hash_lookup_intent_t; -static cfs_hlist_node_t * +static struct hlist_node * cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd, - const void *key, cfs_hlist_node_t *hnode, - cfs_hash_lookup_intent_t intent) + const void *key, struct hlist_node *hnode, + cfs_hash_lookup_intent_t intent) { - cfs_hlist_head_t *hhead = cfs_hash_bd_hhead(hs, bd); - cfs_hlist_node_t *ehnode; - cfs_hlist_node_t *match; - int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; + struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); + struct hlist_node *ehnode; + struct hlist_node *match; + int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; - /* with this function, we can avoid a lot of useless refcount ops, - * which are expensive atomic operations most time. */ - match = intent_add ? NULL : hnode; - cfs_hlist_for_each(ehnode, hhead) { - if (!cfs_hash_keycmp(hs, key, ehnode)) - continue; + /* with this function, we can avoid a lot of useless refcount ops, + * which are expensive atomic operations most time. */ + match = intent_add ? 
NULL : hnode; + hlist_for_each(ehnode, hhead) { + if (!cfs_hash_keycmp(hs, key, ehnode)) + continue; if (match != NULL && match != ehnode) /* can't match */ continue; @@ -656,40 +656,40 @@ cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd, return hnode; } -cfs_hlist_node_t * +struct hlist_node * cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key) { - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_FIND); + return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, + CFS_HS_LOOKUP_IT_FIND); } EXPORT_SYMBOL(cfs_hash_bd_lookup_locked); -cfs_hlist_node_t * +struct hlist_node * cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key) { return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_PEEK); + CFS_HS_LOOKUP_IT_PEEK); } EXPORT_SYMBOL(cfs_hash_bd_peek_locked); -cfs_hlist_node_t * +struct hlist_node * cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - const void *key, cfs_hlist_node_t *hnode, + const void *key, struct hlist_node *hnode, int noref) { - return cfs_hash_bd_lookup_intent(hs, bd, key, hnode, - CFS_HS_LOOKUP_IT_ADD | - (!noref * CFS_HS_LOOKUP_MASK_REF)); + return cfs_hash_bd_lookup_intent(hs, bd, key, hnode, + CFS_HS_LOOKUP_IT_ADD | + (!noref * CFS_HS_LOOKUP_MASK_REF)); } EXPORT_SYMBOL(cfs_hash_bd_findadd_locked); -cfs_hlist_node_t * +struct hlist_node * cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - const void *key, cfs_hlist_node_t *hnode) + const void *key, struct hlist_node *hnode) { - /* hnode can be NULL, we find the first item with @key */ - return cfs_hash_bd_lookup_intent(hs, bd, key, hnode, - CFS_HS_LOOKUP_IT_FINDDEL); + /* hnode can be NULL, we find the first item with @key */ + return cfs_hash_bd_lookup_intent(hs, bd, key, hnode, + CFS_HS_LOOKUP_IT_FINDDEL); } EXPORT_SYMBOL(cfs_hash_bd_finddel_locked); @@ -731,30 +731,30 @@ cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, } } -static cfs_hlist_node_t * +static struct hlist_node * cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, - unsigned n, const void *key) + unsigned n, const void *key) { - cfs_hlist_node_t *ehnode; - unsigned i; + struct hlist_node *ehnode; + unsigned i; - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, - CFS_HS_LOOKUP_IT_FIND); - if (ehnode != NULL) - return ehnode; - } - return NULL; + cfs_hash_for_each_bd(bds, n, i) { + ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, + CFS_HS_LOOKUP_IT_FIND); + if (ehnode != NULL) + return ehnode; + } + return NULL; } -static cfs_hlist_node_t * +static struct hlist_node * cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs, - cfs_hash_bd_t *bds, unsigned n, const void *key, - cfs_hlist_node_t *hnode, int noref) + cfs_hash_bd_t *bds, unsigned n, const void *key, + struct hlist_node *hnode, int noref) { - cfs_hlist_node_t *ehnode; - int intent; - unsigned i; + struct hlist_node *ehnode; + int intent; + unsigned i; LASSERT(hnode != NULL); intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF); @@ -778,21 +778,21 @@ cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs, return hnode; } -static cfs_hlist_node_t * +static struct hlist_node * cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, - unsigned n, const void *key, - cfs_hlist_node_t *hnode) + unsigned n, const void *key, + struct hlist_node *hnode) { - cfs_hlist_node_t *ehnode; - unsigned i; + struct hlist_node *ehnode; + unsigned i; - cfs_hash_for_each_bd(bds, n, i) { - ehnode = 
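
Editor's note: every *_lookup/findadd/finddel_locked() wrapper above funnels into the one cfs_hash_bd_lookup_intent() routine; the intent word both selects the operation and, via "(!noref * CFS_HS_LOOKUP_MASK_REF)", folds the "take a reference" bit in without a branch, which is what saves the redundant refcount atomics mentioned in the comment. The branch-free flag trick on its own (illustrative names):

enum {
	LOOKUP_FIND	= 1 << 0,
	LOOKUP_ADD	= 1 << 1,
	LOOKUP_REF	= 1 << 2,	/* caller wants a reference taken */
};

static unsigned int lookup_intent(int add, int noref)
{
	/* !noref evaluates to 0 or 1, so the multiply contributes either
	 * LOOKUP_REF or nothing -- the same effect as
	 * (noref ? 0 : LOOKUP_REF) without a conditional */
	return (add ? LOOKUP_ADD : LOOKUP_FIND) |
	       (!noref * LOOKUP_REF);
}
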
cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, - CFS_HS_LOOKUP_IT_FINDDEL); - if (ehnode != NULL) - return ehnode; - } - return NULL; + cfs_hash_for_each_bd(bds, n, i) { + ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, + CFS_HS_LOOKUP_IT_FINDDEL); + if (ehnode != NULL) + return ehnode; + } + return NULL; } static void @@ -856,7 +856,7 @@ cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl) } EXPORT_SYMBOL(cfs_hash_dual_bd_unlock); -cfs_hlist_node_t * +struct hlist_node * cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, const void *key) { @@ -864,21 +864,21 @@ cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, } EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked); -cfs_hlist_node_t * +struct hlist_node * cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, - const void *key, cfs_hlist_node_t *hnode, - int noref) + const void *key, struct hlist_node *hnode, + int noref) { - return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key, - hnode, noref); + return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key, + hnode, noref); } EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked); -cfs_hlist_node_t * +struct hlist_node * cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds, - const void *key, cfs_hlist_node_t *hnode) + const void *key, struct hlist_node *hnode) { - return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); + return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); } EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked); @@ -922,9 +922,9 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts, min(old_size, new_size) * sizeof(*old_bkts)); } - for (i = old_size; i < new_size; i++) { - cfs_hlist_head_t *hhead; - cfs_hash_bd_t bd; + for (i = old_size; i < new_size; i++) { + struct hlist_head *hhead; + cfs_hash_bd_t bd; LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs)); if (new_bkts[i] == NULL) { @@ -933,12 +933,12 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts, return NULL; } - new_bkts[i]->hsb_index = i; - new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ - new_bkts[i]->hsb_depmax = -1; /* unknown */ - bd.bd_bucket = new_bkts[i]; - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) - CFS_INIT_HLIST_HEAD(hhead); + new_bkts[i]->hsb_index = i; + new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ + new_bkts[i]->hsb_depmax = -1; /* unknown */ + bd.bd_bucket = new_bkts[i]; + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) + INIT_HLIST_HEAD(hhead); if (cfs_hash_with_no_lock(hs) || cfs_hash_with_no_bktlock(hs)) @@ -1096,15 +1096,15 @@ EXPORT_SYMBOL(cfs_hash_create); static void cfs_hash_destroy(cfs_hash_t *hs) { - cfs_hlist_node_t *hnode; - cfs_hlist_node_t *pos; - cfs_hash_bd_t bd; - int i; - ENTRY; + struct hlist_node *hnode; + struct hlist_node *pos; + cfs_hash_bd_t bd; + int i; + ENTRY; - LASSERT(hs != NULL); - LASSERT(!cfs_hash_is_exiting(hs) && - !cfs_hash_is_iterating(hs)); + LASSERT(hs != NULL); + LASSERT(!cfs_hash_is_exiting(hs) && + !cfs_hash_is_iterating(hs)); /** * prohibit further rehashes, don't need any lock because @@ -1119,26 +1119,26 @@ cfs_hash_destroy(cfs_hash_t *hs) LASSERT(hs->hs_buckets != NULL && hs->hs_rehash_buckets == NULL); - cfs_hash_for_each_bucket(hs, &bd, i) { - cfs_hlist_head_t *hhead; + cfs_hash_for_each_bucket(hs, &bd, i) { + struct hlist_head *hhead; - LASSERT(bd.bd_bucket != NULL); - /* no need to take this lock, just for consistent code */ - cfs_hash_bd_lock(hs, &bd, 1); + LASSERT(bd.bd_bucket != NULL); + /* no need to take 
this lock, just for consistent code */ + cfs_hash_bd_lock(hs, &bd, 1); cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - cfs_hlist_for_each_safe(hnode, pos, hhead) { - LASSERTF(!cfs_hash_with_assert_empty(hs), - "hash %s bucket %u(%u) is not " - " empty: %u items left\n", - hs->hs_name, bd.bd_bucket->hsb_index, - bd.bd_offset, bd.bd_bucket->hsb_count); - /* can't assert key valicate, because we - * can interrupt rehash */ - cfs_hash_bd_del_locked(hs, &bd, hnode); - cfs_hash_exit(hs, hnode); - } - } + hlist_for_each_safe(hnode, pos, hhead) { + LASSERTF(!cfs_hash_with_assert_empty(hs), + "hash %s bucket %u(%u) is not " + " empty: %u items left\n", + hs->hs_name, bd.bd_bucket->hsb_index, + bd.bd_offset, bd.bd_bucket->hsb_count); + /* can't assert key valicate, because we + * can interrupt rehash */ + cfs_hash_bd_del_locked(hs, &bd, hnode); + cfs_hash_exit(hs, hnode); + } + } LASSERT(bd.bd_bucket->hsb_count == 0); cfs_hash_bd_unlock(hs, &bd, 1); cond_resched(); @@ -1219,12 +1219,12 @@ cfs_hash_rehash_inline(cfs_hash_t *hs) * ops->hs_get function will be called when the item is added. */ void -cfs_hash_add(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode) +cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) { cfs_hash_bd_t bd; int bits; - LASSERT(cfs_hlist_unhashed(hnode)); + LASSERT(hlist_unhashed(hnode)); cfs_hash_lock(hs, 0); cfs_hash_bd_get_and_lock(hs, key, &bd, 1); @@ -1241,15 +1241,15 @@ cfs_hash_add(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode) } EXPORT_SYMBOL(cfs_hash_add); -static cfs_hlist_node_t * +static struct hlist_node * cfs_hash_find_or_add(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode, int noref) + struct hlist_node *hnode, int noref) { - cfs_hlist_node_t *ehnode; - cfs_hash_bd_t bds[2]; - int bits = 0; + struct hlist_node *ehnode; + cfs_hash_bd_t bds[2]; + int bits = 0; - LASSERT(cfs_hlist_unhashed(hnode)); + LASSERT(hlist_unhashed(hnode)); cfs_hash_lock(hs, 0); cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); @@ -1274,10 +1274,10 @@ cfs_hash_find_or_add(cfs_hash_t *hs, const void *key, * Returns 0 on success or -EALREADY on key collisions. */ int -cfs_hash_add_unique(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode) +cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) { - return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? - -EALREADY : 0; + return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? + -EALREADY : 0; } EXPORT_SYMBOL(cfs_hash_add_unique); @@ -1289,11 +1289,11 @@ EXPORT_SYMBOL(cfs_hash_add_unique); */ void * cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key, - cfs_hlist_node_t *hnode) + struct hlist_node *hnode) { - hnode = cfs_hash_find_or_add(hs, key, hnode, 0); + hnode = cfs_hash_find_or_add(hs, key, hnode, 0); - return cfs_hash_object(hs, hnode); + return cfs_hash_object(hs, hnode); } EXPORT_SYMBOL(cfs_hash_findadd_unique); @@ -1305,7 +1305,7 @@ EXPORT_SYMBOL(cfs_hash_findadd_unique); * on the removed object. 
*/ void * -cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode) +cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode) { void *obj = NULL; int bits = 0; @@ -1315,7 +1315,7 @@ cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode) cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); /* NB: do nothing if @hnode is not in hash table */ - if (hnode == NULL || !cfs_hlist_unhashed(hnode)) { + if (hnode == NULL || !hlist_unhashed(hnode)) { if (bds[1].bd_bucket == NULL && hnode != NULL) { cfs_hash_bd_del_locked(hs, &bds[0], hnode); } else { @@ -1363,7 +1363,7 @@ void * cfs_hash_lookup(cfs_hash_t *hs, const void *key) { void *obj = NULL; - cfs_hlist_node_t *hnode; + struct hlist_node *hnode; cfs_hash_bd_t bds[2]; cfs_hash_lock(hs, 0); @@ -1439,56 +1439,56 @@ cfs_hash_for_each_exit(cfs_hash_t *hs) */ static __u64 cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, - void *data, int remove_safe) -{ - cfs_hlist_node_t *hnode; - cfs_hlist_node_t *pos; - cfs_hash_bd_t bd; - __u64 count = 0; - int excl = !!remove_safe; - int loop = 0; - int i; - ENTRY; - - cfs_hash_for_each_enter(hs); - - cfs_hash_lock(hs, 0); - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - cfs_hlist_head_t *hhead; - - cfs_hash_bd_lock(hs, &bd, excl); - if (func == NULL) { /* only glimpse size */ - count += bd.bd_bucket->hsb_count; - cfs_hash_bd_unlock(hs, &bd, excl); - continue; - } + void *data, int remove_safe) +{ + struct hlist_node *hnode; + struct hlist_node *pos; + cfs_hash_bd_t bd; + __u64 count = 0; + int excl = !!remove_safe; + int loop = 0; + int i; + ENTRY; + + cfs_hash_for_each_enter(hs); + + cfs_hash_lock(hs, 0); + LASSERT(!cfs_hash_is_rehashing(hs)); + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct hlist_head *hhead; + + cfs_hash_bd_lock(hs, &bd, excl); + if (func == NULL) { /* only glimpse size */ + count += bd.bd_bucket->hsb_count; + cfs_hash_bd_unlock(hs, &bd, excl); + continue; + } - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - cfs_hlist_for_each_safe(hnode, pos, hhead) { - cfs_hash_bucket_validate(hs, &bd, hnode); - count++; - loop++; - if (func(hs, &bd, hnode, data)) { - cfs_hash_bd_unlock(hs, &bd, excl); - goto out; - } - } - } - cfs_hash_bd_unlock(hs, &bd, excl); - if (loop < CFS_HASH_LOOP_HOG) - continue; + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + hlist_for_each_safe(hnode, pos, hhead) { + cfs_hash_bucket_validate(hs, &bd, hnode); + count++; + loop++; + if (func(hs, &bd, hnode, data)) { + cfs_hash_bd_unlock(hs, &bd, excl); + goto out; + } + } + } + cfs_hash_bd_unlock(hs, &bd, excl); + if (loop < CFS_HASH_LOOP_HOG) + continue; loop = 0; cfs_hash_unlock(hs, 0); cond_resched(); cfs_hash_lock(hs, 0); } out: - cfs_hash_unlock(hs, 0); + cfs_hash_unlock(hs, 0); - cfs_hash_for_each_exit(hs); - RETURN(count); + cfs_hash_for_each_exit(hs); + RETURN(count); } typedef struct { @@ -1498,7 +1498,7 @@ typedef struct { static int cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) + struct hlist_node *hnode, void *data) { cfs_hash_cond_arg_t *cond = data; @@ -1542,10 +1542,10 @@ EXPORT_SYMBOL(cfs_hash_for_each_safe); static int cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) + struct hlist_node *hnode, void *data) { - *(int *)data = 0; - return 1; /* return 1 to break the loop */ + *(int *)data = 0; + return 1; /* return 1 to break the loop */ } int @@ -1585,8 +1585,8 @@ EXPORT_SYMBOL(cfs_hash_size_get); static int 
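
Editor's note: cfs_hash_destroy() above (and cfs_hash_for_each_tight() further down) walk each bucket with hlist_for_each_safe(), so the body may unhash the current node while the traversal continues. The core pattern, reduced to a single bucket (a sketch; drain_bucket() is an illustrative name):

#include <linux/list.h>

static unsigned int drain_bucket(struct hlist_head *hhead)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;		/* lookahead saved by the macro */
	unsigned int count = 0;

	hlist_for_each_safe(hnode, pos, hhead) {
		hlist_del_init(hnode);	/* safe: next was cached in pos */
		count++;
	}
	return count;
}
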
cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data) { - cfs_hlist_node_t *hnode; - cfs_hlist_node_t *tmp; + struct hlist_node *hnode; + struct hlist_node *tmp; cfs_hash_bd_t bd; __u32 version; int count = 0; @@ -1601,8 +1601,8 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data) cfs_hash_lock(hs, 0); LASSERT(!cfs_hash_is_rehashing(hs)); - cfs_hash_for_each_bucket(hs, &bd, i) { - cfs_hlist_head_t *hhead; + cfs_hash_for_each_bucket(hs, &bd, i) { + struct hlist_head *hhead; cfs_hash_bd_lock(hs, &bd, 0); version = cfs_hash_bd_version_get(&bd); @@ -1706,29 +1706,29 @@ EXPORT_SYMBOL(cfs_hash_for_each_empty); void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex, - cfs_hash_for_each_cb_t func, void *data) + cfs_hash_for_each_cb_t func, void *data) { - cfs_hlist_head_t *hhead; - cfs_hlist_node_t *hnode; - cfs_hash_bd_t bd; + struct hlist_head *hhead; + struct hlist_node *hnode; + cfs_hash_bd_t bd; cfs_hash_for_each_enter(hs); cfs_hash_lock(hs, 0); if (hindex >= CFS_HASH_NHLIST(hs)) goto out; - cfs_hash_bd_index_set(hs, hindex, &bd); + cfs_hash_bd_index_set(hs, hindex, &bd); - cfs_hash_bd_lock(hs, &bd, 0); - hhead = cfs_hash_bd_hhead(hs, &bd); - cfs_hlist_for_each(hnode, hhead) { - if (func(hs, &bd, hnode, data)) - break; - } - cfs_hash_bd_unlock(hs, &bd, 0); - out: - cfs_hash_unlock(hs, 0); - cfs_hash_for_each_exit(hs); + cfs_hash_bd_lock(hs, &bd, 0); + hhead = cfs_hash_bd_hhead(hs, &bd); + hlist_for_each(hnode, hhead) { + if (func(hs, &bd, hnode, data)) + break; + } + cfs_hash_bd_unlock(hs, &bd, 0); +out: + cfs_hash_unlock(hs, 0); + cfs_hash_for_each_exit(hs); } EXPORT_SYMBOL(cfs_hash_hlist_for_each); @@ -1741,31 +1741,31 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each); */ void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key, - cfs_hash_for_each_cb_t func, void *data) + cfs_hash_for_each_cb_t func, void *data) { - cfs_hlist_node_t *hnode; - cfs_hash_bd_t bds[2]; - unsigned i; + struct hlist_node *hnode; + cfs_hash_bd_t bds[2]; + unsigned i; - cfs_hash_lock(hs, 0); + cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); + cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - cfs_hash_for_each_bd(bds, 2, i) { - cfs_hlist_head_t *hlist = cfs_hash_bd_hhead(hs, &bds[i]); + cfs_hash_for_each_bd(bds, 2, i) { + struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]); - cfs_hlist_for_each(hnode, hlist) { - cfs_hash_bucket_validate(hs, &bds[i], hnode); + hlist_for_each(hnode, hlist) { + cfs_hash_bucket_validate(hs, &bds[i], hnode); - if (cfs_hash_keycmp(hs, key, hnode)) { - if (func(hs, &bds[i], hnode, data)) - break; - } - } - } + if (cfs_hash_keycmp(hs, key, hnode)) { + if (func(hs, &bds[i], hnode, data)) + break; + } + } + } - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); + cfs_hash_dual_bd_unlock(hs, bds, 0); + cfs_hash_unlock(hs, 0); } EXPORT_SYMBOL(cfs_hash_for_each_key); @@ -1851,32 +1851,31 @@ EXPORT_SYMBOL(cfs_hash_rehash); static int cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old) { - cfs_hash_bd_t new; - cfs_hlist_head_t *hhead; - cfs_hlist_node_t *hnode; - cfs_hlist_node_t *pos; - void *key; - int c = 0; - - /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ - cfs_hash_bd_for_each_hlist(hs, old, hhead) { - cfs_hlist_for_each_safe(hnode, pos, hhead) { - key = cfs_hash_key(hs, hnode); - LASSERT(key != NULL); - /* Validate hnode is in the correct bucket. */ - cfs_hash_bucket_validate(hs, old, hnode); - /* - * Delete from old hash bucket; move to new bucket. 
- * ops->hs_key must be defined. - */ - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &new); - cfs_hash_bd_move_locked(hs, old, &new, hnode); - c++; - } - } - - return c; + cfs_hash_bd_t new; + struct hlist_head *hhead; + struct hlist_node *hnode; + struct hlist_node *pos; + void *key; + int c = 0; + + /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ + cfs_hash_bd_for_each_hlist(hs, old, hhead) { + hlist_for_each_safe(hnode, pos, hhead) { + key = cfs_hash_key(hs, hnode); + LASSERT(key != NULL); + /* Validate hnode is in the correct bucket. */ + cfs_hash_bucket_validate(hs, old, hnode); + /* + * Delete from old hash bucket; move to new bucket. + * ops->hs_key must be defined. + */ + cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, + hs->hs_rehash_bits, key, &new); + cfs_hash_bd_move_locked(hs, old, &new, hnode); + c++; + } + } + return c; } static int @@ -1990,13 +1989,13 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi) * not be called. */ void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key, - void *new_key, cfs_hlist_node_t *hnode) + void *new_key, struct hlist_node *hnode) { cfs_hash_bd_t bds[3]; cfs_hash_bd_t old_bds[2]; cfs_hash_bd_t new_bd; - LASSERT(!cfs_hlist_unhashed(hnode)); + LASSERT(!hlist_unhashed(hnode)); cfs_hash_lock(hs, 0); diff --git a/libcfs/libcfs/kernel_user_comm.c b/libcfs/libcfs/kernel_user_comm.c index 96bee49..fc92e2f 100644 --- a/libcfs/libcfs/kernel_user_comm.c +++ b/libcfs/libcfs/kernel_user_comm.c @@ -193,13 +193,13 @@ EXPORT_SYMBOL(libcfs_kkuc_msg_put); * group from any fs */ /** A single group registration has a uid and a file pointer */ struct kkuc_reg { - cfs_list_t kr_chain; + struct list_head kr_chain; int kr_uid; struct file *kr_fp; void *kr_data; }; -static cfs_list_t kkuc_groups[KUC_GRP_MAX+1] = {}; +static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {}; /* Protect message sending against remove and adds */ static DECLARE_RWSEM(kg_sem); @@ -233,8 +233,8 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, int group, void *data) down_write(&kg_sem); if (kkuc_groups[group].next == NULL) - CFS_INIT_LIST_HEAD(&kkuc_groups[group]); - cfs_list_add(®->kr_chain, &kkuc_groups[group]); + INIT_LIST_HEAD(&kkuc_groups[group]); + list_add(®->kr_chain, &kkuc_groups[group]); up_write(&kg_sem); CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group); @@ -263,12 +263,12 @@ int libcfs_kkuc_group_rem(int uid, int group, void **pdata) } down_write(&kg_sem); - cfs_list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { - if ((uid == 0) || (uid == reg->kr_uid)) { - cfs_list_del(®->kr_chain); - CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", - reg->kr_uid, reg->kr_fp, group); - if (reg->kr_fp != NULL) + list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { + if ((uid == 0) || (uid == reg->kr_uid)) { + list_del(®->kr_chain); + CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", + reg->kr_uid, reg->kr_fp, group); + if (reg->kr_fp != NULL) fput(reg->kr_fp); if (pdata != NULL) *pdata = reg->kr_data; @@ -277,7 +277,7 @@ int libcfs_kkuc_group_rem(int uid, int group, void **pdata) } up_write(&kg_sem); - RETURN(0); + RETURN(0); } EXPORT_SYMBOL(libcfs_kkuc_group_rem); @@ -289,7 +289,7 @@ int libcfs_kkuc_group_put(int group, void *payload) ENTRY; down_read(&kg_sem); - cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { + list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { if (reg->kr_fp != NULL) { rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); if (rc == 0) @@ -324,24 
+324,24 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, int rc = 0; ENTRY; - if (group > KUC_GRP_MAX) { - CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); - RETURN(-EINVAL); - } + if (group > KUC_GRP_MAX) { + CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); + RETURN(-EINVAL); + } - /* no link for this group */ - if (kkuc_groups[group].next == NULL) - RETURN(0); + /* no link for this group */ + if (kkuc_groups[group].next == NULL) + RETURN(0); down_read(&kg_sem); - cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) { - rc = cb_func(reg->kr_data, cb_arg); - } - } + list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { + if (reg->kr_fp != NULL) { + rc = cb_func(reg->kr_data, cb_arg); + } + } up_read(&kg_sem); - RETURN(rc); + RETURN(rc); } EXPORT_SYMBOL(libcfs_kkuc_group_foreach); diff --git a/libcfs/libcfs/libcfs_string.c b/libcfs/libcfs/libcfs_string.c index 73276e7..be26f93 100644 --- a/libcfs/libcfs/libcfs_string.c +++ b/libcfs/libcfs/libcfs_string.c @@ -402,7 +402,7 @@ cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list) { struct cfs_range_expr *expr; - cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) { + list_for_each_entry(expr, &expr_list->el_exprs, re_link) { if (value >= expr->re_lo && value <= expr->re_hi && ((value - expr->re_lo) % expr->re_stride) == 0) return 1; @@ -427,7 +427,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) int count = 0; int i; - cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) { + list_for_each_entry(expr, &expr_list->el_exprs, re_link) { for (i = expr->re_lo; i <= expr->re_hi; i++) { if (((i - expr->re_lo) % expr->re_stride) == 0) count++; @@ -448,7 +448,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) return -ENOMEM; count = 0; - cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) { + list_for_each_entry(expr, &expr_list->el_exprs, re_link) { for (i = expr->re_lo; i <= expr->re_hi; i++) { if (((i - expr->re_lo) % expr->re_stride) == 0) val[count++] = i; @@ -468,12 +468,12 @@ EXPORT_SYMBOL(cfs_expr_list_values); void cfs_expr_list_free(struct cfs_expr_list *expr_list) { - while (!cfs_list_empty(&expr_list->el_exprs)) { + while (!list_empty(&expr_list->el_exprs)) { struct cfs_range_expr *expr; - expr = cfs_list_entry(expr_list->el_exprs.next, + expr = list_entry(expr_list->el_exprs.next, struct cfs_range_expr, re_link), - cfs_list_del(&expr->re_link); + list_del(&expr->re_link); LIBCFS_FREE(expr, sizeof(*expr)); } @@ -486,7 +486,7 @@ cfs_expr_list_print(struct cfs_expr_list *expr_list) { struct cfs_range_expr *expr; - cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) { + list_for_each_entry(expr, &expr_list->el_exprs, re_link) { CDEBUG(D_WARNING, "%d-%d/%d\n", expr->re_lo, expr->re_hi, expr->re_stride); } @@ -515,7 +515,7 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, src.ls_str = str; src.ls_len = len; - CFS_INIT_LIST_HEAD(&expr_list->el_exprs); + INIT_LIST_HEAD(&expr_list->el_exprs); if (src.ls_str[0] == '[' && src.ls_str[src.ls_len - 1] == ']') { @@ -535,13 +535,13 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, if (rc != 0) break; - cfs_list_add_tail(&expr->re_link, + list_add_tail(&expr->re_link, &expr_list->el_exprs); } } else { rc = cfs_range_expr_parse(&src, min, max, 0, &expr); if (rc == 0) { - cfs_list_add_tail(&expr->re_link, + list_add_tail(&expr->re_link, &expr_list->el_exprs); } } @@ -564,21 +564,21 @@ 
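
Editor's note: cfs_expr_list_free() and cfs_expr_list_free_list() above both use the standard pop-from-the-front teardown: take head->next, unlink it, free it, and repeat until list_empty() reports the head points back at itself. The shape of that loop in general form (a sketch with an illustrative node type; the real code frees with LIBCFS_FREE):

#include <linux/list.h>
#include <linux/slab.h>

struct expr_node_demo {
	struct list_head	link;
	/* payload ... */
};

static void free_all_demo(struct list_head *head)
{
	while (!list_empty(head)) {
		struct expr_node_demo *node;

		/* always detach the first element; each pass shortens the
		 * list by one, so the loop terminates */
		node = list_entry(head->next, struct expr_node_demo, link);
		list_del(&node->link);
		kfree(node);
	}
}
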
EXPORT_SYMBOL(cfs_expr_list_parse); * \retval none */ void -cfs_expr_list_free_list(cfs_list_t *list) +cfs_expr_list_free_list(struct list_head *list) { struct cfs_expr_list *el; - while (!cfs_list_empty(list)) { - el = cfs_list_entry(list->next, + while (!list_empty(list)) { + el = list_entry(list->next, struct cfs_expr_list, el_link); - cfs_list_del(&el->el_link); + list_del(&el->el_link); cfs_expr_list_free(el); } } EXPORT_SYMBOL(cfs_expr_list_free_list); int -cfs_ip_addr_parse(char *str, int len, cfs_list_t *list) +cfs_ip_addr_parse(char *str, int len, struct list_head *list) { struct cfs_expr_list *el; struct cfs_lstr src; @@ -601,7 +601,7 @@ cfs_ip_addr_parse(char *str, int len, cfs_list_t *list) if (rc != 0) goto out; - cfs_list_add_tail(&el->el_link, list); + list_add_tail(&el->el_link, list); i++; } @@ -623,12 +623,12 @@ EXPORT_SYMBOL(cfs_ip_addr_parse); * \retval 0 otherwise */ int -cfs_ip_addr_match(__u32 addr, cfs_list_t *list) +cfs_ip_addr_match(__u32 addr, struct list_head *list) { struct cfs_expr_list *el; int i = 0; - cfs_list_for_each_entry_reverse(el, list, el_link) { + list_for_each_entry_reverse(el, list, el_link) { if (!cfs_expr_list_match(addr & 0xff, el)) return 0; addr >>= 8; @@ -640,7 +640,7 @@ cfs_ip_addr_match(__u32 addr, cfs_list_t *list) EXPORT_SYMBOL(cfs_ip_addr_match); void -cfs_ip_addr_free(cfs_list_t *list) +cfs_ip_addr_free(struct list_head *list) { cfs_expr_list_free_list(list); } diff --git a/libcfs/libcfs/linux/linux-cpu.c b/libcfs/libcfs/linux/linux-cpu.c index 18bbcfb..b685296 100644 --- a/libcfs/libcfs/linux/linux-cpu.c +++ b/libcfs/libcfs/linux/linux-cpu.c @@ -958,7 +958,7 @@ cfs_cpt_table_create_pattern(char *pattern) goto failed; } - cfs_list_for_each_entry(range, &el->el_exprs, re_link) { + list_for_each_entry(range, &el->el_exprs, re_link) { for (i = range->re_lo; i <= range->re_hi; i++) { if ((i - range->re_lo) % range->re_stride != 0) continue; diff --git a/libcfs/libcfs/lwt.c b/libcfs/libcfs/lwt.c index 48bf56b..39e6743 100644 --- a/libcfs/libcfs/lwt.c +++ b/libcfs/libcfs/lwt.c @@ -114,13 +114,13 @@ lwt_control (int enable, int clear) if (!clear) continue; - for (j = 0; j < lwt_pages_per_cpu; j++) { + for (j = 0; j < lwt_pages_per_cpu; j++) { memset(p->lwtp_events, 0, PAGE_CACHE_SIZE); - p = cfs_list_entry (p->lwtp_list.next, - lwt_page_t, lwtp_list); - } - } + p = list_entry(p->lwtp_list.next, + lwt_page_t, lwtp_list); + } + } if (enable) { lwt_enabled = 1; @@ -162,13 +162,12 @@ lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size, bytes_per_page)) return -EFAULT; - user_ptr = ((char *)user_ptr) + bytes_per_page; - p = cfs_list_entry(p->lwtp_list.next, - lwt_page_t, lwtp_list); - } - } - - return (0); + user_ptr = ((char *)user_ptr) + bytes_per_page; + p = list_entry(p->lwtp_list.next, + lwt_page_t, lwtp_list); + } + } + return (0); } int lwt_init () @@ -210,11 +209,11 @@ int lwt_init () memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE); if (j == 0) { - CFS_INIT_LIST_HEAD (&lwtp->lwtp_list); + INIT_LIST_HEAD (&lwtp->lwtp_list); lwt_cpus[i].lwtc_current_page = lwtp; } else { - cfs_list_add (&lwtp->lwtp_list, - &lwt_cpus[i].lwtc_current_page->lwtp_list); + list_add(&lwtp->lwtp_list, + &lwt_cpus[i].lwtc_current_page->lwtp_list); } } @@ -236,19 +235,17 @@ void lwt_fini () while (lwt_cpus[i].lwtc_current_page != NULL) { lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page; - if (cfs_list_empty (&lwtp->lwtp_list)) { - lwt_cpus[i].lwtc_current_page = NULL; - } else { - lwt_cpus[i].lwtc_current_page = - cfs_list_entry (lwtp->lwtp_list.next, - 
lwt_page_t, lwtp_list); - - cfs_list_del (&lwtp->lwtp_list); - } - - __free_page (lwtp->lwtp_page); - LIBCFS_FREE (lwtp, sizeof (*lwtp)); - } + if (list_empty (&lwtp->lwtp_list)) { + lwt_cpus[i].lwtc_current_page = NULL; + } else { + lwt_cpus[i].lwtc_current_page = + list_entry(lwtp->lwtp_list.next, + lwt_page_t, lwtp_list); + list_del (&lwtp->lwtp_list); + } + __free_page (lwtp->lwtp_page); + LIBCFS_FREE (lwtp, sizeof (*lwtp)); + } } EXPORT_SYMBOL(lwt_enabled); diff --git a/libcfs/libcfs/module.c b/libcfs/libcfs/module.c index 20c63be..e474285 100644 --- a/libcfs/libcfs/module.c +++ b/libcfs/libcfs/module.c @@ -185,35 +185,35 @@ static int libcfs_psdev_release(unsigned long flags, void *args) } static struct rw_semaphore ioctl_list_sem; -static cfs_list_t ioctl_list; +static struct list_head ioctl_list; int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand) { - int rc = 0; + int rc = 0; down_write(&ioctl_list_sem); - if (!cfs_list_empty(&hand->item)) - rc = -EBUSY; - else - cfs_list_add_tail(&hand->item, &ioctl_list); + if (!list_empty(&hand->item)) + rc = -EBUSY; + else + list_add_tail(&hand->item, &ioctl_list); up_write(&ioctl_list_sem); - return rc; + return rc; } EXPORT_SYMBOL(libcfs_register_ioctl); int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand) { - int rc = 0; + int rc = 0; down_write(&ioctl_list_sem); - if (cfs_list_empty(&hand->item)) - rc = -ENOENT; - else - cfs_list_del_init(&hand->item); + if (list_empty(&hand->item)) + rc = -ENOENT; + else + list_del_init(&hand->item); up_write(&ioctl_list_sem); - return rc; + return rc; } EXPORT_SYMBOL(libcfs_deregister_ioctl); @@ -304,25 +304,25 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd, } default: { - struct libcfs_ioctl_handler *hand; - err = -EINVAL; + struct libcfs_ioctl_handler *hand; + + err = -EINVAL; down_read(&ioctl_list_sem); - cfs_list_for_each_entry_typed(hand, &ioctl_list, - struct libcfs_ioctl_handler, item) { - err = hand->handle_ioctl(cmd, data); - if (err != -EINVAL) { - if (err == 0) - err = libcfs_ioctl_popdata(arg, - data, sizeof (*data)); - break; - } - } + list_for_each_entry(hand, &ioctl_list, item) { + err = hand->handle_ioctl(cmd, data); + if (err != -EINVAL) { + if (err == 0) + err = libcfs_ioctl_popdata(arg, + data, sizeof (*data)); + break; + } + } up_read(&ioctl_list_sem); - break; - } - } + break; + } + } - RETURN(err); + RETURN(err); } static int libcfs_ioctl(struct cfs_psdev_file *pfile, @@ -384,7 +384,7 @@ static int init_libcfs_module(void) init_rwsem(&cfs_tracefile_sem); mutex_init(&cfs_trace_thread_mutex); init_rwsem(&ioctl_list_sem); - CFS_INIT_LIST_HEAD(&ioctl_list); + INIT_LIST_HEAD(&ioctl_list); init_waitqueue_head(&cfs_race_waitq); rc = libcfs_debug_init(5 * 1024 * 1024); diff --git a/libcfs/libcfs/nidstrings.c b/libcfs/libcfs/nidstrings.c index b49376b..75b6ff3 100644 --- a/libcfs/libcfs/nidstrings.c +++ b/libcfs/libcfs/nidstrings.c @@ -101,18 +101,18 @@ static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr); static void libcfs_decnum_addr2str(__u32 addr, char *str); static void libcfs_hexnum_addr2str(__u32 addr, char *str); static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr); -static int libcfs_num_parse(char *str, int len, cfs_list_t *list); -static int libcfs_num_match(__u32 addr, cfs_list_t *list); +static int libcfs_num_parse(char *str, int len, struct list_head *list); +static int libcfs_num_match(__u32 addr, struct list_head *list); struct netstrfns { - int nf_type; - char *nf_name; - char *nf_modname; - 
void (*nf_addr2str)(__u32 addr, char *str); - int (*nf_str2addr)(const char *str, int nob, __u32 *addr); - int (*nf_parse_addrlist)(char *str, int len, - cfs_list_t *list); - int (*nf_match_addr)(__u32 addr, cfs_list_t *list); + int nf_type; + char *nf_name; + char *nf_modname; + void (*nf_addr2str)(__u32 addr, char *str); + int (*nf_str2addr)(const char *str, int nob, __u32 *addr); + int (*nf_parse_addrlist)(char *str, int len, + struct list_head *list); + int (*nf_match_addr)(__u32 addr, struct list_head *list); }; static struct netstrfns libcfs_netstrfns[] = { @@ -587,41 +587,41 @@ libcfs_str2anynid(lnet_nid_t *nidp, const char *str) * One of this is created for each \ parsed. */ struct nidrange { - /** - * Link to list of this structures which is built on nid range - * list parsing. - */ - cfs_list_t nr_link; - /** - * List head for addrrange::ar_link. - */ - cfs_list_t nr_addrranges; - /** - * Flag indicating that *@ is found. - */ - int nr_all; - /** - * Pointer to corresponding element of libcfs_netstrfns. - */ - struct netstrfns *nr_netstrfns; - /** - * Number of network. E.g. 5 if \ is "elan5". - */ - int nr_netnum; + /** + * Link to list of this structures which is built on nid range + * list parsing. + */ + struct list_head nr_link; + /** + * List head for addrrange::ar_link. + */ + struct list_head nr_addrranges; + /** + * Flag indicating that *@ is found. + */ + int nr_all; + /** + * Pointer to corresponding element of libcfs_netstrfns. + */ + struct netstrfns *nr_netstrfns; + /** + * Number of network. E.g. 5 if \ is "elan5". + */ + int nr_netnum; }; /** * Structure to represent \ token of the syntax. */ struct addrrange { - /** - * Link to nidrange::nr_addrranges. - */ - cfs_list_t ar_link; - /** + /** + * Link to nidrange::nr_addrranges. + */ + struct list_head ar_link; + /** * List head for cfs_expr_list::el_list. 
- */ - cfs_list_t ar_numaddr_ranges; + */ + struct list_head ar_numaddr_ranges; }; /** @@ -633,14 +633,14 @@ struct addrrange { * \retval errno otherwise */ static int -libcfs_num_parse(char *str, int len, cfs_list_t *list) +libcfs_num_parse(char *str, int len, struct list_head *list) { struct cfs_expr_list *el; int rc; rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el); if (rc == 0) - cfs_list_add_tail(&el->el_link, list); + list_add_tail(&el->el_link, list); return rc; } @@ -657,22 +657,22 @@ libcfs_num_parse(char *str, int len, cfs_list_t *list) static int parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange) { - struct addrrange *addrrange; + struct addrrange *addrrange; - if (src->ls_len == 1 && src->ls_str[0] == '*') { - nidrange->nr_all = 1; - return 1; - } + if (src->ls_len == 1 && src->ls_str[0] == '*') { + nidrange->nr_all = 1; + return 1; + } - LIBCFS_ALLOC(addrrange, sizeof(struct addrrange)); - if (addrrange == NULL) - return 0; - cfs_list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges); - CFS_INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges); + LIBCFS_ALLOC(addrrange, sizeof(struct addrrange)); + if (addrrange == NULL) + return 0; + list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges); + INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges); - return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str, - src->ls_len, - &addrrange->ar_numaddr_ranges); + return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str, + src->ls_len, + &addrrange->ar_numaddr_ranges); } /** @@ -687,7 +687,7 @@ parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange) */ static struct nidrange * add_nidrange(const struct cfs_lstr *src, - cfs_list_t *nidlist) + struct list_head *nidlist) { struct netstrfns *nf; struct nidrange *nr; @@ -713,24 +713,24 @@ add_nidrange(const struct cfs_lstr *src, return NULL; } - cfs_list_for_each_entry(nr, nidlist, nr_link) { - if (nr->nr_netstrfns != nf) - continue; - if (nr->nr_netnum != netnum) - continue; - return nr; - } + list_for_each_entry(nr, nidlist, nr_link) { + if (nr->nr_netstrfns != nf) + continue; + if (nr->nr_netnum != netnum) + continue; + return nr; + } - LIBCFS_ALLOC(nr, sizeof(struct nidrange)); - if (nr == NULL) - return NULL; - cfs_list_add_tail(&nr->nr_link, nidlist); - CFS_INIT_LIST_HEAD(&nr->nr_addrranges); - nr->nr_netstrfns = nf; - nr->nr_all = 0; - nr->nr_netnum = netnum; + LIBCFS_ALLOC(nr, sizeof(struct nidrange)); + if (nr == NULL) + return NULL; + list_add_tail(&nr->nr_link, nidlist); + INIT_LIST_HEAD(&nr->nr_addrranges); + nr->nr_netstrfns = nf; + nr->nr_all = 0; + nr->nr_netnum = netnum; - return nr; + return nr; } /** @@ -740,7 +740,7 @@ add_nidrange(const struct cfs_lstr *src, * \retval 0 otherwise */ static int -parse_nidrange(struct cfs_lstr *src, cfs_list_t *nidlist) +parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist) { struct cfs_lstr addrrange; struct cfs_lstr net; @@ -776,15 +776,15 @@ parse_nidrange(struct cfs_lstr *src, cfs_list_t *nidlist) * \retval none */ static void -free_addrranges(cfs_list_t *list) +free_addrranges(struct list_head *list) { - while (!cfs_list_empty(list)) { + while (!list_empty(list)) { struct addrrange *ar; - ar = cfs_list_entry(list->next, struct addrrange, ar_link); + ar = list_entry(list->next, struct addrrange, ar_link); cfs_expr_list_free_list(&ar->ar_numaddr_ranges); - cfs_list_del(&ar->ar_link); + list_del(&ar->ar_link); LIBCFS_FREE(ar, sizeof(struct addrrange)); } } @@ -798,17 +798,17 @@ free_addrranges(cfs_list_t *list) * \retval none */ 
void -cfs_free_nidlist(cfs_list_t *list) +cfs_free_nidlist(struct list_head *list) { - cfs_list_t *pos, *next; - struct nidrange *nr; - - cfs_list_for_each_safe(pos, next, list) { - nr = cfs_list_entry(pos, struct nidrange, nr_link); - free_addrranges(&nr->nr_addrranges); - cfs_list_del(pos); - LIBCFS_FREE(nr, sizeof(struct nidrange)); - } + struct list_head *pos, *next; + struct nidrange *nr; + + list_for_each_safe(pos, next, list) { + nr = list_entry(pos, struct nidrange, nr_link); + free_addrranges(&nr->nr_addrranges); + list_del(pos); + LIBCFS_FREE(nr, sizeof(struct nidrange)); + } } /** @@ -825,29 +825,29 @@ cfs_free_nidlist(cfs_list_t *list) * \retval 0 otherwise */ int -cfs_parse_nidlist(char *str, int len, cfs_list_t *nidlist) +cfs_parse_nidlist(char *str, int len, struct list_head *nidlist) { struct cfs_lstr src; struct cfs_lstr res; - int rc; - ENTRY; + int rc; + ENTRY; - src.ls_str = str; - src.ls_len = len; - CFS_INIT_LIST_HEAD(nidlist); - while (src.ls_str) { + src.ls_str = str; + src.ls_len = len; + INIT_LIST_HEAD(nidlist); + while (src.ls_str) { rc = cfs_gettok(&src, ' ', &res); - if (rc == 0) { - cfs_free_nidlist(nidlist); - RETURN(0); - } - rc = parse_nidrange(&res, nidlist); - if (rc == 0) { - cfs_free_nidlist(nidlist); - RETURN(0); - } - } - RETURN(1); + if (rc == 0) { + cfs_free_nidlist(nidlist); + RETURN(0); + } + rc = parse_nidrange(&res, nidlist); + if (rc == 0) { + cfs_free_nidlist(nidlist); + RETURN(0); + } + } + RETURN(1); } /* @@ -857,12 +857,12 @@ cfs_parse_nidlist(char *str, int len, cfs_list_t *nidlist) * \retval 0 otherwise */ static int -libcfs_num_match(__u32 addr, cfs_list_t *numaddr) +libcfs_num_match(__u32 addr, struct list_head *numaddr) { struct cfs_expr_list *el; - LASSERT(!cfs_list_empty(numaddr)); - el = cfs_list_entry(numaddr->next, struct cfs_expr_list, el_link); + LASSERT(!list_empty(numaddr)); + el = list_entry(numaddr->next, struct cfs_expr_list, el_link); return cfs_expr_list_match(addr, el); } @@ -875,25 +875,25 @@ libcfs_num_match(__u32 addr, cfs_list_t *numaddr) * \retval 1 on match * \retval 0 otherwises */ -int cfs_match_nid(lnet_nid_t nid, cfs_list_t *nidlist) +int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist) { - struct nidrange *nr; - struct addrrange *ar; - ENTRY; - - cfs_list_for_each_entry(nr, nidlist, nr_link) { - if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid))) - continue; - if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid))) - continue; - if (nr->nr_all) - RETURN(1); - cfs_list_for_each_entry(ar, &nr->nr_addrranges, ar_link) - if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid), - &ar->ar_numaddr_ranges)) - RETURN(1); - } - RETURN(0); + struct nidrange *nr; + struct addrrange *ar; + ENTRY; + + list_for_each_entry(nr, nidlist, nr_link) { + if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid))) + continue; + if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid))) + continue; + if (nr->nr_all) + RETURN(1); + list_for_each_entry(ar, &nr->nr_addrranges, ar_link) + if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid), + &ar->ar_numaddr_ranges)) + RETURN(1); + } + RETURN(0); } #ifdef __KERNEL__ diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c index e15ec68..0f65ae7 100644 --- a/libcfs/libcfs/tracefile.c +++ b/libcfs/libcfs/tracefile.c @@ -58,12 +58,12 @@ static int thread_running = 0; atomic_t cfs_tage_allocated = ATOMIC_INIT(0); static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd); + struct cfs_trace_cpu_data *tcd); static inline 
struct cfs_trace_page * -cfs_tage_from_list(cfs_list_t *list) +cfs_tage_from_list(struct list_head *list) { - return cfs_list_entry(list, struct cfs_trace_page, linkage); + return list_entry(list, struct cfs_trace_page, linkage); } static struct cfs_trace_page *cfs_tage_alloc(int gfp) @@ -106,33 +106,33 @@ static void cfs_tage_free(struct cfs_trace_page *tage) } static void cfs_tage_to_tail(struct cfs_trace_page *tage, - cfs_list_t *queue) + struct list_head *queue) { - __LASSERT(tage != NULL); - __LASSERT(queue != NULL); + __LASSERT(tage != NULL); + __LASSERT(queue != NULL); - cfs_list_move_tail(&tage->linkage, queue); + list_move_tail(&tage->linkage, queue); } int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp, - cfs_list_t *stock) + struct list_head *stock) { - int i; + int i; - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ - for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) { - struct cfs_trace_page *tage; + for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) { + struct cfs_trace_page *tage; - tage = cfs_tage_alloc(gfp); - if (tage == NULL) - break; - cfs_list_add_tail(&tage->linkage, stock); - } - return i; + tage = cfs_tage_alloc(gfp); + if (tage == NULL) + break; + list_add_tail(&tage->linkage, stock); + } + return i; } /* return a page that has 'len' bytes left at the end */ @@ -142,17 +142,17 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) struct cfs_trace_page *tage; if (tcd->tcd_cur_pages > 0) { - __LASSERT(!cfs_list_empty(&tcd->tcd_pages)); + __LASSERT(!list_empty(&tcd->tcd_pages)); tage = cfs_tage_from_list(tcd->tcd_pages.prev); if (tage->used + len <= PAGE_CACHE_SIZE) return tage; } - if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { + if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { if (tcd->tcd_cur_stock_pages > 0) { tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); --tcd->tcd_cur_stock_pages; - cfs_list_del_init(&tage->linkage); + list_del_init(&tage->linkage); } else { tage = cfs_tage_alloc(GFP_ATOMIC); if (unlikely(tage == NULL)) { @@ -168,7 +168,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) tage->used = 0; tage->cpu = smp_processor_id(); tage->type = tcd->tcd_type; - cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages); + list_add_tail(&tage->linkage, &tcd->tcd_pages); tcd->tcd_cur_pages++; if (tcd->tcd_cur_pages > 8 && thread_running) { @@ -185,32 +185,31 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) { - int pgcount = tcd->tcd_cur_pages / 10; - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; + int pgcount = tcd->tcd_cur_pages / 10; + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. 
+ */ if (printk_ratelimit()) printk(KERN_WARNING "debug daemon buffer overflowed; " - "discarding 10%% of pages (%d of %ld)\n", - pgcount + 1, tcd->tcd_cur_pages); + "discarding 10%% of pages (%d of %ld)\n", + pgcount + 1, tcd->tcd_cur_pages); - CFS_INIT_LIST_HEAD(&pc.pc_pages); + INIT_LIST_HEAD(&pc.pc_pages); - cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages, - struct cfs_trace_page, linkage) { - if (pgcount-- == 0) - break; + list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { + if (pgcount-- == 0) + break; - cfs_list_move_tail(&tage->linkage, &pc.pc_pages); - tcd->tcd_cur_pages--; - } - put_pages_on_tcd_daemon_list(&pc, tcd); + list_move_tail(&tage->linkage, &pc.pc_pages); + tcd->tcd_cur_pages--; + } + put_pages_on_tcd_daemon_list(&pc, tcd); } /* return a page that has 'len' bytes left at the end */ @@ -502,25 +501,25 @@ cfs_trace_assertion_failed(const char *str, static void panic_collect_pages(struct page_collection *pc) { - /* Do the collect_pages job on a single CPU: assumes that all other - * CPUs have been stopped during a panic. If this isn't true for some - * arch, this will have to be implemented separately in each arch. */ - int i; - int j; - struct cfs_trace_cpu_data *tcd; + /* Do the collect_pages job on a single CPU: assumes that all other + * CPUs have been stopped during a panic. If this isn't true for some + * arch, this will have to be implemented separately in each arch. */ + int i; + int j; + struct cfs_trace_cpu_data *tcd; - CFS_INIT_LIST_HEAD(&pc->pc_pages); + INIT_LIST_HEAD(&pc->pc_pages); - cfs_tcd_for_each(tcd, i, j) { - cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; + cfs_tcd_for_each(tcd, i, j) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - cfs_list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } + if (pc->pc_want_daemon_pages) { + list_splice_init(&tcd->tcd_daemon_pages, + &pc->pc_pages); + tcd->tcd_cur_daemon_pages = 0; + } + } } static void collect_pages_on_all_cpus(struct page_collection *pc) @@ -528,33 +527,33 @@ static void collect_pages_on_all_cpus(struct page_collection *pc) struct cfs_trace_cpu_data *tcd; int i, cpu; - cfs_for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - cfs_list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } - } + cfs_for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; + if (pc->pc_want_daemon_pages) { + list_splice_init(&tcd->tcd_daemon_pages, + &pc->pc_pages); + tcd->tcd_cur_daemon_pages = 0; + } + } + } } static void collect_pages(struct page_collection *pc) { - CFS_INIT_LIST_HEAD(&pc->pc_pages); + INIT_LIST_HEAD(&pc->pc_pages); - if (libcfs_panic_in_progress) - panic_collect_pages(pc); - else - collect_pages_on_all_cpus(pc); + if (libcfs_panic_in_progress) + panic_collect_pages(pc); + else + collect_pages_on_all_cpus(pc); } static void put_pages_back_on_all_cpus(struct page_collection *pc) { struct cfs_trace_cpu_data *tcd; - cfs_list_t *cur_head; + struct list_head *cur_head; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; int i, cpu; @@ -563,21 +562,19 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc) cfs_tcd_for_each_type_lock(tcd, i, cpu) { 
cur_head = tcd->tcd_pages.next; - cfs_list_for_each_entry_safe_typed(tage, tmp, - &pc->pc_pages, - struct cfs_trace_page, - linkage) { + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, + linkage) { - __LASSERT_TAGE_INVARIANT(tage); + __LASSERT_TAGE_INVARIANT(tage); - if (tage->cpu != cpu || tage->type != i) - continue; + if (tage->cpu != cpu || tage->type != i) + continue; - cfs_tage_to_tail(tage, cur_head); - tcd->tcd_cur_pages++; - } - } - } + cfs_tage_to_tail(tage, cur_head); + tcd->tcd_cur_pages++; + } + } + } } static void put_pages_back(struct page_collection *pc) @@ -596,30 +593,28 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages, - struct cfs_trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); - if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) - continue; + if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) + continue; - cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); - tcd->tcd_cur_daemon_pages++; + cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); + tcd->tcd_cur_daemon_pages++; - if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { - struct cfs_trace_page *victim; + if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { + struct cfs_trace_page *victim; - __LASSERT(!cfs_list_empty(&tcd->tcd_daemon_pages)); - victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); + __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); + victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); __LASSERT_TAGE_INVARIANT(victim); - cfs_list_del(&victim->linkage); - cfs_tage_free(victim); - tcd->tcd_cur_daemon_pages--; - } - } + list_del(&victim->linkage); + cfs_tage_free(victim); + tcd->tcd_cur_daemon_pages--; + } + } } static void put_pages_on_daemon_list(struct page_collection *pc) @@ -639,10 +634,9 @@ void cfs_trace_debug_print(void) struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, linkage) { + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { char *p, *file, *fn; struct page *page; @@ -666,9 +660,9 @@ void cfs_trace_debug_print(void) p += len; } - cfs_list_del(&tage->linkage); - cfs_tage_free(tage); - } + list_del(&tage->linkage); + cfs_tage_free(tage); + } } int cfs_tracefile_dump_all_pages(char *filename) @@ -694,7 +688,7 @@ int cfs_tracefile_dump_all_pages(char *filename) pc.pc_want_daemon_pages = 1; collect_pages(&pc); - if (cfs_list_empty(&pc.pc_pages)) { + if (list_empty(&pc.pc_pages)) { rc = 0; goto close; } @@ -702,8 +696,7 @@ int cfs_tracefile_dump_all_pages(char *filename) /* ok, for now, just write the pages. 
in the future we'll be building * iobufs with the pages and calling generic_direct_IO */ MMSPACE_OPEN; - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, linkage) { + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); @@ -713,10 +706,10 @@ int cfs_tracefile_dump_all_pages(char *filename) printk(KERN_WARNING "wanted to write %u but wrote " "%d\n", tage->used, rc); put_pages_back(&pc); - __LASSERT(cfs_list_empty(&pc.pc_pages)); + __LASSERT(list_empty(&pc.pc_pages)); break; } - cfs_list_del(&tage->linkage); + list_del(&tage->linkage); cfs_tage_free(tage); } MMSPACE_CLOSE; @@ -736,16 +729,15 @@ void cfs_trace_flush_pages(void) struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, linkage) { + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); + __LASSERT_TAGE_INVARIANT(tage); - cfs_list_del(&tage->linkage); - cfs_tage_free(tage); - } + list_del(&tage->linkage); + cfs_tage_free(tage); + } } int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, @@ -988,7 +980,7 @@ static int tracefiled(void *arg) pc.pc_want_daemon_pages = 0; collect_pages(&pc); - if (cfs_list_empty(&pc.pc_pages)) + if (list_empty(&pc.pc_pages)) goto end_loop; filp = NULL; @@ -1007,15 +999,13 @@ static int tracefiled(void *arg) cfs_tracefile_read_unlock(); if (filp == NULL) { put_pages_on_daemon_list(&pc); - __LASSERT(cfs_list_empty(&pc.pc_pages)); + __LASSERT(list_empty(&pc.pc_pages)); goto end_loop; } MMSPACE_OPEN; - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, - linkage) { + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; __LASSERT_TAGE_INVARIANT(tage); @@ -1031,14 +1021,14 @@ static int tracefiled(void *arg) printk(KERN_WARNING "wanted to write %u " "but wrote %d\n", tage->used, rc); put_pages_back(&pc); - __LASSERT(cfs_list_empty(&pc.pc_pages)); + __LASSERT(list_empty(&pc.pc_pages)); } } MMSPACE_CLOSE; filp_close(filp, NULL); put_pages_on_daemon_list(&pc); - if (!cfs_list_empty(&pc.pc_pages)) { + if (!list_empty(&pc.pc_pages)) { int i; printk(KERN_ALERT "Lustre: trace pages aren't " @@ -1053,14 +1043,14 @@ static int tracefiled(void *arg) printk(KERN_ERR "\n"); i = 0; - cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages, + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) printk(KERN_ERR "page %d belongs to cpu " "%d\n", ++i, tage->cpu); printk(KERN_ERR "There are %d pages unwritten\n", i); } - __LASSERT(cfs_list_empty(&pc.pc_pages)); + __LASSERT(list_empty(&pc.pc_pages)); end_loop: if (atomic_read(&tctl->tctl_shutdown)) { if (last_loop == 0) { @@ -1124,64 +1114,59 @@ void cfs_trace_stop_thread(void) int cfs_tracefile_init(int max_pages) { - struct cfs_trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; - - rc = cfs_tracefile_init_arch(); - if (rc != 0) - return rc; - - cfs_tcd_for_each(tcd, i, j) { - /* tcd_pages_factor is initialized int tracefile_init_arch. 
*/ - factor = tcd->tcd_pages_factor; - CFS_INIT_LIST_HEAD(&tcd->tcd_pages); - CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages); - CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages); - tcd->tcd_cur_pages = 0; - tcd->tcd_cur_stock_pages = 0; - tcd->tcd_cur_daemon_pages = 0; - tcd->tcd_max_pages = (max_pages * factor) / 100; - LASSERT(tcd->tcd_max_pages > 0); - tcd->tcd_shutting_down = 0; - } - - return 0; + struct cfs_trace_cpu_data *tcd; + int i; + int j; + int rc; + int factor; + + rc = cfs_tracefile_init_arch(); + if (rc != 0) + return rc; + + cfs_tcd_for_each(tcd, i, j) { + /* tcd_pages_factor is initialized int tracefile_init_arch. */ + factor = tcd->tcd_pages_factor; + INIT_LIST_HEAD(&tcd->tcd_pages); + INIT_LIST_HEAD(&tcd->tcd_stock_pages); + INIT_LIST_HEAD(&tcd->tcd_daemon_pages); + tcd->tcd_cur_pages = 0; + tcd->tcd_cur_stock_pages = 0; + tcd->tcd_cur_daemon_pages = 0; + tcd->tcd_max_pages = (max_pages * factor) / 100; + LASSERT(tcd->tcd_max_pages > 0); + tcd->tcd_shutting_down = 0; + } + return 0; } static void trace_cleanup_on_all_cpus(void) { - struct cfs_trace_cpu_data *tcd; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - cfs_for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - tcd->tcd_shutting_down = 1; + struct cfs_trace_cpu_data *tcd; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + int i, cpu; - cfs_list_for_each_entry_safe_typed(tage, tmp, - &tcd->tcd_pages, - struct cfs_trace_page, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); + cfs_for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + tcd->tcd_shutting_down = 1; - cfs_list_del(&tage->linkage); - cfs_tage_free(tage); - } + list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); - tcd->tcd_cur_pages = 0; - } - } + list_del(&tage->linkage); + cfs_tage_free(tage); + } + tcd->tcd_cur_pages = 0; + } + } } static void cfs_trace_cleanup(void) { struct page_collection pc; - CFS_INIT_LIST_HEAD(&pc.pc_pages); + INIT_LIST_HEAD(&pc.pc_pages); trace_cleanup_on_all_cpus(); diff --git a/libcfs/libcfs/tracefile.h b/libcfs/libcfs/tracefile.h index 513ee17..c7b4c42 100644 --- a/libcfs/libcfs/tracefile.h +++ b/libcfs/libcfs/tracefile.h @@ -125,9 +125,9 @@ union cfs_trace_data_union { /* * pages with trace records not yet processed by tracefiled. */ - cfs_list_t tcd_pages; + struct list_head tcd_pages; /* number of pages on ->tcd_pages */ - unsigned long tcd_cur_pages; + unsigned long tcd_cur_pages; /* * pages with trace records already processed by @@ -139,9 +139,9 @@ union cfs_trace_data_union { * (put_pages_on_daemon_list()). LRU pages from this list are * discarded when list grows too large. */ - cfs_list_t tcd_daemon_pages; + struct list_head tcd_daemon_pages; /* number of pages on ->tcd_daemon_pages */ - unsigned long tcd_cur_daemon_pages; + unsigned long tcd_cur_daemon_pages; /* * Maximal number of pages allowed on ->tcd_pages and @@ -173,7 +173,7 @@ union cfs_trace_data_union { * TCD_STOCK_PAGES pagesful are consumed by trace records all * emitted in non-blocking contexts. Which is quite unlikely. 
*/ - cfs_list_t tcd_stock_pages; + struct list_head tcd_stock_pages; /* number of pages on ->tcd_stock_pages */ unsigned long tcd_cur_stock_pages; @@ -203,13 +203,13 @@ extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS]; /* XXX nikita: this declaration is internal to tracefile.c and should probably * be moved there */ struct page_collection { - cfs_list_t pc_pages; + struct list_head pc_pages; /* * if this flag is set, collect_pages() will spill both * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, * only ->tcd_pages are spilled. */ - int pc_want_daemon_pages; + int pc_want_daemon_pages; }; /* XXX nikita: this declaration is internal to tracefile.c and should probably @@ -231,24 +231,24 @@ struct cfs_trace_page { /* * page itself */ - struct page *page; + struct page *page; /* * linkage into one of the lists in trace_data_union or * page_collection */ - cfs_list_t linkage; + struct list_head linkage; /* * number of bytes used within this page */ - unsigned int used; + unsigned int used; /* * cpu that owns this page */ - unsigned short cpu; + unsigned short cpu; /* * type(context) of this page */ - unsigned short type; + unsigned short type; }; extern void cfs_set_ptldebug_header(struct ptldebug_header *header, @@ -302,7 +302,7 @@ static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd) } int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp, - cfs_list_t *stock); + struct list_head *stock); int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, diff --git a/libcfs/libcfs/upcall_cache.c b/libcfs/libcfs/upcall_cache.c index 2a3137e..e481ed7 100644 --- a/libcfs/libcfs/upcall_cache.c +++ b/libcfs/libcfs/upcall_cache.c @@ -51,7 +51,7 @@ static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache, return NULL; UC_CACHE_SET_NEW(entry); - CFS_INIT_LIST_HEAD(&entry->ue_hash); + INIT_LIST_HEAD(&entry->ue_hash); entry->ue_key = key; atomic_set(&entry->ue_refcount, 0); init_waitqueue_head(&entry->ue_waitq); @@ -64,13 +64,13 @@ static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache, static void free_entry(struct upcall_cache *cache, struct upcall_cache_entry *entry) { - if (cache->uc_ops->free_entry) - cache->uc_ops->free_entry(cache, entry); + if (cache->uc_ops->free_entry) + cache->uc_ops->free_entry(cache, entry); - cfs_list_del(&entry->ue_hash); - CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n", - entry, entry->ue_key); - LIBCFS_FREE(entry, sizeof(*entry)); + list_del(&entry->ue_hash); + CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n", + entry, entry->ue_key); + LIBCFS_FREE(entry, sizeof(*entry)); } static inline int upcall_compare(struct upcall_cache *cache, @@ -132,7 +132,7 @@ static int check_unlink_entry(struct upcall_cache *cache, UC_CACHE_SET_EXPIRED(entry); } - cfs_list_del_init(&entry->ue_hash); + list_del_init(&entry->ue_hash); if (!atomic_read(&entry->ue_refcount)) free_entry(cache, entry); return 1; @@ -149,48 +149,48 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache, __u64 key, void *args) { struct upcall_cache_entry *entry = NULL, *new = NULL, *next; - cfs_list_t *head; + struct list_head *head; wait_queue_t wait; int rc, found; ENTRY; LASSERT(cache); - head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)]; + head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)]; find_again: - found = 0; + found = 0; spin_lock(&cache->uc_lock); - cfs_list_for_each_entry_safe(entry, next, head, ue_hash) { - /* check invalid & expired items */ - if 
(check_unlink_entry(cache, entry)) - continue; - if (upcall_compare(cache, entry, key, args) == 0) { - found = 1; - break; - } - } + list_for_each_entry_safe(entry, next, head, ue_hash) { + /* check invalid & expired items */ + if (check_unlink_entry(cache, entry)) + continue; + if (upcall_compare(cache, entry, key, args) == 0) { + found = 1; + break; + } + } - if (!found) { - if (!new) { + if (!found) { + if (!new) { spin_unlock(&cache->uc_lock); - new = alloc_entry(cache, key, args); - if (!new) { - CERROR("fail to alloc entry\n"); - RETURN(ERR_PTR(-ENOMEM)); - } - goto find_again; - } else { - cfs_list_add(&new->ue_hash, head); - entry = new; - } - } else { - if (new) { - free_entry(cache, new); - new = NULL; - } - cfs_list_move(&entry->ue_hash, head); - } - get_entry(entry); + new = alloc_entry(cache, key, args); + if (!new) { + CERROR("fail to alloc entry\n"); + RETURN(ERR_PTR(-ENOMEM)); + } + goto find_again; + } else { + list_add(&new->ue_hash, head); + entry = new; + } + } else { + if (new) { + free_entry(cache, new); + new = NULL; + } + list_move(&entry->ue_hash, head); + } + get_entry(entry); /* acquire for new one */ if (UC_CACHE_IS_NEW(entry)) { @@ -291,23 +291,23 @@ EXPORT_SYMBOL(upcall_cache_put_entry); int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key, void *args) { - struct upcall_cache_entry *entry = NULL; - cfs_list_t *head; - int found = 0, rc = 0; - ENTRY; + struct upcall_cache_entry *entry = NULL; + struct list_head *head; + int found = 0, rc = 0; + ENTRY; - LASSERT(cache); + LASSERT(cache); - head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)]; + head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)]; spin_lock(&cache->uc_lock); - cfs_list_for_each_entry(entry, head, ue_hash) { - if (downcall_compare(cache, entry, key, args) == 0) { - found = 1; - get_entry(entry); - break; - } - } + list_for_each_entry(entry, head, ue_hash) { + if (downcall_compare(cache, entry, key, args) == 0) { + found = 1; + get_entry(entry); + break; + } + } if (!found) { CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n", @@ -347,11 +347,11 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key, CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n", cache->uc_name, entry, entry->ue_key); out: - if (rc) { - UC_CACHE_SET_INVALID(entry); - cfs_list_del_init(&entry->ue_hash); - } - UC_CACHE_CLEAR_ACQUIRING(entry); + if (rc) { + UC_CACHE_SET_INVALID(entry); + list_del_init(&entry->ue_hash); + } + UC_CACHE_CLEAR_ACQUIRING(entry); spin_unlock(&cache->uc_lock); wake_up_all(&entry->ue_waitq); put_entry(cache, entry); @@ -368,7 +368,7 @@ static void cache_flush(struct upcall_cache *cache, int force) spin_lock(&cache->uc_lock); for (i = 0; i < UC_CACHE_HASH_SIZE; i++) { - cfs_list_for_each_entry_safe(entry, next, + list_for_each_entry_safe(entry, next, &cache->uc_hashtable[i], ue_hash) { if (!force && atomic_read(&entry->ue_refcount)) { UC_CACHE_SET_EXPIRED(entry); @@ -384,19 +384,19 @@ static void cache_flush(struct upcall_cache *cache, int force) void upcall_cache_flush_idle(struct upcall_cache *cache) { - cache_flush(cache, 0); + cache_flush(cache, 0); } EXPORT_SYMBOL(upcall_cache_flush_idle); void upcall_cache_flush_all(struct upcall_cache *cache) { - cache_flush(cache, 1); + cache_flush(cache, 1); } EXPORT_SYMBOL(upcall_cache_flush_all); void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args) { - cfs_list_t *head; + struct list_head *head; struct upcall_cache_entry *entry; int found = 0; ENTRY; @@ -404,7 
+404,7 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args) head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)]; spin_lock(&cache->uc_lock); - cfs_list_for_each_entry(entry, head, ue_hash) { + list_for_each_entry(entry, head, ue_hash) { if (upcall_compare(cache, entry, key, args) == 0) { found = 1; break; @@ -429,26 +429,26 @@ EXPORT_SYMBOL(upcall_cache_flush_one); struct upcall_cache *upcall_cache_init(const char *name, const char *upcall, struct upcall_cache_ops *ops) { - struct upcall_cache *cache; - int i; - ENTRY; + struct upcall_cache *cache; + int i; + ENTRY; - LIBCFS_ALLOC(cache, sizeof(*cache)); - if (!cache) - RETURN(ERR_PTR(-ENOMEM)); + LIBCFS_ALLOC(cache, sizeof(*cache)); + if (!cache) + RETURN(ERR_PTR(-ENOMEM)); spin_lock_init(&cache->uc_lock); rwlock_init(&cache->uc_upcall_rwlock); for (i = 0; i < UC_CACHE_HASH_SIZE; i++) - CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]); - strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1); - /* upcall pathname proc tunable */ - strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1); - cache->uc_entry_expire = 20 * 60; - cache->uc_acquire_expire = 30; - cache->uc_ops = ops; - - RETURN(cache); + INIT_LIST_HEAD(&cache->uc_hashtable[i]); + strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1); + /* upcall pathname proc tunable */ + strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1); + cache->uc_entry_expire = 20 * 60; + cache->uc_acquire_expire = 30; + cache->uc_ops = ops; + + RETURN(cache); } EXPORT_SYMBOL(upcall_cache_init); diff --git a/libcfs/libcfs/user-prim.c b/libcfs/libcfs/user-prim.c index 80bec14..1a6fdd9 100644 --- a/libcfs/libcfs/user-prim.c +++ b/libcfs/libcfs/user-prim.c @@ -168,12 +168,12 @@ void cond_resched(void) void cfs_init_timer(struct timer_list *t) { - CFS_INIT_LIST_HEAD(&t->tl_list); + INIT_LIST_HEAD(&t->tl_list); } void cfs_timer_init(struct timer_list *l, cfs_timer_func_t *func, void *arg) { - CFS_INIT_LIST_HEAD(&l->tl_list); + INIT_LIST_HEAD(&l->tl_list); l->function = func; l->data = (ulong_ptr_t)arg; return; diff --git a/libcfs/libcfs/watchdog.c b/libcfs/libcfs/watchdog.c index 3a97431..ed1acf7 100644 --- a/libcfs/libcfs/watchdog.c +++ b/libcfs/libcfs/watchdog.c @@ -44,16 +44,16 @@ #include "tracefile.h" struct lc_watchdog { - spinlock_t lcw_lock; /* check or change lcw_list */ - int lcw_refcount; /* must hold lcw_pending_timers_lock */ - struct timer_list lcw_timer; /* kernel timer */ - cfs_list_t lcw_list; /* chain on pending list */ - cfs_time_t lcw_last_touched; /* last touched stamp */ - struct task_struct *lcw_task; /* owner task */ - void (*lcw_callback)(pid_t, void *); - void *lcw_data; + spinlock_t lcw_lock; /* check or change lcw_list */ + int lcw_refcount; /* must hold lcw_pending_timers_lock */ + struct timer_list lcw_timer; /* kernel timer */ + struct list_head lcw_list; /* chain on pending list */ + cfs_time_t lcw_last_touched;/* last touched stamp */ + struct task_struct *lcw_task; /* owner task */ + void (*lcw_callback)(pid_t, void *); + void *lcw_data; - pid_t lcw_pid; + pid_t lcw_pid; enum { LC_WATCHDOG_DISABLED, @@ -94,7 +94,7 @@ static DEFINE_MUTEX(lcw_refcount_mutex); */ /* BH lock! 
*/ static DEFINE_SPINLOCK(lcw_pending_timers_lock); -static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers); +static struct list_head lcw_pending_timers = LIST_HEAD_INIT(lcw_pending_timers); /* Last time a watchdog expired */ static cfs_time_t lcw_last_watchdog_time; @@ -130,11 +130,11 @@ static void lcw_cb(ulong_ptr_t data) lcw->lcw_state = LC_WATCHDOG_EXPIRED; spin_lock_bh(&lcw->lcw_lock); - LASSERT(cfs_list_empty(&lcw->lcw_list)); + LASSERT(list_empty(&lcw->lcw_list)); spin_lock_bh(&lcw_pending_timers_lock); lcw->lcw_refcount++; /* +1 for pending list */ - cfs_list_add(&lcw->lcw_list, &lcw_pending_timers); + list_add(&lcw->lcw_list, &lcw_pending_timers); wake_up(&lcw_event_waitq); spin_unlock_bh(&lcw_pending_timers_lock); @@ -150,7 +150,7 @@ static int is_watchdog_fired(void) return 1; spin_lock_bh(&lcw_pending_timers_lock); - rc = !cfs_list_empty(&lcw_pending_timers); + rc = !list_empty(&lcw_pending_timers); spin_unlock_bh(&lcw_pending_timers_lock); return rc; } @@ -206,7 +206,7 @@ static int lcw_dispatch_main(void *data) { int rc = 0; struct lc_watchdog *lcw; - CFS_LIST_HEAD (zombies); + struct list_head zombies = LIST_HEAD_INIT(zombies); ENTRY; @@ -222,7 +222,7 @@ static int lcw_dispatch_main(void *data) CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n"); spin_lock_bh(&lcw_pending_timers_lock); - rc = !cfs_list_empty(&lcw_pending_timers); + rc = !list_empty(&lcw_pending_timers); spin_unlock_bh(&lcw_pending_timers_lock); if (rc) { CERROR("pending timers list was not empty at " @@ -232,32 +232,32 @@ static int lcw_dispatch_main(void *data) } spin_lock_bh(&lcw_pending_timers_lock); - while (!cfs_list_empty(&lcw_pending_timers)) { - int is_dumplog; - - lcw = cfs_list_entry(lcw_pending_timers.next, - struct lc_watchdog, lcw_list); - /* +1 ref for callback to make sure lwc wouldn't be - * deleted after releasing lcw_pending_timers_lock */ - lcw->lcw_refcount++; + while (!list_empty(&lcw_pending_timers)) { + int is_dumplog; + + lcw = list_entry(lcw_pending_timers.next, + struct lc_watchdog, lcw_list); + /* +1 ref for callback to make sure lwc wouldn't be + * deleted after releasing lcw_pending_timers_lock */ + lcw->lcw_refcount++; spin_unlock_bh(&lcw_pending_timers_lock); /* lock ordering */ spin_lock_bh(&lcw->lcw_lock); spin_lock_bh(&lcw_pending_timers_lock); - if (cfs_list_empty(&lcw->lcw_list)) { + if (list_empty(&lcw->lcw_list)) { /* already removed from pending list */ lcw->lcw_refcount--; /* -1 ref for callback */ if (lcw->lcw_refcount == 0) - cfs_list_add(&lcw->lcw_list, &zombies); + list_add(&lcw->lcw_list, &zombies); spin_unlock_bh(&lcw->lcw_lock); - /* still hold lcw_pending_timers_lock */ - continue; - } + /* still hold lcw_pending_timers_lock */ + continue; + } - cfs_list_del_init(&lcw->lcw_list); - lcw->lcw_refcount--; /* -1 ref for pending list */ + list_del_init(&lcw->lcw_list); + lcw->lcw_refcount--; /* -1 ref for pending list */ spin_unlock_bh(&lcw_pending_timers_lock); spin_unlock_bh(&lcw->lcw_lock); @@ -277,14 +277,14 @@ static int lcw_dispatch_main(void *data) spin_lock_bh(&lcw_pending_timers_lock); lcw->lcw_refcount--; /* -1 ref for callback */ if (lcw->lcw_refcount == 0) - cfs_list_add(&lcw->lcw_list, &zombies); + list_add(&lcw->lcw_list, &zombies); } spin_unlock_bh(&lcw_pending_timers_lock); - while (!cfs_list_empty(&zombies)) { - lcw = cfs_list_entry(zombies.next, + while (!list_empty(&zombies)) { + lcw = list_entry(zombies.next, struct lc_watchdog, lcw_list); - cfs_list_del_init(&lcw->lcw_list); + list_del_init(&lcw->lcw_list); 
LIBCFS_FREE(lcw, sizeof(*lcw)); } } @@ -357,8 +357,8 @@ struct lc_watchdog *lc_watchdog_add(int timeout, lcw->lcw_data = data; lcw->lcw_state = LC_WATCHDOG_DISABLED; - CFS_INIT_LIST_HEAD(&lcw->lcw_list); - cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw); + INIT_LIST_HEAD(&lcw->lcw_list); + cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw); mutex_lock(&lcw_refcount_mutex); if (++lcw_refcount == 1) @@ -401,9 +401,9 @@ static void lcw_update_time(struct lc_watchdog *lcw, const char *message) static void lc_watchdog_del_pending(struct lc_watchdog *lcw) { spin_lock_bh(&lcw->lcw_lock); - if (unlikely(!cfs_list_empty(&lcw->lcw_list))) { + if (unlikely(!list_empty(&lcw->lcw_list))) { spin_lock_bh(&lcw_pending_timers_lock); - cfs_list_del_init(&lcw->lcw_list); + list_del_init(&lcw->lcw_list); lcw->lcw_refcount--; /* -1 ref for pending list */ spin_unlock_bh(&lcw_pending_timers_lock); } @@ -455,8 +455,8 @@ void lc_watchdog_delete(struct lc_watchdog *lcw) spin_lock_bh(&lcw->lcw_lock); spin_lock_bh(&lcw_pending_timers_lock); - if (unlikely(!cfs_list_empty(&lcw->lcw_list))) { - cfs_list_del_init(&lcw->lcw_list); + if (unlikely(!list_empty(&lcw->lcw_list))) { + list_del_init(&lcw->lcw_list); lcw->lcw_refcount--; /* -1 ref for pending list */ } diff --git a/libcfs/libcfs/winnt/winnt-mem.c b/libcfs/libcfs/winnt/winnt-mem.c index 21dce7e..1e29881 100644 --- a/libcfs/libcfs/winnt/winnt-mem.c +++ b/libcfs/libcfs/winnt/winnt-mem.c @@ -367,7 +367,7 @@ void kmem_cache_free(struct kmem_cache *kmc, void *buf) } spinlock_t shrinker_guard = {0}; -CFS_LIST_HEAD(shrinker_hdr); +struct list_head shrinker_hdr = LIST_HEAD_INIT(shrinker_hdr); struct timer_list shrinker_timer = {0}; struct shrinker *set_shrinker(int seeks, shrink_callback cb) @@ -379,7 +379,7 @@ struct shrinker *set_shrinker(int seeks, shrink_callback cb) s->seeks = seeks; s->nr = 2; spin_lock(&shrinker_guard); - cfs_list_add(&s->list, &shrinker_hdr); + list_add(&s->list, &shrinker_hdr); spin_unlock(&shrinker_guard); } @@ -391,15 +391,14 @@ void remove_shrinker(struct shrinker *s) struct shrinker *tmp; spin_lock(&shrinker_guard); #if TRUE - cfs_list_for_each_entry_typed(tmp, &shrinker_hdr, - struct shrinker, list) { + list_for_each_entry(tmp, &shrinker_hdr, list) { if (tmp == s) { - cfs_list_del(&tmp->list); + list_del(&tmp->list); break; } } #else - cfs_list_del(&s->list); + list_del(&s->list); #endif spin_unlock(&shrinker_guard); kfree(s); @@ -411,8 +410,7 @@ void shrinker_timer_proc(ulong_ptr_t arg) struct shrinker *s; spin_lock(&shrinker_guard); - cfs_list_for_each_entry_typed(s, &shrinker_hdr, - struct shrinker, list) { + list_for_each_entry(s, &shrinker_hdr, list) { s->cb(s->nr, __GFP_FS); } spin_unlock(&shrinker_guard); diff --git a/libcfs/libcfs/winnt/winnt-prim.c b/libcfs/libcfs/winnt/winnt-prim.c index 077f3a4..2fe3a7b 100644 --- a/libcfs/libcfs/winnt/winnt-prim.c +++ b/libcfs/libcfs/winnt/winnt-prim.c @@ -147,7 +147,7 @@ struct task_struct kthread_run(int (*func)(void *), void *arg, char *name) static DECLARE_RWSEM(cfs_symbol_lock); -CFS_LIST_HEAD(cfs_symbol_list); +struct list_head cfs_symbol_list = LIST_HEAD_INIT(cfs_symbol_list); int libcfs_is_mp_system = FALSE; @@ -169,12 +169,12 @@ int libcfs_is_mp_system = FALSE; void * cfs_symbol_get(const char *name) { - cfs_list_t *walker; + struct list_head *walker; struct cfs_symbol *sym = NULL; down_read(&cfs_symbol_lock); - cfs_list_for_each(walker, &cfs_symbol_list) { - sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); + list_for_each(walker, &cfs_symbol_list) { + sym = list_entry (walker, 
struct cfs_symbol, sym_list); if (!strcmp(sym->name, name)) { sym->ref ++; break; @@ -205,12 +205,12 @@ cfs_symbol_get(const char *name) void cfs_symbol_put(const char *name) { - cfs_list_t *walker; + struct list_head *walker; struct cfs_symbol *sym = NULL; down_read(&cfs_symbol_lock); - cfs_list_for_each(walker, &cfs_symbol_list) { - sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); + list_for_each(walker, &cfs_symbol_list) { + sym = list_entry (walker, struct cfs_symbol, sym_list); if (!strcmp(sym->name, name)) { LASSERT(sym->ref > 0); sym->ref--; @@ -242,7 +242,7 @@ cfs_symbol_put(const char *name) int cfs_symbol_register(const char *name, const void *value) { - cfs_list_t *walker; + struct list_head *walker; struct cfs_symbol *sym = NULL; struct cfs_symbol *new = NULL; @@ -253,18 +253,18 @@ cfs_symbol_register(const char *name, const void *value) strncpy(new->name, name, CFS_SYMBOL_LEN); new->value = (void *)value; new->ref = 0; - CFS_INIT_LIST_HEAD(&new->sym_list); + INIT_LIST_HEAD(&new->sym_list); down_write(&cfs_symbol_lock); - cfs_list_for_each(walker, &cfs_symbol_list) { - sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); + list_for_each(walker, &cfs_symbol_list) { + sym = list_entry (walker, struct cfs_symbol, sym_list); if (!strcmp(sym->name, name)) { up_write(&cfs_symbol_lock); kfree(new); return 0; /* alreay registerred */ } } - cfs_list_add_tail(&new->sym_list, &cfs_symbol_list); + list_add_tail(&new->sym_list, &cfs_symbol_list); up_write(&cfs_symbol_lock); return 0; @@ -287,16 +287,16 @@ cfs_symbol_register(const char *name, const void *value) void cfs_symbol_unregister(const char *name) { - cfs_list_t *walker; - cfs_list_t *nxt; + struct list_head *walker; + struct list_head *nxt; struct cfs_symbol *sym = NULL; down_write(&cfs_symbol_lock); - cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) { - sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); + list_for_each_safe(walker, nxt, &cfs_symbol_list) { + sym = list_entry (walker, struct cfs_symbol, sym_list); if (!strcmp(sym->name, name)) { LASSERT(sym->ref == 0); - cfs_list_del (&sym->sym_list); + list_del (&sym->sym_list); kfree(sym); break; } @@ -321,14 +321,14 @@ cfs_symbol_unregister(const char *name) void cfs_symbol_clean() { - cfs_list_t *walker; + struct list_head *walker; struct cfs_symbol *sym = NULL; down_write(&cfs_symbol_lock); - cfs_list_for_each(walker, &cfs_symbol_list) { - sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); + list_for_each(walker, &cfs_symbol_list) { + sym = list_entry (walker, struct cfs_symbol, sym_list); LASSERT(sym->ref == 0); - cfs_list_del (&sym->sym_list); + list_del (&sym->sym_list); kfree(sym); } up_write(&cfs_symbol_lock); diff --git a/libcfs/libcfs/winnt/winnt-proc.c b/libcfs/libcfs/winnt/winnt-proc.c index 94378fe..3d7e7bb 100644 --- a/libcfs/libcfs/winnt/winnt-proc.c +++ b/libcfs/libcfs/winnt/winnt-proc.c @@ -787,7 +787,7 @@ int proc_init_fs() cfs_proc_entry_t * root = NULL; memset(&(root_table_header), 0, sizeof(struct ctl_table_header)); - CFS_INIT_LIST_HEAD(&(root_table_header.ctl_entry)); + INIT_LIST_HEAD(&(root_table_header.ctl_entry)); INIT_PROCFS_LOCK(); proc_entry_cache = kmem_cache_create(NULL, sizeof(cfs_proc_entry_t), @@ -1339,7 +1339,7 @@ repeat: int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp, void *newval, size_t newlen) { - cfs_list_t *tmp; + struct list_head *tmp; if (nlen <= 0 || nlen >= CTL_MAXNAME) return -ENOTDIR; @@ -1351,7 +1351,7 @@ int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp, tmp 
= &root_table_header.ctl_entry; do { struct ctl_table_header *head = - cfs_list_entry(tmp, struct ctl_table_header, ctl_entry); + list_entry(tmp, struct ctl_table_header, ctl_entry); void *context = NULL; int error = parse_table(name, nlen, oldval, oldlenp, newval, newlen, head->ctl_table, @@ -1443,8 +1443,8 @@ register_sysctl_table(struct ctl_table *table) return NULL; tmp->ctl_table = table; - CFS_INIT_LIST_HEAD(&tmp->ctl_entry); - cfs_list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry); + INIT_LIST_HEAD(&tmp->ctl_entry); + list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry); #ifdef CONFIG_PROC_FS register_proc_table(table, cfs_proc_sys); #endif @@ -1460,7 +1460,7 @@ register_sysctl_table(struct ctl_table *table) */ void unregister_sysctl_table(struct ctl_table_header * header) { - cfs_list_del(&header->ctl_entry); + list_del(&header->ctl_entry); #ifdef CONFIG_PROC_FS unregister_proc_table(header->ctl_table, cfs_proc_sys); #endif @@ -2264,11 +2264,11 @@ int seq_puts(struct seq_file *m, const char *s) } EXPORT_SYMBOL(seq_puts); -cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos) +struct list_head *seq_list_start(struct list_head *head, loff_t pos) { - cfs_list_t *lh; + struct list_head *lh; - cfs_list_for_each(lh, head) + list_for_each(lh, head) if (pos-- == 0) return lh; @@ -2277,7 +2277,7 @@ cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos) EXPORT_SYMBOL(seq_list_start); -cfs_list_t *seq_list_start_head(cfs_list_t *head, +struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) { if (!pos) @@ -2288,12 +2288,12 @@ cfs_list_t *seq_list_start_head(cfs_list_t *head, EXPORT_SYMBOL(seq_list_start_head); -cfs_list_t *seq_list_next(void *v, cfs_list_t *head, +struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) { - cfs_list_t *lh; + struct list_head *lh; - lh = ((cfs_list_t *)v)->next; + lh = ((struct list_head *)v)->next; ++*ppos; return lh == head ? 
NULL : lh; } diff --git a/libcfs/libcfs/winnt/winnt-sync.c b/libcfs/libcfs/winnt/winnt-sync.c index a2b3e1d..e135f71 100644 --- a/libcfs/libcfs/winnt/winnt-sync.c +++ b/libcfs/libcfs/winnt/winnt-sync.c @@ -61,7 +61,7 @@ void init_waitqueue_head(wait_queue_head_t *waitq) { waitq->magic = CFS_WAITQ_MAGIC; waitq->flags = 0; - CFS_INIT_LIST_HEAD(&(waitq->waiters)); + INIT_LIST_HEAD(&(waitq->waiters)); spin_lock_init(&(waitq->guard)); } @@ -103,8 +103,8 @@ void init_waitqueue_entry_current(wait_queue_t *link) atomic_inc(&slot->count); - CFS_INIT_LIST_HEAD(&(link->waitq[0].link)); - CFS_INIT_LIST_HEAD(&(link->waitq[1].link)); + INIT_LIST_HEAD(&(link->waitq[0].link)); + INIT_LIST_HEAD(&(link->waitq[1].link)); link->waitq[0].waitl = link->waitq[1].waitl = link; } @@ -175,9 +175,9 @@ void cfs_waitq_add_internal(wait_queue_head_t *waitq, LASSERT(link->waitq[waitqid].waitq == NULL); link->waitq[waitqid].waitq = waitq; if (link->flags & CFS_WAITQ_EXCLUSIVE) { - cfs_list_add_tail(&link->waitq[waitqid].link, &waitq->waiters); + list_add_tail(&link->waitq[waitqid].link, &waitq->waiters); } else { - cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters); + list_add(&link->waitq[waitqid].link, &waitq->waiters); } spin_unlock(&(waitq->guard)); } @@ -265,7 +265,7 @@ void remove_wait_queue( wait_queue_head_t *waitq, if (i < CFS_WAITQ_CHANNELS) { link->waitq[i].waitq = NULL; - cfs_list_del_init(&link->waitq[i].link); + list_del_init(&link->waitq[i].link); } else { cfs_enter_debugger(); } @@ -322,8 +322,7 @@ void wake_up_nr(wait_queue_head_t *waitq, int nr) LASSERT(waitq->magic == CFS_WAITQ_MAGIC); spin_lock(&waitq->guard); - cfs_list_for_each_entry_typed(scan, &waitq->waiters, - cfs_waitlink_channel_t, + list_for_each_entry(scan, &waitq->waiters, link) { wait_queue_t *waitl = scan->waitl; diff --git a/libcfs/libcfs/winnt/winnt-tcpip.c b/libcfs/libcfs/winnt/winnt-tcpip.c index f68d05e..a77120b 100644 --- a/libcfs/libcfs/winnt/winnt-tcpip.c +++ b/libcfs/libcfs/winnt/winnt-tcpip.c @@ -350,12 +350,12 @@ KsAllocateKsTsdu() spin_lock(&(ks_data.ksnd_tsdu_lock)); - if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) { + if (!list_empty (&(ks_data.ksnd_freetsdus))) { LASSERT(ks_data.ksnd_nfreetsdus > 0); - KsTsdu = cfs_list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link); - cfs_list_del(&(KsTsdu->Link)); + KsTsdu = list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link); + list_del(&(KsTsdu->Link)); ks_data.ksnd_nfreetsdus--; } else { @@ -421,7 +421,7 @@ KsPutKsTsdu( if (ks_data.ksnd_nfreetsdus > 128) { KsFreeKsTsdu(KsTsdu); } else { - cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus)); + list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus)); ks_data.ksnd_nfreetsdus++; } spin_unlock(&(ks_data.ksnd_tsdu_lock)); @@ -447,8 +447,7 @@ KsLockTsdus( *Length = 0; - cfs_list_for_each_entry_typed(KsTsdu, - &TsduMgr->TsduList,KS_TSDU, Link) { + list_for_each_entry(KsTsdu, &TsduMgr->TsduList, Link) { ULONG start = 0; @@ -593,11 +592,11 @@ KsReleaseTsdus( LASSERT(TsduMgr->TotalBytes >= length); - while (!cfs_list_empty(&TsduMgr->TsduList)) { + while (!list_empty(&TsduMgr->TsduList)) { ULONG start = 0; - KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link); + KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link); LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC); start = KsTsdu->StartOffset; @@ -695,7 +694,7 @@ KsReleaseTsdus( if (KsTsdu->StartOffset >= KsTsdu->LastOffset) { /* remove KsTsdu from list */ - cfs_list_del(&KsTsdu->Link); + list_del(&KsTsdu->Link); TsduMgr->NumOfTsdu--; KsPutKsTsdu(KsTsdu); } @@ 
-760,7 +759,7 @@ KsGetTsdu(PKS_TSDUMGR TsduMgr, ULONG Length) /* retrieve the latest Tsdu buffer form TsduMgr list if the list is not empty. */ - if (cfs_list_empty(&(TsduMgr->TsduList))) { + if (list_empty(&(TsduMgr->TsduList))) { LASSERT(TsduMgr->NumOfTsdu == 0); KsTsdu = NULL; @@ -768,7 +767,7 @@ KsGetTsdu(PKS_TSDUMGR TsduMgr, ULONG Length) } else { LASSERT(TsduMgr->NumOfTsdu > 0); - KsTsdu = cfs_list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link); + KsTsdu = list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link); /* if this Tsdu does not contain enough space, we need allocate a new Tsdu queue. */ @@ -782,7 +781,7 @@ KsGetTsdu(PKS_TSDUMGR TsduMgr, ULONG Length) if (NULL == KsTsdu) { KsTsdu = KsAllocateKsTsdu(); if (NULL != KsTsdu) { - cfs_list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList)); + list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList)); TsduMgr->NumOfTsdu++; } } @@ -1011,11 +1010,11 @@ NextTsdu: } else { - KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link); + KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link); LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC); /* remove the KsTsdu from TsduMgr list to release the lock */ - cfs_list_del(&(KsTsdu->Link)); + list_del(&(KsTsdu->Link)); TsduMgr->NumOfTsdu--; while (length > BytesRecved) { @@ -1164,7 +1163,7 @@ NextTsdu: KsTsdu = NULL; } else { TsduMgr->NumOfTsdu++; - cfs_list_add(&(KsTsdu->Link), &(TsduMgr->TsduList)); + list_add(&(KsTsdu->Link), &(TsduMgr->TsduList)); } } @@ -1277,7 +1276,7 @@ KsInitializeKsTsduMgr( FALSE ); - CFS_INIT_LIST_HEAD( + INIT_LIST_HEAD( &(TsduMgr->TsduList) ); @@ -1343,9 +1342,9 @@ KsCleanupTsduMgr( KsRemoveTdiEngine(TsduMgr); KeSetEvent(&(TsduMgr->Event), 0, FALSE); - while (!cfs_list_empty(&TsduMgr->TsduList)) { + while (!list_empty(&TsduMgr->TsduList)) { - KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link); + KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link); LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC); if (KsTsdu->StartOffset == KsTsdu->LastOffset) { @@ -1354,7 +1353,7 @@ KsCleanupTsduMgr( // KsTsdu is empty now, we need free it ... 
// - cfs_list_del(&(KsTsdu->Link)); + list_del(&(KsTsdu->Link)); TsduMgr->NumOfTsdu--; KsFreeKsTsdu(KsTsdu); @@ -3199,18 +3198,18 @@ KsGetVacancyBacklog( LASSERT(parent->kstc_type == kstt_listener); LASSERT(parent->kstc_state == ksts_listening); - if (cfs_list_empty(&(parent->listener.kstc_listening.list))) { + if (list_empty(&(parent->listener.kstc_listening.list))) { child = NULL; } else { - cfs_list_t * tmp; + struct list_head * tmp; /* check the listening queue and try to get a free connecton */ - cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) { - child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link); + list_for_each(tmp, &(parent->listener.kstc_listening.list)) { + child = list_entry (tmp, ks_tconn_t, child.kstc_link); spin_lock(&(child->kstc_lock)); if (!child->child.kstc_busy) { @@ -4340,7 +4339,7 @@ ks_create_tconn() /* attach it into global list in ks_data */ - cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns)); + list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns)); ks_data.ksnd_ntconns++; spin_unlock(&(ks_data.ksnd_tconn_lock)); @@ -4373,7 +4372,7 @@ ks_free_tconn(ks_tconn_t * tconn) spin_lock(&(ks_data.ksnd_tconn_lock)); /* remove it from the global list */ - cfs_list_del(&tconn->kstc_list); + list_del(&tconn->kstc_list); ks_data.ksnd_ntconns--; /* if this is the last tconn, it would be safe for @@ -4415,8 +4414,8 @@ ks_init_listener( RtlInitUnicodeString(&(tconn->kstc_dev), TCP_DEVICE_NAME); - CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list)); - CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list)); + INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list)); + INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list)); cfs_init_event( &(tconn->listener.kstc_accept_event), TRUE, @@ -4632,7 +4631,7 @@ ks_destroy_tconn( if (tconn->child.kstc_queued) { - cfs_list_del(&(tconn->child.kstc_link)); + list_del(&(tconn->child.kstc_link)); if (tconn->child.kstc_queueno) { @@ -5560,7 +5559,7 @@ KsQueueTdiEngine(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr) if (!engs->queued) { spin_lock(&engm->lock); if (!engs->queued) { - cfs_list_add_tail(&engs->link, &engm->list); + list_add_tail(&engs->link, &engm->list); engs->queued = TRUE; engs->tconn = tconn; engs->emgr = engm; @@ -5586,7 +5585,7 @@ KsRemoveTdiEngine(PKS_TSDUMGR TsduMgr) LASSERT(engm != NULL); spin_lock(&engm->lock); if (engs->queued) { - cfs_list_del(&engs->link); + list_del(&engs->link); engs->queued = FALSE; engs->tconn = NULL; engs->emgr = NULL; @@ -5742,7 +5741,7 @@ KsDeliveryTsdus(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr) tflags = TDI_SEND_NON_BLOCKING; } - if (cfs_list_empty(&TsduMgr->TsduList)) { + if (list_empty(&TsduMgr->TsduList)) { LASSERT(TsduMgr->TotalBytes == 0); ks_unlock_tsdumgr(TsduMgr); goto errorout; @@ -5800,7 +5799,7 @@ KsDeliveryEngineThread(void * context) { ks_engine_mgr_t * engm = context; ks_engine_slot_t * engs; - cfs_list_t * list; + struct list_head * list; ks_tconn_t * tconn; cfs_set_thread_priority(31); @@ -5810,14 +5809,14 @@ KsDeliveryEngineThread(void * context) cfs_wait_event_internal(&engm->start, 0); spin_lock(&engm->lock); - if (cfs_list_empty(&engm->list)) { + if (list_empty(&engm->list)) { spin_unlock(&engm->lock); continue; } list = engm->list.next; - cfs_list_del(list); - engs = cfs_list_entry(list, ks_engine_slot_t, link); + list_del(list); + engs = list_entry(list, ks_engine_slot_t, link); LASSERT(engs->emgr == engm); LASSERT(engs->queued); engs->emgr = NULL; @@ -5862,7 +5861,7 @@ ks_init_tdi_data() RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t)); 
spin_lock_init(&ks_data.ksnd_tconn_lock); - CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns); + INIT_LIST_HEAD(&ks_data.ksnd_tconns); cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE); ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t), @@ -5875,7 +5874,7 @@ ks_init_tdi_data() /* initialize tsdu related globals */ spin_lock_init(&ks_data.ksnd_tsdu_lock); - CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus); + INIT_LIST_HEAD(&ks_data.ksnd_freetsdus); ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */ ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size, 0, 0, NULL); @@ -5900,7 +5899,7 @@ ks_init_tdi_data() spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock); cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE); cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE); - CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list); + INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list); kthread_run(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], ""); } @@ -5939,7 +5938,7 @@ void ks_fini_tdi_data() { PKS_TSDU KsTsdu = NULL; - cfs_list_t * list = NULL; + struct list_head * list = NULL; int i; /* clean up the pnp handler and address slots */ @@ -5958,7 +5957,7 @@ ks_fini_tdi_data() /* we need wait until all the tconn are freed */ spin_lock(&(ks_data.ksnd_tconn_lock)); - if (cfs_list_empty(&(ks_data.ksnd_tconns))) { + if (list_empty(&(ks_data.ksnd_tconns))) { cfs_wake_event(&ks_data.ksnd_tconn_exit); } spin_unlock(&(ks_data.ksnd_tconn_lock)); @@ -5972,8 +5971,8 @@ kmem_cache_destroy(ks_data.ksnd_tconn_slab); /* clean up all the tsud buffers in the free list */ spin_lock(&(ks_data.ksnd_tsdu_lock)); - cfs_list_for_each (list, &ks_data.ksnd_freetsdus) { - KsTsdu = cfs_list_entry (list, KS_TSDU, Link); + list_for_each (list, &ks_data.ksnd_freetsdus) { + KsTsdu = list_entry (list, KS_TSDU, Link); kmem_cache_free( ks_data.ksnd_tsdu_slab, @@ -6108,7 +6107,7 @@ ks_replenish_backlogs( if (backlog) { spin_lock(&backlog->kstc_lock); /* attch it into the listing list of daemon */ - cfs_list_add( &backlog->child.kstc_link, + list_add( &backlog->child.kstc_link, &parent->listener.kstc_listening.list ); parent->listener.kstc_listening.num++; @@ -6165,7 +6164,7 @@ ks_start_listen(ks_tconn_t *tconn, int nbacklog) void ks_stop_listen(ks_tconn_t *tconn) { - cfs_list_t * list; + struct list_head * list; ks_tconn_t * backlog; /* reset all tdi event callbacks to NULL */ @@ -6176,8 +6175,8 @@ ks_stop_listen(ks_tconn_t *tconn) cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED); /* cleanup all the listening backlog child connections */ - cfs_list_for_each (list, &(tconn->listener.kstc_listening.list)) { - backlog = cfs_list_entry(list, ks_tconn_t, child.kstc_link); + list_for_each (list, &(tconn->listener.kstc_listening.list)) { + backlog = list_entry(list, ks_tconn_t, child.kstc_link); /* destory and free it */ ks_put_tconn(backlog); @@ -6214,7 +6213,7 @@ ks_wait_child_tconn( ks_tconn_t ** child ) { - cfs_list_t * tmp; + struct list_head * tmp; ks_tconn_t * backlog = NULL; ks_replenish_backlogs(parent, parent->listener.nbacklog); @@ -6230,8 +6229,8 @@ again: /* check the listening queue and try to search the accepted connecton */ - cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) { - backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link); + list_for_each(tmp, &(parent->listener.kstc_listening.list)) { + backlog = list_entry (tmp, ks_tconn_t, child.kstc_link); spin_lock(&(backlog->kstc_lock)); @@ -6240,8 +6239,8 @@ again: 
LASSERT(backlog->kstc_state == ksts_connected); LASSERT(backlog->child.kstc_busy); - cfs_list_del(&(backlog->child.kstc_link)); - cfs_list_add(&(backlog->child.kstc_link), + list_del(&(backlog->child.kstc_link)); + list_add(&(backlog->child.kstc_link), &(parent->listener.kstc_accepted.list)); parent->listener.kstc_accepted.num++; parent->listener.kstc_listening.num--; diff --git a/libcfs/libcfs/workitem.c b/libcfs/libcfs/workitem.c index a22d4a9..bcb4f39 100644 --- a/libcfs/libcfs/workitem.c +++ b/libcfs/libcfs/workitem.c @@ -46,21 +46,21 @@ #define CFS_WS_NAME_LEN 16 typedef struct cfs_wi_sched { - cfs_list_t ws_list; /* chain on global list */ + struct list_head ws_list; /* chain on global list */ #ifdef __KERNEL__ /** serialised workitems */ - spinlock_t ws_lock; + spinlock_t ws_lock; /** where schedulers sleep */ wait_queue_head_t ws_waitq; #endif /** concurrent workitems */ - cfs_list_t ws_runq; + struct list_head ws_runq; /** rescheduled running-workitems, a workitem can be rescheduled * while running in wi_action(), but we don't to execute it again * unless it returns from wi_action(), so we put it on ws_rerunq * while rescheduling, and move it to runq after it returns * from wi_action() */ - cfs_list_t ws_rerunq; + struct list_head ws_rerunq; /** CPT-table for this scheduler */ struct cfs_cpt_table *ws_cptab; /** CPT id for affinity */ @@ -81,7 +81,7 @@ struct cfs_workitem_data { /** serialize */ spinlock_t wi_glock; /** list of all schedulers */ - cfs_list_t wi_scheds; + struct list_head wi_scheds; /** WI module is initialized */ int wi_init; /** shutting down the whole WI module */ @@ -106,16 +106,16 @@ cfs_wi_sched_cansleep(cfs_wi_sched_t *sched) { cfs_wi_sched_lock(sched); if (sched->ws_stopping) { - cfs_wi_sched_unlock(sched); - return 0; - } + cfs_wi_sched_unlock(sched); + return 0; + } - if (!cfs_list_empty(&sched->ws_runq)) { - cfs_wi_sched_unlock(sched); - return 0; - } - cfs_wi_sched_unlock(sched); - return 1; + if (!list_empty(&sched->ws_runq)) { + cfs_wi_sched_unlock(sched); + return 0; + } + cfs_wi_sched_unlock(sched); + return 1; } #else /* !__KERNEL__ */ @@ -150,14 +150,14 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) LASSERT(wi->wi_running); #endif if (wi->wi_scheduled) { /* cancel pending schedules */ - LASSERT(!cfs_list_empty(&wi->wi_list)); - cfs_list_del_init(&wi->wi_list); + LASSERT(!list_empty(&wi->wi_list)); + list_del_init(&wi->wi_list); LASSERT(sched->ws_nscheduled > 0); sched->ws_nscheduled--; } - LASSERT(cfs_list_empty(&wi->wi_list)); + LASSERT(list_empty(&wi->wi_list)); wi->wi_scheduled = 1; /* LBUG future schedule attempts */ cfs_wi_sched_unlock(sched); @@ -187,19 +187,19 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) rc = !(wi->wi_running); if (wi->wi_scheduled) { /* cancel pending schedules */ - LASSERT(!cfs_list_empty(&wi->wi_list)); - cfs_list_del_init(&wi->wi_list); + LASSERT(!list_empty(&wi->wi_list)); + list_del_init(&wi->wi_list); LASSERT(sched->ws_nscheduled > 0); sched->ws_nscheduled--; - wi->wi_scheduled = 0; - } + wi->wi_scheduled = 0; + } - LASSERT (cfs_list_empty(&wi->wi_list)); + LASSERT (list_empty(&wi->wi_list)); - cfs_wi_sched_unlock(sched); - return rc; + cfs_wi_sched_unlock(sched); + return rc; } EXPORT_SYMBOL(cfs_wi_deschedule); @@ -219,21 +219,21 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) cfs_wi_sched_lock(sched); if (!wi->wi_scheduled) { - LASSERT (cfs_list_empty(&wi->wi_list)); + LASSERT (list_empty(&wi->wi_list)); wi->wi_scheduled = 1; sched->ws_nscheduled++; if 
(!wi->wi_running) { - cfs_list_add_tail(&wi->wi_list, &sched->ws_runq); + list_add_tail(&wi->wi_list, &sched->ws_runq); #ifdef __KERNEL__ wake_up(&sched->ws_waitq); #endif } else { - cfs_list_add(&wi->wi_list, &sched->ws_rerunq); + list_add(&wi->wi_list, &sched->ws_rerunq); } } - LASSERT (!cfs_list_empty(&wi->wi_list)); + LASSERT (!list_empty(&wi->wi_list)); cfs_wi_sched_unlock(sched); return; } @@ -265,17 +265,17 @@ cfs_wi_scheduler (void *arg) cfs_wi_sched_lock(sched); while (!sched->ws_stopping) { - int nloops = 0; - int rc; - cfs_workitem_t *wi; - - while (!cfs_list_empty(&sched->ws_runq) && - nloops < CFS_WI_RESCHED) { - wi = cfs_list_entry(sched->ws_runq.next, - cfs_workitem_t, wi_list); + int nloops = 0; + int rc; + cfs_workitem_t *wi; + + while (!list_empty(&sched->ws_runq) && + nloops < CFS_WI_RESCHED) { + wi = list_entry(sched->ws_runq.next, + cfs_workitem_t, wi_list); LASSERT(wi->wi_scheduled && !wi->wi_running); - cfs_list_del_init(&wi->wi_list); + list_del_init(&wi->wi_list); LASSERT(sched->ws_nscheduled > 0); sched->ws_nscheduled--; @@ -293,17 +293,17 @@ cfs_wi_scheduler (void *arg) if (rc != 0) /* WI should be dead, even be freed! */ continue; - wi->wi_running = 0; - if (cfs_list_empty(&wi->wi_list)) + wi->wi_running = 0; + if (list_empty(&wi->wi_list)) continue; LASSERT(wi->wi_scheduled); - /* wi is rescheduled, should be on rerunq now, we - * move it to runq so it can run action now */ - cfs_list_move_tail(&wi->wi_list, &sched->ws_runq); + /* wi is rescheduled, should be on rerunq now, we + * move it to runq so it can run action now */ + list_move_tail(&wi->wi_list, &sched->ws_runq); } - if (!cfs_list_empty(&sched->ws_runq)) { + if (!list_empty(&sched->ws_runq)) { cfs_wi_sched_unlock(sched); /* don't sleep because some workitems still * expect me to come back soon */ @@ -342,9 +342,8 @@ cfs_wi_check_events (void) struct cfs_wi_sched *tmp; /** rerunq is always empty for userspace */ - cfs_list_for_each_entry(tmp, - &cfs_wi_data.wi_scheds, ws_list) { - if (!cfs_list_empty(&tmp->ws_runq)) { + list_for_each_entry(tmp, &cfs_wi_data.wi_scheds, ws_list) { + if (!list_empty(&tmp->ws_runq)) { sched = tmp; break; } @@ -353,9 +352,9 @@ cfs_wi_check_events (void) if (sched == NULL) break; - wi = cfs_list_entry(sched->ws_runq.next, + wi = list_entry(sched->ws_runq.next, cfs_workitem_t, wi_list); - cfs_list_del_init(&wi->wi_list); + list_del_init(&wi->wi_list); LASSERT(sched->ws_nscheduled > 0); sched->ws_nscheduled--; @@ -390,7 +389,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched) return; } - LASSERT(!cfs_list_empty(&sched->ws_list)); + LASSERT(!list_empty(&sched->ws_list)); sched->ws_stopping = 1; spin_unlock(&cfs_wi_data.wi_glock); @@ -414,7 +413,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched) } } - cfs_list_del(&sched->ws_list); + list_del(&sched->ws_list); spin_unlock(&cfs_wi_data.wi_glock); #endif @@ -452,9 +451,9 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, spin_lock_init(&sched->ws_lock); init_waitqueue_head(&sched->ws_waitq); #endif - CFS_INIT_LIST_HEAD(&sched->ws_runq); - CFS_INIT_LIST_HEAD(&sched->ws_rerunq); - CFS_INIT_LIST_HEAD(&sched->ws_list); + INIT_LIST_HEAD(&sched->ws_runq); + INIT_LIST_HEAD(&sched->ws_rerunq); + INIT_LIST_HEAD(&sched->ws_list); #ifdef __KERNEL__ for (; nthrs > 0; nthrs--) { @@ -490,7 +489,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, spin_lock(&cfs_wi_data.wi_glock); /* make up for cfs_wi_sched_destroy */ - cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); + list_add(&sched->ws_list, 
&cfs_wi_data.wi_scheds); sched->ws_starting--; spin_unlock(&cfs_wi_data.wi_glock); @@ -501,7 +500,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, } #endif spin_lock(&cfs_wi_data.wi_glock); - cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); + list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); spin_unlock(&cfs_wi_data.wi_glock); *sched_pp = sched; @@ -515,7 +514,7 @@ cfs_wi_startup(void) memset(&cfs_wi_data, 0, sizeof(cfs_wi_data)); spin_lock_init(&cfs_wi_data.wi_glock); - CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds); + INIT_LIST_HEAD(&cfs_wi_data.wi_scheds); cfs_wi_data.wi_init = 1; return 0; @@ -532,12 +531,12 @@ cfs_wi_shutdown (void) #ifdef __KERNEL__ /* nobody should contend on this list */ - cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { + list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { sched->ws_stopping = 1; wake_up_all(&sched->ws_waitq); } - cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { + list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { spin_lock(&cfs_wi_data.wi_glock); while (sched->ws_nthreads != 0) { @@ -548,10 +547,10 @@ cfs_wi_shutdown (void) spin_unlock(&cfs_wi_data.wi_glock); } #endif - while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) { - sched = cfs_list_entry(cfs_wi_data.wi_scheds.next, + while (!list_empty(&cfs_wi_data.wi_scheds)) { + sched = list_entry(cfs_wi_data.wi_scheds.next, struct cfs_wi_sched, ws_list); - cfs_list_del(&sched->ws_list); + list_del(&sched->ws_list); LIBCFS_FREE(sched, sizeof(*sched)); } diff --git a/lustre/ptlrpc/nrs.c b/lustre/ptlrpc/nrs.c index 069f6a0..dc3dabd 100644 --- a/lustre/ptlrpc/nrs.c +++ b/lustre/ptlrpc/nrs.c @@ -50,13 +50,7 @@ #include #include "ptlrpc_internal.h" -/* XXX: This is just for liblustre. Remove the #if defined directive when the - * "cfs_" prefix is dropped from cfs_list_head. */ -#if defined (__linux__) && defined(__KERNEL__) extern struct list_head ptlrpc_all_services; -#else -extern struct cfs_list_head ptlrpc_all_services; -#endif /** * NRS core object. diff --git a/lustre/ptlrpc/ptlrpc_internal.h b/lustre/ptlrpc/ptlrpc_internal.h index d758453..f3cb1d5 100644 --- a/lustre/ptlrpc/ptlrpc_internal.h +++ b/lustre/ptlrpc/ptlrpc_internal.h @@ -103,18 +103,11 @@ struct nrs_core { * registration/unregistration, and NRS core lprocfs operations. */ struct mutex nrs_mutex; - /* XXX: This is just for liblustre. Remove the #if defined directive - * when the * "cfs_" prefix is dropped from cfs_list_head. */ -#if defined (__linux__) && defined(__KERNEL__) /** * List of all policy descriptors registered with NRS core; protected * by nrs_core::nrs_mutex. */ struct list_head nrs_policies; -#else - struct cfs_list_head nrs_policies; -#endif - }; int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc); -- 1.8.3.1
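
The mechanical core of the hunks above is dropping the explicit type argument that the libcfs typed iterators carried: cfs_list_for_each_entry_typed(pos, head, type, member) becomes list_for_each_entry(pos, head, member), with the kernel macro recovering the containing structure through container_of(). A minimal sketch of the two forms, using a hypothetical element type rather than one from this patch:

	#include <linux/list.h>

	struct demo_item {			/* hypothetical element type */
		int			value;
		struct list_head	link;	/* chains the item on a list */
	};

	static int demo_sum(struct list_head *head)
	{
		struct demo_item *item;
		int sum = 0;

		/* libcfs form removed by this patch:
		 *   cfs_list_for_each_entry_typed(item, head, struct demo_item, link)
		 * kernel form used throughout the converted code: */
		list_for_each_entry(item, head, link)
			sum += item->value;

		return sum;
	}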
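
Several converted call sites (KsReleaseTsdus(), KsCleanupTsduMgr(), cfs_wi_shutdown()) share the same drain idiom: while the list is non-empty, take the first element, unlink it, and release it. A self-contained sketch of that idiom with the kernel API; the element type and the kfree() call are placeholders, not the slab/LIBCFS_FREE paths the patched code actually uses:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_item {
		struct list_head	link;
	};

	static void demo_drain(struct list_head *head)
	{
		struct demo_item *item;

		while (!list_empty(head)) {
			/* head->next is the first element; list_entry() is
			 * container_of() specialised for list members */
			item = list_entry(head->next, struct demo_item, link);
			list_del(&item->link);	/* unlink before freeing */
			kfree(item);
		}
	}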
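
cfs_waitq_add_internal() above also preserves the head/tail distinction the wrappers had: list_add() inserts right after the head and list_add_tail() appends before it, so non-exclusive waiters end up at the front of waitq->waiters, exclusive waiters at the back, and wake_up_nr() walks the list from the front. Roughly, assuming a simplified waiter structure that is not part of this patch:

	#include <linux/list.h>

	struct demo_waiter {
		struct list_head	link;
		int			exclusive;
	};

	static LIST_HEAD(demo_waiters);		/* statically initialised head */

	static void demo_enqueue(struct demo_waiter *w)
	{
		INIT_LIST_HEAD(&w->link);	/* run-time init, as CFS_INIT_LIST_HEAD became */

		if (w->exclusive)
			list_add_tail(&w->link, &demo_waiters);	/* back of the queue */
		else
			list_add(&w->link, &demo_waiters);	/* front of the queue */
	}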
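
The cfs_wi_scheduler() hunk leans on list_move_tail(), which unlinks an entry from whatever list it currently sits on and appends it to another head in a single call; this is what the cfs_list_move_tail wrapper mapped to. An illustrative sketch (structure and function names are invented, not taken from the patch) of moving deferred entries back onto a run queue:

	#include <linux/list.h>

	struct demo_wi {
		struct list_head	wi_list;
	};

	/* Move every deferred item onto the run queue, preserving order.
	 * The _safe iterator is required because each move unlinks the
	 * current entry from rerunq while we are still walking it. */
	static void demo_requeue(struct list_head *rerunq, struct list_head *runq)
	{
		struct demo_wi *wi, *next;

		list_for_each_entry_safe(wi, next, rerunq, wi_list)
			list_move_tail(&wi->wi_list, runq);
	}

(The scheduler itself only moves the single workitem it has just run; the loop here is only to show the call in context.)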