/#[ \t]*define[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)/d
s/\bCFS_ATOMIC_INIT\b/ATOMIC_INIT/g
/#[ \t]*define[ \t]*\bATOMIC_INIT\b *( *\w* *)[ \t]*\bATOMIC_INIT\b *( *\w* *)/d
+
+################################################################################
+# list operations
+s/\bcfs_list_t\b/struct list_head/g
+s/\b__cfs_list_add\b/__list_add/g
+/#[ \t]*define[ \t]*\b__list_add\b *(.*)[ \t]*\b__list_add\b *(.*)/d
+s/\bcfs_list_add\b/list_add/g
+/#[ \t]*define[ \t]*\blist_add\b *(.*)[ \t]*\blist_add\b *(.*)/d
+s/\bcfs_list_add_tail\b/list_add_tail/g
+/#[ \t]*define[ \t]*\blist_add_tail\b *(.*)[ \t]*\blist_add_tail\b *(.*)/d
+s/\b__cfs_list_del\b/__list_del/g
+/#[ \t]*define[ \t]*\b__list_del\b *(.*)[ \t]*\b__list_del\b *(.*)/d
+s/\bcfs_list_del\b/list_del/g
+/#[ \t]*define[ \t]*\blist_del\b *(.*)[ \t]*\blist_del\b *(.*)/d
+s/\bcfs_list_del_init\b/list_del_init/g
+/#[ \t]*define[ \t]*\blist_del_init\b *(.*)[ \t]*\blist_del_init\b *(.*)/d
+s/\bcfs_list_move\b/list_move/g
+/#[ \t]*define[ \t]*\blist_move\b *(.*)[ \t]*\blist_move\b *(.*)/d
+s/\bcfs_list_move_tail\b/list_move_tail/g
+/#[ \t]*define[ \t]*\blist_move_tail\b *(.*)[ \t]*\blist_move_tail\b *(.*)/d
+s/\bcfs_list_empty\b/list_empty/g
+/#[ \t]*define[ \t]*\blist_empty\b *(.*)[ \t]*\blist_empty\b *(.*)/d
+s/\bcfs_list_empty_careful\b/list_empty_careful/g
+/#[ \t]*define[ \t]*\blist_empty_careful\b *(.*)[ \t]*\blist_empty_careful\b *(.*)/d
+s/\b__cfs_list_splice\b/__list_splice/g
+/#[ \t]*define[ \t]*\b__list_splice\b *(.*)[ \t]*\b__list_splice\b *(.*)/d
+s/\bcfs_list_splice\b/list_splice/g
+/#[ \t]*define[ \t]*\blist_splice\b *(.*)[ \t]*\blist_splice\b *(.*)/d
+s/\bcfs_list_splice_init\b/list_splice_init/g
+/#[ \t]*define[ \t]*\blist_splice_init\b *(.*)[ \t]*\blist_splice_init\b *(.*)/d
+s/\bcfs_list_entry\b/list_entry/g
+/#[ \t]*define[ \t]*\blist_entry\b *(.*)[ \t]*\blist_entry\b *(.*)/d
+s/\bcfs_list_for_each\b/list_for_each/g
+/#[ \t]*define[ \t]*\blist_for_each\b *(.*)[ \t]*\blist_for_each\b *(.*)/d
+s/\bcfs_list_for_each_safe\b/list_for_each_safe/g
+/#[ \t]*define[ \t]*\blist_for_each_safe\b *(.*)[ \t]*\blist_for_each_safe\b *(.*)/d
+s/\bcfs_list_for_each_prev\b/list_for_each_prev/g
+/#[ \t]*define[ \t]*\blist_for_each_prev\b *(.*)[ \t]*\blist_for_each_prev\b *(.*)/d
+s/\bcfs_list_for_each_entry\b/list_for_each_entry/g
+/#[ \t]*define[ \t]*\blist_for_each_entry\b *(.*)[ \t]*\blist_for_each_entry\b *(.*)/d
+s/\bcfs_list_for_each_entry_reverse\b/list_for_each_entry_reverse/g
+/#[ \t]*define[ \t]*\blist_for_each_entry_reverse\b *(.*)[ \t]*\blist_for_each_entry_reverse\b *(.*)/d
+s/\bcfs_list_for_each_entry_safe_reverse\b/list_for_each_entry_safe_reverse/g
+/#[ \t]*define[ \t]*\blist_for_each_entry_safe_reverse\b *(.*)[ \t]*\blist_for_each_entry_safe_reverse\b *(.*)/d
+s/\bcfs_list_for_each_entry_safe\b/list_for_each_entry_safe/g
+/#[ \t]*define[ \t]*\blist_for_each_entry_safe\b *(.*)[ \t]*\blist_for_each_entry_safe\b *(.*)/d
+s/\bcfs_list_for_each_entry_safe_from\b/list_for_each_entry_safe_from/g
+/#[ \t]*define[ \t]*\blist_for_each_entry_safe_from\b *(.*)[ \t]*\blist_for_each_entry_safe_from\b *(.*)/d
+s/\bcfs_list_for_each_entry_continue\b/list_for_each_entry_continue/g
+/#[ \t]*define[ \t]*\blist_for_each_entry_continue\b *(.*)[ \t]*\blist_for_each_entry_continue\b *(.*)/d
+# LIST_HEAD defined in /usr/include/sys/queue.h
+s/\bCFS_LIST_HEAD_INIT\b/LIST_HEAD_INIT/g
+/#[ \t]*define[ \t]*\bLIST_HEAD_INIT\b *(.*)[ \t]*\bLIST_HEAD_INIT\b *(.*)/d
+s/\bCFS_LIST_HEAD\b/LIST_HEAD/g
+/#[ \t]*define[ \t]*\bLIST_HEAD\b *(.*)[ \t]*\bLIST_HEAD\b *(.*)/d
+s/\bCFS_INIT_LIST_HEAD\b/INIT_LIST_HEAD/g
+/#[ \t]*define[ \t]*\bINIT_LIST_HEAD\b *(.*)[ \t]*\bINIT_LIST_HEAD\b *(.*)/d
+s/\bcfs_hlist_head_t\b/struct hlist_head/g
+s/\bcfs_hlist_node_t\b/struct hlist_node/g
+s/\bcfs_hlist_unhashed\b/hlist_unhashed/g
+/#[ \t]*define[ \t]*\bhlist_unhashed\b *(.*)[ \t]*\bhlist_unhashed\b *(.*)/d
+s/\bcfs_hlist_empty\b/hlist_empty/g
+/#[ \t]*define[ \t]*\bhlist_empty\b *(.*)[ \t]*\bhlist_empty\b *(.*)/d
+s/\b__cfs_hlist_del\b/__hlist_del/g
+/#[ \t]*define[ \t]*\b__hlist_del\b *(.*)[ \t]*\b__hlist_del\b *(.*)/d
+s/\bcfs_hlist_del\b/hlist_del/g
+/#[ \t]*define[ \t]*\bhlist_del\b *(.*)[ \t]*\bhlist_del\b *(.*)/d
+s/\bcfs_hlist_del_init\b/hlist_del_init/g
+/#[ \t]*define[ \t]*\bhlist_del_init\b *(.*)[ \t]*\bhlist_del_init\b *(.*)/d
+s/\bcfs_hlist_add_head\b/hlist_add_head/g
+/#[ \t]*define[ \t]*\bhlist_add_head\b *(.*)[ \t]*\bhlist_add_head\b *(.*)/d
+s/\bcfs_hlist_add_before\b/hlist_add_before/g
+/#[ \t]*define[ \t]*\bhlist_add_before\b *(.*)[ \t]*\bhlist_add_before\b *(.*)/d
+s/\bcfs_hlist_add_after\b/hlist_add_after/g
+/#[ \t]*define[ \t]*\bhlist_add_after\b *(.*)[ \t]*\bhlist_add_after\b *(.*)/d
+s/\bcfs_hlist_entry\b/hlist_entry/g
+/#[ \t]*define[ \t]*\bhlist_entry\b *(.*)[ \t]*\bhlist_entry\b *(.*)/d
+s/\bcfs_hlist_for_each\b/hlist_for_each/g
+/#[ \t]*define[ \t]*\bhlist_for_each\b *(.*)[ \t]*\bhlist_for_each\b *(.*)/d
+s/\bcfs_hlist_for_each_safe\b/hlist_for_each_safe/g
+/#[ \t]*define[ \t]*\bhlist_for_each_safe\b *(.*)[ \t]*\bhlist_for_each_safe\b *(.*)/d
+s/\bcfs_hlist_for_each_entry_continue\b/hlist_for_each_entry_continue/g
+/#[ \t]*define[ \t]*\bhlist_for_each_entry_continue\b *(.*)[ \t]*\bhlist_for_each_entry_continue\b *(.*)/d
+s/\bcfs_hlist_for_each_entry_from\b/hlist_for_each_entry_from/g
+/#[ \t]*define[ \t]*\bhlist_for_each_entry_from\b *(.*)[ \t]*\bhlist_for_each_entry_from\b *(.*)/d
+s/\bCFS_HLIST_HEAD_INIT\b/HLIST_HEAD_INIT/g
+/#[ \t]*define[ \t]*\bHLIST_HEAD_INIT\b[ \t]*\bHLIST_HEAD_INIT\b/d
+s/\bCFS_HLIST_HEAD\b/HLIST_HEAD/g
+/#[ \t]*define[ \t]*\bHLIST_HEAD\b *(.*)[ \t]*\bHLIST_HEAD\b *(.*)/d
+s/\bCFS_INIT_HLIST_HEAD\b/INIT_HLIST_HEAD/g
+/#[ \t]*define[ \t]*\bINIT_HLIST_HEAD\b *(.*)[ \t]*\bINIT_HLIST_HEAD\b *(.*)/d
+s/\bCFS_INIT_HLIST_NODE\b/INIT_HLIST_NODE/g
+/#[ \t]*define[ \t]*\bINIT_HLIST_NODE\b *(.*)[ \t]*\bINIT_HLIST_NODE\b *(.*)/d
+s/\bcfs_list_for_each_entry_safe_from\b/list_for_each_entry_safe_from/g
+/cfs_list_for_each_entry_typed/{;N;s/\(cfs_list_for_each_entry_typed\)\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*/list_for_each_entry\2 \3 /}
+/cfs_list_for_each_entry_safe_typed/{;N;s/\(cfs_list_for_each_entry_safe_typed\)\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*\([^,]*,\)[ ,\t,\n]*/list_for_each_entry_safe\2 \3 \4 /}
.flags = 0, \
.event = 0, \
.hits = 0, \
- .linkage = CFS_LIST_HEAD(name.linkage), \
+ .linkage = LIST_HEAD_INIT(name.linkage),\
.magic = KSLEEP_LINK_MAGIC \
}
} cfs_hash_lock_ops_t;
typedef struct cfs_hash_hlist_ops {
- /** return hlist_head of hash-head of @bd */
- cfs_hlist_head_t *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd);
- /** return hash-head size */
- int (*hop_hhead_size)(cfs_hash_t *hs);
- /** add @hnode to hash-head of @bd */
- int (*hop_hnode_add)(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, cfs_hlist_node_t *hnode);
- /** remove @hnode from hash-head of @bd */
- int (*hop_hnode_del)(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, cfs_hlist_node_t *hnode);
+ /** return hlist_head of hash-head of @bd */
+ struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd);
+ /** return hash-head size */
+ int (*hop_hhead_size)(cfs_hash_t *hs);
+ /** add @hnode to hash-head of @bd */
+ int (*hop_hnode_add)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode);
+ /** remove @hnode from hash-head of @bd */
+ int (*hop_hnode_del)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode);
} cfs_hash_hlist_ops_t;
typedef struct cfs_hash_ops {
- /** return hashed value from @key */
- unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
- /** return key address of @hnode */
- void * (*hs_key)(cfs_hlist_node_t *hnode);
- /** copy key from @hnode to @key */
- void (*hs_keycpy)(cfs_hlist_node_t *hnode, void *key);
+ /** return hashed value from @key */
+ unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
+ /** return key address of @hnode */
+ void * (*hs_key)(struct hlist_node *hnode);
+ /** copy key from @hnode to @key */
+ void (*hs_keycpy)(struct hlist_node *hnode, void *key);
/**
* compare @key with key of @hnode
* returns 1 on a match
*/
- int (*hs_keycmp)(const void *key, cfs_hlist_node_t *hnode);
- /** return object address of @hnode, i.e: container_of(...hnode) */
- void * (*hs_object)(cfs_hlist_node_t *hnode);
- /** get refcount of item, always called with holding bucket-lock */
- void (*hs_get)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
- /** release refcount of item */
- void (*hs_put)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
- /** release refcount of item, always called with holding bucket-lock */
- void (*hs_put_locked)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
- /** it's called before removing of @hnode */
- void (*hs_exit)(cfs_hash_t *hs, cfs_hlist_node_t *hnode);
+ int (*hs_keycmp)(const void *key, struct hlist_node *hnode);
+ /** return object address of @hnode, i.e: container_of(...hnode) */
+ void * (*hs_object)(struct hlist_node *hnode);
+ /** get refcount of item, always called with holding bucket-lock */
+ void (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
+ /** release refcount of item */
+ void (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
+ /** release refcount of item, always called with holding bucket-lock */
+ void (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
+ /** it's called before removing of @hnode */
+ void (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
} cfs_hash_ops_t;
/** total number of buckets in @hs */
static inline unsigned
cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
{
- return CFS_HOP(hs, hash)(hs, key, mask);
+ return CFS_HOP(hs, hash)(hs, key, mask);
}
static inline void *
-cfs_hash_key(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
{
- return CFS_HOP(hs, key)(hnode);
+ return CFS_HOP(hs, key)(hnode);
}
static inline void
-cfs_hash_keycpy(cfs_hash_t *hs, cfs_hlist_node_t *hnode, void *key)
+cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
{
- if (CFS_HOP(hs, keycpy) != NULL)
- CFS_HOP(hs, keycpy)(hnode, key);
+ if (CFS_HOP(hs, keycpy) != NULL)
+ CFS_HOP(hs, keycpy)(hnode, key);
}
/**
* Returns 1 on a match,
*/
static inline int
-cfs_hash_keycmp(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
+cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
- return CFS_HOP(hs, keycmp)(key, hnode);
+ return CFS_HOP(hs, keycmp)(key, hnode);
}
static inline void *
-cfs_hash_object(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
{
- return CFS_HOP(hs, object)(hnode);
+ return CFS_HOP(hs, object)(hnode);
}
static inline void
-cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
- return CFS_HOP(hs, get)(hs, hnode);
+ return CFS_HOP(hs, get)(hs, hnode);
}
static inline void
-cfs_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
- LASSERT(CFS_HOP(hs, put_locked) != NULL);
+ LASSERT(CFS_HOP(hs, put_locked) != NULL);
- return CFS_HOP(hs, put_locked)(hs, hnode);
+ return CFS_HOP(hs, put_locked)(hs, hnode);
}
static inline void
-cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
{
- LASSERT(CFS_HOP(hs, put) != NULL);
+ LASSERT(CFS_HOP(hs, put) != NULL);
- return CFS_HOP(hs, put)(hs, hnode);
+ return CFS_HOP(hs, put)(hs, hnode);
}
static inline void
-cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
{
- if (CFS_HOP(hs, exit))
- CFS_HOP(hs, exit)(hs, hnode);
+ if (CFS_HOP(hs, exit))
+ CFS_HOP(hs, exit)(hs, hnode);
}
static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
}
void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode);
+ struct hlist_node *hnode);
void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode);
+ struct hlist_node *hnode);
void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
- cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode);
+ cfs_hash_bd_t *bd_new, struct hlist_node *hnode);
static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
atomic_t *condition)
return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
}
-static inline cfs_hlist_head_t *cfs_hash_bd_hhead(cfs_hash_t *hs,
+static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
cfs_hash_bd_t *bd)
{
- return hs->hs_hops->hop_hhead(hs, bd);
+ return hs->hs_hops->hop_hhead(hs, bd);
}
-cfs_hlist_node_t *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key);
-cfs_hlist_node_t *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key);
-cfs_hlist_node_t *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key,
- cfs_hlist_node_t *hnode,
- int insist_add);
-cfs_hlist_node_t *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bd, const void *key,
- cfs_hlist_node_t *hnode);
+struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key);
+struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key);
+struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key,
+ struct hlist_node *hnode,
+ int insist_add);
+struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key,
+ struct hlist_node *hnode);
/**
* operations on cfs_hash bucket (bd: bucket descriptor),
void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
- cfs_hash_bd_t *bds, int excl)
+ cfs_hash_bd_t *bds, int excl)
{
- cfs_hash_dual_bd_get(hs, key, bds);
- cfs_hash_dual_bd_lock(hs, bds, excl);
+ cfs_hash_dual_bd_get(hs, key, bds);
+ cfs_hash_dual_bd_lock(hs, bds, excl);
}
-cfs_hlist_node_t *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds,
- const void *key);
-cfs_hlist_node_t *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds,
- const void *key,
- cfs_hlist_node_t *hnode,
- int insist_add);
-cfs_hlist_node_t *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds,
- const void *key,
- cfs_hlist_node_t *hnode);
+struct hlist_node *
+cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ const void *key);
+struct hlist_node *
+cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ const void *key, struct hlist_node *hnode,
+ int insist_add);
+struct hlist_node *
+cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ const void *key, struct hlist_node *hnode);
/* Hash init/cleanup functions */
cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- cfs_hash_ops_t *ops, unsigned flags);
+ unsigned bkt_bits, unsigned extra_bytes,
+ unsigned min_theta, unsigned max_theta,
+ cfs_hash_ops_t *ops, unsigned flags);
cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
void cfs_hash_putref(cfs_hash_t *hs);
/* Hash addition functions */
void cfs_hash_add(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode);
+ struct hlist_node *hnode);
int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode);
+ struct hlist_node *hnode);
void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode);
+ struct hlist_node *hnode);
/* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode);
+void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG 1024
typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *node, void *data);
+ struct hlist_node *node, void *data);
void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_for_each_nolock(cfs_hash_t *hs,
- cfs_hash_for_each_cb_t, void *data);
-int cfs_hash_for_each_empty(cfs_hash_t *hs,
- cfs_hash_for_each_cb_t, void *data);
+int cfs_hash_for_each_nolock(cfs_hash_t *hs, cfs_hash_for_each_cb_t,
+ void *data);
+int cfs_hash_for_each_empty(cfs_hash_t *hs, cfs_hash_for_each_cb_t,
+ void *data);
void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
- cfs_hash_for_each_cb_t, void *data);
+ cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
- cfs_hash_for_each_cb_t, void *data);
+ cfs_hash_for_each_cb_t, void *data);
int cfs_hash_is_empty(cfs_hash_t *hs);
__u64 cfs_hash_size_get(cfs_hash_t *hs);
void cfs_hash_rehash_cancel(cfs_hash_t *hs);
int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
- void *new_key, cfs_hlist_node_t *hnode);
+ void *new_key, struct hlist_node *hnode);
#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- LASSERT(cfs_hash_keycmp(hs, key, hnode));
+ LASSERT(cfs_hash_keycmp(hs, key, hnode));
}
/* Validate hnode is in the correct bucket */
static inline void
cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hash_bd_t bds[2];
+ cfs_hash_bd_t bds[2];
- cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
- LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
- bds[1].bd_bucket == bd->bd_bucket);
+ cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
+ LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
+ bds[1].bd_bucket == bd->bd_bucket);
}
#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
static inline void
cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode) {}
+ struct hlist_node *hnode) {}
static inline void
cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode) {}
+ struct hlist_node *hnode) {}
#endif /* CFS_HASH_DEBUG_LEVEL */
#ifdef __KERNEL__
struct libcfs_ioctl_handler {
- cfs_list_t item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
+ struct list_head item;
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
};
-#define DECLARE_IOCTL_HANDLER(ident, func) \
- struct libcfs_ioctl_handler ident = { \
- /* .item = */ CFS_LIST_HEAD_INIT(ident.item), \
- /* .handle_ioctl = */ func \
- }
+#define DECLARE_IOCTL_HANDLER(ident, func) \
+ struct libcfs_ioctl_handler ident = { \
+ /* .item = */ LIST_HEAD_INIT(ident.item), \
+ /* .handle_ioctl = */ func \
+ }
#endif
#define CLASSERT(cond) do {switch(42) {case (cond): case 0: break;}} while (0)
/* support decl needed both by kernel and liblustre */
-int libcfs_isknown_lnd(int type);
-char *libcfs_lnd2modname(int type);
-char *libcfs_lnd2str(int type);
-int libcfs_str2lnd(const char *str);
-char *libcfs_net2str(__u32 net);
-char *libcfs_nid2str(lnet_nid_t nid);
-__u32 libcfs_str2net(const char *str);
-lnet_nid_t libcfs_str2nid(const char *str);
-int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
-char *libcfs_id2str(lnet_process_id_t id);
-void cfs_free_nidlist(cfs_list_t *list);
-int cfs_parse_nidlist(char *str, int len, cfs_list_t *list);
-int cfs_match_nid(lnet_nid_t nid, cfs_list_t *list);
+int libcfs_isknown_lnd(int type);
+char *libcfs_lnd2modname(int type);
+char *libcfs_lnd2str(int type);
+int libcfs_str2lnd(const char *str);
+char *libcfs_net2str(__u32 net);
+char *libcfs_nid2str(lnet_nid_t nid);
+__u32 libcfs_str2net(const char *str);
+lnet_nid_t libcfs_str2nid(const char *str);
+int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
+char *libcfs_id2str(lnet_process_id_t id);
+void cfs_free_nidlist(struct list_head *list);
+int cfs_parse_nidlist(char *str, int len, struct list_head *list);
+int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
/** \addtogroup lnet_addr
* @{ */
/*
* Link to cfs_expr_list::el_exprs.
*/
- cfs_list_t re_link;
- __u32 re_lo;
- __u32 re_hi;
- __u32 re_stride;
+ struct list_head re_link;
+ __u32 re_lo;
+ __u32 re_hi;
+ __u32 re_stride;
};
struct cfs_expr_list {
- cfs_list_t el_link;
- cfs_list_t el_exprs;
+ struct list_head el_link;
+ struct list_head el_exprs;
};
static inline int
void cfs_expr_list_print(struct cfs_expr_list *expr_list);
int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
struct cfs_expr_list **elpp);
-void cfs_expr_list_free_list(cfs_list_t *list);
-int cfs_ip_addr_parse(char *str, int len, cfs_list_t *list);
-int cfs_ip_addr_match(__u32 addr, cfs_list_t *list);
-void cfs_ip_addr_free(cfs_list_t *list);
+void cfs_expr_list_free_list(struct list_head *list);
+int cfs_ip_addr_parse(char *str, int len, struct list_head *list);
+int cfs_ip_addr_match(__u32 addr, struct list_head *list);
+void cfs_ip_addr_free(struct list_head *list);
#ifdef __KERNEL__
#define strtoul(str, endp, base) simple_strtoul(str, endp, base)
typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
typedef struct cfs_workitem {
- /** chain on runq or rerunq */
- cfs_list_t wi_list;
- /** working function */
- cfs_wi_action_t wi_action;
- /** arg for working function */
- void *wi_data;
- /** in running */
- unsigned short wi_running:1;
- /** scheduled */
- unsigned short wi_scheduled:1;
+ /** chain on runq or rerunq */
+ struct list_head wi_list;
+ /** working function */
+ cfs_wi_action_t wi_action;
+ /** arg for working function */
+ void *wi_data;
+ /** in running */
+ unsigned short wi_running:1;
+ /** scheduled */
+ unsigned short wi_scheduled:1;
} cfs_workitem_t;
static inline void
cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
{
- CFS_INIT_LIST_HEAD(&wi->wi_list);
+ INIT_LIST_HEAD(&wi->wi_list);
- wi->wi_running = 0;
- wi->wi_scheduled = 0;
- wi->wi_data = data;
- wi->wi_action = action;
+ wi->wi_running = 0;
+ wi->wi_scheduled = 0;
+ wi->wi_data = data;
+ wi->wi_action = action;
}
void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
# if !KLWT_SUPPORT
typedef struct _lwt_page {
- cfs_list_t lwtp_list;
- struct page *lwtp_page;
- lwt_event_t *lwtp_events;
+ struct list_head lwtp_list;
+ struct page *lwtp_page;
+ lwt_event_t *lwtp_events;
} lwt_page_t;
typedef struct {
p = cpu->lwtc_current_page; \
e = &p->lwtp_events[cpu->lwtc_current_index++]; \
\
- if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \
- cpu->lwtc_current_page = \
- cfs_list_entry (p->lwtp_list.next, \
- lwt_page_t, lwtp_list); \
- cpu->lwtc_current_index = 0; \
- } \
+ if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \
+ cpu->lwtc_current_page = \
+ list_entry (p->lwtp_list.next, \
+ lwt_page_t, lwtp_list); \
+ cpu->lwtc_current_index = 0; \
+ } \
\
e->lwte_when = get_cycles(); \
e->lwte_where = LWTWHERE(__FILE__,__LINE__); \
#include <linux/list.h>
-typedef struct list_head cfs_list_t;
-
-#define __cfs_list_add(new, prev, next) __list_add(new, prev, next)
-#define cfs_list_add(new, head) list_add(new, head)
-
-#define cfs_list_add_tail(new, head) list_add_tail(new, head)
-
-#define __cfs_list_del(prev, next) __list_del(prev, next)
-#define cfs_list_del(entry) list_del(entry)
-#define cfs_list_del_init(entry) list_del_init(entry)
-
-#define cfs_list_move(list, head) list_move(list, head)
-#define cfs_list_move_tail(list, head) list_move_tail(list, head)
-
-#define cfs_list_empty(head) list_empty(head)
-#define cfs_list_empty_careful(head) list_empty_careful(head)
-
-#define __cfs_list_splice(list, head) __list_splice(list, head)
-#define cfs_list_splice(list, head) list_splice(list, head)
-#define cfs_list_splice_tail(list, head) list_splice_tail(list, head)
-
-#define cfs_list_splice_init(list, head) list_splice_init(list, head)
-
-#define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member)
-#define cfs_list_for_each(pos, head) list_for_each(pos, head)
-#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head)
-#define cfs_list_for_each_prev(pos, head) list_for_each_prev(pos, head)
-#define cfs_list_for_each_entry(pos, head, member) \
- list_for_each_entry(pos, head, member)
-#define cfs_list_for_each_entry_reverse(pos, head, member) \
- list_for_each_entry_reverse(pos, head, member)
-#define cfs_list_for_each_entry_safe_reverse(pos, n, head, member) \
- list_for_each_entry_safe_reverse(pos, n, head, member)
-#define cfs_list_for_each_entry_safe(pos, n, head, member) \
- list_for_each_entry_safe(pos, n, head, member)
-#ifdef list_for_each_entry_safe_from
-#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \
- list_for_each_entry_safe_from(pos, n, head, member)
-#endif /* list_for_each_entry_safe_from */
-#define cfs_list_for_each_entry_continue(pos, head, member) \
- list_for_each_entry_continue(pos, head, member)
-
-#define CFS_LIST_HEAD_INIT(n) LIST_HEAD_INIT(n)
-#define CFS_LIST_HEAD(n) LIST_HEAD(n)
-#define CFS_INIT_LIST_HEAD(p) INIT_LIST_HEAD(p)
-
-typedef struct hlist_head cfs_hlist_head_t;
-typedef struct hlist_node cfs_hlist_node_t;
-
-#define cfs_hlist_unhashed(h) hlist_unhashed(h)
-
-#define cfs_hlist_empty(h) hlist_empty(h)
-
-#define __cfs_hlist_del(n) __hlist_del(n)
-#define cfs_hlist_del(n) hlist_del(n)
-#define cfs_hlist_del_init(n) hlist_del_init(n)
-
-#define cfs_hlist_add_head(n, next) hlist_add_head(n, next)
-#define cfs_hlist_add_before(n, next) hlist_add_before(n, next)
-#define cfs_hlist_add_after(n, next) hlist_add_after(n, next)
-
-#define cfs_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
-#define cfs_hlist_for_each(pos, head) hlist_for_each(pos, head)
-#define cfs_hlist_for_each_safe(pos, n, head) \
- hlist_for_each_safe(pos, n, head)
-#ifdef HAVE_HLIST_FOR_EACH_3ARG
-#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
- pos = NULL; hlist_for_each_entry(tpos, head, member)
-#else
-#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
- hlist_for_each_entry(tpos, pos, head, member)
-#endif
-#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \
- hlist_for_each_entry_continue(tpos, pos, member)
-#define cfs_hlist_for_each_entry_from(tpos, pos, member) \
- hlist_for_each_entry_from(tpos, pos, member)
-#ifdef HAVE_HLIST_FOR_EACH_3ARG
-#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- pos = NULL; hlist_for_each_entry_safe(tpos, n, head, member)
-#else
-#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- hlist_for_each_entry_safe(tpos, pos, n, head, member)
-#endif
-
-#define CFS_HLIST_HEAD_INIT HLIST_HEAD_INIT
-#define CFS_HLIST_HEAD(n) HLIST_HEAD(n)
-#define CFS_INIT_HLIST_HEAD(p) INIT_HLIST_HEAD(p)
-#define CFS_INIT_HLIST_NODE(p) INIT_HLIST_NODE(p)
-
#else /* !defined (__linux__) || !defined(__KERNEL__) */
/*
#define prefetch(a) ((void)a)
-struct cfs_list_head {
- struct cfs_list_head *next, *prev;
+struct list_head {
+ struct list_head *next, *prev;
};
-typedef struct cfs_list_head cfs_list_t;
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
-#define CFS_LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define CFS_LIST_HEAD(name) \
- cfs_list_t name = CFS_LIST_HEAD_INIT(name)
-
-#define CFS_INIT_LIST_HEAD(ptr) do { \
+#define INIT_LIST_HEAD(ptr) do { \
(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-static inline void __cfs_list_add(cfs_list_t * new,
- cfs_list_t * prev,
- cfs_list_t * next)
+static inline void __list_add(struct list_head * new,
+ struct list_head * prev,
+ struct list_head * next)
{
next->prev = new;
new->next = next;
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
-static inline void cfs_list_add(cfs_list_t *new,
- cfs_list_t *head)
+static inline void list_add(struct list_head *new,
+ struct list_head *head)
{
- __cfs_list_add(new, head, head->next);
+ __list_add(new, head, head->next);
}
/**
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
-static inline void cfs_list_add_tail(cfs_list_t *new,
- cfs_list_t *head)
+static inline void list_add_tail(struct list_head *new,
+ struct list_head *head)
{
- __cfs_list_add(new, head->prev, head);
+ __list_add(new, head->prev, head);
}
/*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-static inline void __cfs_list_del(cfs_list_t *prev,
- cfs_list_t *next)
+static inline void __list_del(struct list_head *prev,
+ struct list_head *next)
{
next->prev = prev;
prev->next = next;
* Note: list_empty(entry) does not return true after this, the entry is in an
* undefined state.
*/
-static inline void cfs_list_del(cfs_list_t *entry)
+static inline void list_del(struct list_head *entry)
{
- __cfs_list_del(entry->prev, entry->next);
+ __list_del(entry->prev, entry->next);
}
/**
* Remove an entry from the list it is currently in and reinitialize it.
* \param entry the entry to remove.
*/
-static inline void cfs_list_del_init(cfs_list_t *entry)
+static inline void list_del_init(struct list_head *entry)
{
- __cfs_list_del(entry->prev, entry->next);
- CFS_INIT_LIST_HEAD(entry);
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
}
/**
* \param list the entry to move
* \param head the list to move it to
*/
-static inline void cfs_list_move(cfs_list_t *list,
- cfs_list_t *head)
+static inline void list_move(struct list_head *list,
+ struct list_head *head)
{
- __cfs_list_del(list->prev, list->next);
- cfs_list_add(list, head);
+ __list_del(list->prev, list->next);
+ list_add(list, head);
}
/**
* \param list the entry to move
* \param head the list to move it to
*/
-static inline void cfs_list_move_tail(cfs_list_t *list,
- cfs_list_t *head)
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
{
- __cfs_list_del(list->prev, list->next);
- cfs_list_add_tail(list, head);
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
}
/**
* Test whether a list is empty
* \param head the list to test.
*/
-static inline int cfs_list_empty(cfs_list_t *head)
+static inline int list_empty(struct list_head *head)
{
return head->next == head;
}
* Tests whether a list is empty _and_ checks that no other CPU might be
* in the process of modifying either member (next or prev)
*
- * NOTE: using cfs_list_empty_careful() without synchronization
+ * NOTE: using list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
- * to the list entry is cfs_list_del_init(). Eg. it cannot be used
+ * to the list entry is list_del_init(). Eg. it cannot be used
* if another CPU could re-list_add() it.
*/
-static inline int cfs_list_empty_careful(const cfs_list_t *head)
+static inline int list_empty_careful(const struct list_head *head)
{
- cfs_list_t *next = head->next;
- return (next == head) && (next == head->prev);
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
}
-static inline void __cfs_list_splice(cfs_list_t *list,
- cfs_list_t *head)
+static inline void __list_splice(struct list_head *list,
+ struct list_head *head)
{
- cfs_list_t *first = list->next;
- cfs_list_t *last = list->prev;
- cfs_list_t *at = head->next;
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
first->prev = head;
head->next = first;
* The contents of \a list are added at the start of \a head. \a list is in an
* undefined state on return.
*/
-static inline void cfs_list_splice(cfs_list_t *list,
- cfs_list_t *head)
+static inline void list_splice(struct list_head *list,
+ struct list_head *head)
{
- if (!cfs_list_empty(list))
- __cfs_list_splice(list, head);
+ if (!list_empty(list))
+ __list_splice(list, head);
}
-static inline void cfs_list_splice_tail(cfs_list_t *list, cfs_list_t *head)
+static inline void list_splice_tail(struct list_head *list, struct list_head *head)
{
- if (!cfs_list_empty(list))
- __cfs_list_splice(list, head->prev);
+ if (!list_empty(list))
+ __list_splice(list, head->prev);
}
/**
* The contents of \a list are added at the start of \a head. \a list is empty
* on return.
*/
-static inline void cfs_list_splice_init(cfs_list_t *list,
- cfs_list_t *head)
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
{
- if (!cfs_list_empty(list)) {
- __cfs_list_splice(list, head);
- CFS_INIT_LIST_HEAD(list);
+ if (!list_empty(list)) {
+ __list_splice(list, head);
+ INIT_LIST_HEAD(list);
}
}
* \param type the type of the struct this is embedded in.
* \param member the member name of the list within the struct.
*/
-#define cfs_list_entry(ptr, type, member) \
+#define list_entry(ptr, type, member) \
((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
/**
* Behaviour is undefined if \a pos is removed from the list in the body of the
* loop.
*/
-#define cfs_list_for_each(pos, head) \
+#define list_for_each(pos, head) \
for (pos = (head)->next, prefetch(pos->next); pos != (head); \
pos = pos->next, prefetch(pos->next))
* This is safe to use if \a pos could be removed from the list in the body of
* the loop.
*/
-#define cfs_list_for_each_safe(pos, n, head) \
+#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
* Iterate over a list continuing after existing point
* \param pos the type * to use as a loop counter
* \param head the list head
- * \param member the name of the list_struct within the struct
+ * \param member the name of the list_struct within the struct
*/
-#define cfs_list_for_each_entry_continue(pos, head, member) \
- for (pos = cfs_list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cfs_list_entry(pos->member.next, typeof(*pos), member))
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = list_entry(pos->member.next, typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* \defgroup hlist Hash List
* @{
*/
-typedef struct cfs_hlist_node {
- struct cfs_hlist_node *next, **pprev;
-} cfs_hlist_node_t;
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
-typedef struct cfs_hlist_head {
- cfs_hlist_node_t *first;
-} cfs_hlist_head_t;
+struct hlist_head {
+ struct hlist_node *first;
+};
/* @} */
* @{
*/
-#define CFS_HLIST_HEAD_INIT { NULL_P }
-#define CFS_HLIST_HEAD(name) cfs_hlist_head_t name = { NULL_P }
-#define CFS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL_P)
-#define CFS_INIT_HLIST_NODE(ptr) ((ptr)->next = NULL_P, (ptr)->pprev = NULL_P)
+#define HLIST_HEAD_INIT { NULL_P }
+#define HLIST_HEAD(name) struct hlist_head name = { NULL_P }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL_P)
+#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL_P, (ptr)->pprev = NULL_P)
-static inline int cfs_hlist_unhashed(const cfs_hlist_node_t *h)
+static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
-static inline int cfs_hlist_empty(const cfs_hlist_head_t *h)
+static inline int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}
-static inline void __cfs_hlist_del(cfs_hlist_node_t *n)
+static inline void __hlist_del(struct hlist_node *n)
{
- cfs_hlist_node_t *next = n->next;
- cfs_hlist_node_t **pprev = n->pprev;
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
-static inline void cfs_hlist_del(cfs_hlist_node_t *n)
+static inline void hlist_del(struct hlist_node *n)
{
- __cfs_hlist_del(n);
+ __hlist_del(n);
}
-static inline void cfs_hlist_del_init(cfs_hlist_node_t *n)
+static inline void hlist_del_init(struct hlist_node *n)
{
if (n->pprev) {
- __cfs_hlist_del(n);
- CFS_INIT_HLIST_NODE(n);
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
}
}
-static inline void cfs_hlist_add_head(cfs_hlist_node_t *n,
- cfs_hlist_head_t *h)
+static inline void hlist_add_head(struct hlist_node *n,
+ struct hlist_head *h)
{
- cfs_hlist_node_t *first = h->first;
+ struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
}
/* next must be != NULL */
-static inline void cfs_hlist_add_before(cfs_hlist_node_t *n,
- cfs_hlist_node_t *next)
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
*(n->pprev) = n;
}
-static inline void cfs_hlist_add_after(cfs_hlist_node_t *n,
- cfs_hlist_node_t *next)
+static inline void hlist_add_after(struct hlist_node *n,
+ struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->next->pprev = &next->next;
}
-#define cfs_hlist_entry(ptr, type, member) container_of(ptr,type,member)
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
-#define cfs_hlist_for_each(pos, head) \
+#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && (prefetch(pos->next), 1); \
pos = pos->next)
-#define cfs_hlist_for_each_safe(pos, n, head) \
+#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && (n = pos->next, 1); \
pos = n)
* \param head the head for your list.
* \param member the name of the hlist_node within the struct.
*/
-#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
+#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* \param pos the &struct hlist_node to use as a loop counter.
* \param member the name of the hlist_node within the struct.
*/
-#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \
+#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* \param pos the &struct hlist_node to use as a loop counter.
* \param member the name of the hlist_node within the struct.
*/
-#define cfs_hlist_for_each_entry_from(tpos, pos, member) \
+#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* \param head the head for your list.
* \param member the name of the hlist_node within the struct.
*/
-#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
- ({ tpos = cfs_hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
/* @} */
#endif /* __linux__ && __KERNEL__ */
-#ifndef cfs_list_for_each_prev
+#ifndef list_for_each_prev
/**
* Iterate over a list in reverse order
* \param pos the &struct list_head to use as a loop counter.
* \param head the head for your list.
*/
-#define cfs_list_for_each_prev(pos, head) \
+#define list_for_each_prev(pos, head) \
for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
pos = pos->prev, prefetch(pos->prev))
-#endif /* cfs_list_for_each_prev */
+#endif /* list_for_each_prev */
-#ifndef cfs_list_for_each_entry
+#ifndef list_for_each_entry
/**
* Iterate over a list of given type
* \param pos the type * to use as a loop counter.
* \param head the head for your list.
* \param member the name of the list_struct within the struct.
*/
-#define cfs_list_for_each_entry(pos, head, member) \
- for (pos = cfs_list_entry((head)->next, typeof(*pos), member), \
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
prefetch(pos->member.next); \
&pos->member != (head); \
- pos = cfs_list_entry(pos->member.next, typeof(*pos), member), \
+ pos = list_entry(pos->member.next, typeof(*pos), member), \
prefetch(pos->member.next))
-#endif /* cfs_list_for_each_entry */
-
-#ifndef cfs_list_for_each_entry_rcu
-#define cfs_list_for_each_entry_rcu(pos, head, member) \
- list_for_each_entry(pos, head, member)
-#endif
+#endif /* list_for_each_entry */
-#ifndef cfs_list_for_each_entry_rcu
-#define cfs_list_for_each_entry_rcu(pos, head, member) \
- list_for_each_entry(pos, head, member)
-#endif
-
-#ifndef cfs_list_for_each_entry_reverse
+#ifndef list_for_each_entry_reverse
/**
* Iterate backwards over a list of given type.
* \param pos the type * to use as a loop counter.
* \param head the head for your list.
* \param member the name of the list_struct within the struct.
*/
-#define cfs_list_for_each_entry_reverse(pos, head, member) \
- for (pos = cfs_list_entry((head)->prev, typeof(*pos), member); \
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
- pos = cfs_list_entry(pos->member.prev, typeof(*pos), member))
-#endif /* cfs_list_for_each_entry_reverse */
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+#endif /* list_for_each_entry_reverse */
-#ifndef cfs_list_for_each_entry_safe
+#ifndef list_for_each_entry_safe
/**
* Iterate over a list of given type safe against removal of list entry
* \param pos the type * to use as a loop counter.
* \param head the head for your list.
* \param member the name of the list_struct within the struct.
*/
-#define cfs_list_for_each_entry_safe(pos, n, head, member) \
- for (pos = cfs_list_entry((head)->next, typeof(*pos), member), \
- n = cfs_list_entry(pos->member.next, typeof(*pos), member); \
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
- pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member))
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
-#endif /* cfs_list_for_each_entry_safe */
+#endif /* list_for_each_entry_safe */
-#ifndef cfs_list_for_each_entry_safe_from
-/**
- * Iterate over a list continuing from an existing point
- * \param pos the type * to use as a loop cursor.
- * \param n another type * to use as temporary storage
- * \param head the head for your list.
- * \param member the name of the list_struct within the struct.
- *
- * Iterate over list of given type from current point, safe against
- * removal of list entry.
- */
-#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = cfs_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cfs_list_entry(n->member.next, typeof(*n), member))
-#endif /* cfs_list_for_each_entry_safe_from */
-
-#define cfs_list_for_each_entry_typed(pos, head, type, member) \
- for (pos = cfs_list_entry((head)->next, type, member), \
- prefetch(pos->member.next); \
- &pos->member != (head); \
- pos = cfs_list_entry(pos->member.next, type, member), \
- prefetch(pos->member.next))
+/* Temporary until everything is moved over to linux api */
+typedef struct list_head cfs_list_t;
+
+#define __cfs_list_add(new, prev, next) __list_add(new, prev, next)
+#define cfs_list_add(new, head) list_add(new, head)
+
+#define cfs_list_add_tail(new, head) list_add_tail(new, head)
+
+#define __cfs_list_del(prev, next) __list_del(prev, next)
+#define cfs_list_del(entry) list_del(entry)
+#define cfs_list_del_init(entry) list_del_init(entry)
+
+#define cfs_list_move(list, head) list_move(list, head)
+#define cfs_list_move_tail(list, head) list_move_tail(list, head)
+
+#define cfs_list_empty(head) list_empty(head)
+#define cfs_list_empty_careful(head) list_empty_careful(head)
+
+#define __cfs_list_splice(list, head) __list_splice(list, head)
+#define cfs_list_splice(list, head) list_splice(list, head)
+#define cfs_list_splice_tail(list, head) list_splice_tail(list, head)
+
+#define cfs_list_splice_init(list, head) list_splice_init(list, head)
+
+#define cfs_list_entry(ptr, type, member) list_entry(ptr, type, member)
+#define cfs_list_for_each(pos, head) list_for_each(pos, head)
+#define cfs_list_for_each_safe(pos, n, head) list_for_each_safe(pos, n, head)
+
+#define cfs_list_for_each_prev(pos, head) list_for_each_prev(pos, head)
+#define cfs_list_for_each_entry(pos, head, member) \
+ list_for_each_entry(pos, head, member)
+#define cfs_list_for_each_entry_reverse(pos, head, member) \
+ list_for_each_entry_reverse(pos, head, member)
+#define cfs_list_for_each_entry_safe_reverse(pos, n, head, member) \
+ list_for_each_entry_safe_reverse(pos, n, head, member)
+#define cfs_list_for_each_entry_safe(pos, n, head, member) \
+ list_for_each_entry_safe(pos, n, head, member)
+#ifdef list_for_each_entry_safe_from
+#define cfs_list_for_each_entry_safe_from(pos, n, head, member) \
+ list_for_each_entry_safe_from(pos, n, head, member)
+#endif /* list_for_each_entry_safe_from */
+#define cfs_list_for_each_entry_continue(pos, head, member) \
+ list_for_each_entry_continue(pos, head, member)
+
+#define CFS_LIST_HEAD_INIT(n) LIST_HEAD_INIT(n)
+#define CFS_INIT_LIST_HEAD(p) INIT_LIST_HEAD(p)
+
+typedef struct hlist_head cfs_hlist_head_t;
+typedef struct hlist_node cfs_hlist_node_t;
+
+#define cfs_hlist_unhashed(h) hlist_unhashed(h)
+
+#define cfs_hlist_empty(h) hlist_empty(h)
-#define cfs_list_for_each_entry_reverse_typed(pos, head, type, member) \
- for (pos = cfs_list_entry((head)->prev, type, member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cfs_list_entry(pos->member.prev, type, member))
+#define __cfs_hlist_del(n) __hlist_del(n)
+#define cfs_hlist_del(n) hlist_del(n)
+#define cfs_hlist_del_init(n) hlist_del_init(n)
+
+#define cfs_hlist_add_head(n, next) hlist_add_head(n, next)
+#define cfs_hlist_add_before(n, next) hlist_add_before(n, next)
+#define cfs_hlist_add_after(n, next) hlist_add_after(n, next)
+
+#define cfs_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
+#define cfs_hlist_for_each(pos, head) hlist_for_each(pos, head)
+#define cfs_hlist_for_each_safe(pos, n, head) \
+ hlist_for_each_safe(pos, n, head)
+#ifdef HAVE_HLIST_FOR_EACH_3ARG
+#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
+ pos = NULL; hlist_for_each_entry(tpos, head, member)
+#else
+#define cfs_hlist_for_each_entry(tpos, pos, head, member) \
+ hlist_for_each_entry(tpos, pos, head, member)
+#endif
+#define cfs_hlist_for_each_entry_continue(tpos, pos, member) \
+ hlist_for_each_entry_continue(tpos, pos, member)
+#define cfs_hlist_for_each_entry_from(tpos, pos, member) \
+ hlist_for_each_entry_from(tpos, pos, member)
+#ifdef HAVE_HLIST_FOR_EACH_3ARG
+#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+	pos = NULL; hlist_for_each_entry_safe(tpos, n, head, member)
+#else
+#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+	hlist_for_each_entry_safe(tpos, pos, n, head, member)
+#endif
+
+#define cfs_list_for_each_entry_typed(pos, head, type, member) \
+ for (pos = list_entry((head)->next, type, member), \
+ prefetch(pos->member.next); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, type, member), \
+ prefetch(pos->member.next))
#define cfs_list_for_each_entry_safe_typed(pos, n, head, type, member) \
- for (pos = cfs_list_entry((head)->next, type, member), \
- n = cfs_list_entry(pos->member.next, type, member); \
- &pos->member != (head); \
- pos = n, n = cfs_list_entry(n->member.next, type, member))
-
-#define cfs_list_for_each_entry_safe_from_typed(pos, n, head, type, member) \
- for (n = cfs_list_entry(pos->member.next, type, member); \
- &pos->member != (head); \
- pos = n, n = cfs_list_entry(n->member.next, type, member))
-
-#define cfs_hlist_for_each_entry_typed(tpos, pos, head, type, member) \
- for (pos = (head)->first; \
- pos && (prefetch(pos->next), 1) && \
- (tpos = cfs_hlist_entry(pos, type, member), 1); \
- pos = pos->next)
+ for (pos = list_entry((head)->next, type, member), \
+ n = list_entry(pos->member.next, type, member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, type, member))
-#define cfs_hlist_for_each_entry_safe_typed(tpos, pos, n, head, type, member) \
- for (pos = (head)->first; \
- pos && (n = pos->next, 1) && \
- (tpos = cfs_hlist_entry(pos, type, member), 1); \
- pos = n)
+#define CFS_HLIST_HEAD_INIT HLIST_HEAD_INIT
+#define CFS_HLIST_HEAD(n) HLIST_HEAD(n)
+#define CFS_INIT_HLIST_HEAD(p) INIT_HLIST_HEAD(p)
+#define CFS_INIT_HLIST_NODE(p) INIT_HLIST_NODE(p)
+
+#define CFS_LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
#endif /* __LIBCFS_LUSTRE_LIST_H__ */
};
struct upcall_cache_entry {
- cfs_list_t ue_hash;
- __u64 ue_key;
+ struct list_head ue_hash;
+ __u64 ue_key;
atomic_t ue_refcount;
- int ue_flags;
+ int ue_flags;
wait_queue_head_t ue_waitq;
- cfs_time_t ue_acquire_expire;
- cfs_time_t ue_expire;
+ cfs_time_t ue_acquire_expire;
+ cfs_time_t ue_expire;
union {
- struct md_identity identity;
+ struct md_identity identity;
} u;
};
};
struct upcall_cache {
- cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE];
+ struct list_head uc_hashtable[UC_CACHE_HASH_SIZE];
spinlock_t uc_lock;
rwlock_t uc_upcall_rwlock;
***************************************************************************/
struct radix_tree_root {
- cfs_list_t list;
- void *rnode;
+ struct list_head list;
+ void *rnode;
};
struct radix_tree_node {
- cfs_list_t _node;
- unsigned long index;
- void *item;
+ struct list_head _node;
+ unsigned long index;
+ void *item;
};
-#define RADIX_TREE_INIT(mask) { \
- NOT_IMPLEMENTED \
+#define RADIX_TREE_INIT(mask) { \
+ NOT_IMPLEMENTED \
}
#define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(mask)
-#define INIT_RADIX_TREE(root, mask) \
-do { \
- CFS_INIT_LIST_HEAD(&((struct radix_tree_root *)root)->list); \
- ((struct radix_tree_root *)root)->rnode = NULL; \
+#define INIT_RADIX_TREE(root, mask) \
+do { \
+ INIT_LIST_HEAD(&((struct radix_tree_root *)root)->list); \
+ ((struct radix_tree_root *)root)->rnode = NULL; \
} while (0)
static inline int radix_tree_insert(struct radix_tree_root *root,
- unsigned long idx, void *item)
+ unsigned long idx, void *item)
{
- struct radix_tree_node *node;
- node = malloc(sizeof(*node));
- if (!node)
- return -ENOMEM;
-
- CFS_INIT_LIST_HEAD(&node->_node);
- node->index = idx;
- node->item = item;
- cfs_list_add_tail(&node->_node, &root->list);
- root->rnode = (void *)1001;
- return 0;
+ struct radix_tree_node *node;
+ node = malloc(sizeof(*node));
+ if (!node)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&node->_node);
+ node->index = idx;
+ node->item = item;
+ list_add_tail(&node->_node, &root->list);
+ root->rnode = (void *)1001;
+ return 0;
}
-static inline struct radix_tree_node *radix_tree_lookup0(struct radix_tree_root *root,
- unsigned long idx)
+static inline struct radix_tree_node *
+radix_tree_lookup0(struct radix_tree_root *root, unsigned long idx)
{
- struct radix_tree_node *node;
+ struct radix_tree_node *node;
- if (cfs_list_empty(&root->list))
- return NULL;
+ if (list_empty(&root->list))
+ return NULL;
- cfs_list_for_each_entry_typed(node, &root->list,
- struct radix_tree_node, _node)
- if (node->index == idx)
- return node;
+ list_for_each_entry(node, &root->list, _node)
+ if (node->index == idx)
+ return node;
- return NULL;
+ return NULL;
}
static inline void *radix_tree_lookup(struct radix_tree_root *root,
- unsigned long idx)
+ unsigned long idx)
{
- struct radix_tree_node *node = radix_tree_lookup0(root, idx);
+ struct radix_tree_node *node = radix_tree_lookup0(root, idx);
- if (node)
- return node->item;
- return node;
+ if (node)
+ return node->item;
+ return node;
}
static inline void *radix_tree_delete(struct radix_tree_root *root,
- unsigned long idx)
+ unsigned long idx)
{
- struct radix_tree_node *p = radix_tree_lookup0(root, idx);
- void *item;
+ struct radix_tree_node *p = radix_tree_lookup0(root, idx);
+ void *item;
- if (p == NULL)
- return NULL;
+ if (p == NULL)
+ return NULL;
- cfs_list_del_init(&p->_node);
- item = p->item;
- free(p);
- if (cfs_list_empty(&root->list))
- root->rnode = NULL;
+ list_del_init(&p->_node);
+ item = p->item;
+ free(p);
+ if (list_empty(&root->list))
+ root->rnode = NULL;
- return item;
+ return item;
}
static inline unsigned int
#define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
struct page {
- void *addr;
- unsigned long index;
- cfs_list_t list;
- unsigned long private;
-
- /* internally used by liblustre file i/o */
- int _offset;
- int _count;
+ void *addr;
+ unsigned long index;
+ struct list_head list;
+ unsigned long private;
+
+ /* internally used by liblustre file i/o */
+ int _offset;
+ int _count;
#ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE
- int _managed;
+ int _managed;
#endif
- cfs_list_t _node;
+ struct list_head _node;
};
*/
typedef struct cfs_waitlink {
- cfs_list_t sleeping;
- void *process;
+ struct list_head sleeping;
+ void *process;
} wait_queue_t;
typedef struct cfs_waitq {
- cfs_list_t sleepers;
+ struct list_head sleepers;
} wait_queue_head_t;
#define CFS_DECL_WAITQ(wq) wait_queue_head_t wq
*/
struct timer_list {
- cfs_list_t tl_list;
+ struct list_head tl_list;
void (*function)(ulong_ptr_t unused);
ulong_ptr_t data;
long expires;
extern void kmem_cache_free(struct kmem_cache *, void *);
/*
- * shrinker
+ * shrinker
*/
typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
struct shrinker {
- shrink_callback cb;
+ shrink_callback cb;
int seeks; /* seeks to recreate an obj */
/* These are for internal use */
- cfs_list_t list;
+ struct list_head list;
long nr; /* objs pending delete */
};
void stop_shrinker_timer();
/*
- * Page allocator slabs
+ * Page allocator slabs
*/
extern struct kmem_cache *cfs_page_t_slab;
#define CFS_SYMBOL_LEN 64
struct cfs_symbol {
- char name[CFS_SYMBOL_LEN];
- void *value;
- int ref;
- cfs_list_t sym_list;
+ char name[CFS_SYMBOL_LEN];
+ void *value;
+ int ref;
+ struct list_head sym_list;
};
extern int cfs_symbol_register(const char *, const void *);
/* the mantaner of the cfs_sysctl_table trees */
struct ctl_table_header
{
- struct ctl_table * ctl_table;
- cfs_list_t ctl_entry;
+ struct ctl_table *ctl_table;
+ struct list_head ctl_entry;
};
/* proc root entries, support routines */
* Helpers for iteration over list_head-s in seq_files
*/
-extern cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos);
-extern cfs_list_t *seq_list_start_head(cfs_list_t *head, loff_t pos);
-extern cfs_list_t *seq_list_next(void *v, cfs_list_t *head, loff_t *ppos);
+extern struct list_head *seq_list_start(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos);
/*
* declaration of proc kernel process routines
unsigned int flags;
spinlock_t guard;
- cfs_list_t waiters;
-
+ struct list_head waiters;
} wait_queue_head_t;
#define CFS_WAITQ_CHAN_NORMAL (0)
#define CFS_WAITQ_CHAN_FORWARD (1)
-
-
typedef struct cfs_waitlink_channel {
- cfs_list_t link;
- wait_queue_head_t * waitq;
- wait_queue_t * waitl;
+ struct list_head link;
+ wait_queue_head_t *waitq;
+ wait_queue_t *waitl;
} cfs_waitlink_channel_t;
struct cfs_waitlink {
// ks definitions
//
-// iovec is defined in libcfs: winnt_prim.h
+// iovec is defined in libcfs: winnt_prim.h
// lnetkiov_t is defined in lnet/types.h
typedef struct socket ks_tconn_t, cfs_socket_t;
#define TCP_SOCKET_WINDOW 6
-/* Flags we can use with send/ and recv.
+/* Flags we can use with send/ and recv.
Added those for 1003.1g not all are supported yet
*/
-
+
#define MSG_OOB 1
#define MSG_PEEK 2
#define MSG_DONTROUTE 4
typedef struct _KS_TSDU {
- ULONG Magic; /* magic */
- ULONG Flags; /* flags */
+ ULONG Magic; /* magic */
+ ULONG Flags; /* flags */
- cfs_list_t Link; /* link list */
+ struct list_head Link; /* link list */
- ULONG TotalLength; /* total size of KS_TSDU */
- ULONG StartOffset; /* offset of the first Tsdu unit */
- ULONG LastOffset; /* end offset of the last Tsdu unit */
+ ULONG TotalLength; /* total size of KS_TSDU */
+ ULONG StartOffset; /* offset of the first Tsdu unit */
+ ULONG LastOffset; /* end offset of the last Tsdu unit */
/*
union {
} KS_TSDU_MDL, *PKS_TSDU_MDL;
typedef struct ks_engine_mgr {
- spinlock_t lock;
- int stop;
- event_t exit;
- event_t start;
- cfs_list_t list;
+ spinlock_t lock;
+ int stop;
+ event_t exit;
+ event_t start;
+ struct list_head list;
} ks_engine_mgr_t;
typedef struct ks_engine_slot {
- ks_tconn_t * tconn;
- void * tsdumgr;
- cfs_list_t link;
- int queued;
- ks_engine_mgr_t * emgr;
+ ks_tconn_t *tconn;
+ void *tsdumgr;
+ struct list_head link;
+ int queued;
+ ks_engine_mgr_t *emgr;
} ks_engine_slot_t;
typedef struct _KS_TSDUMGR {
- cfs_list_t TsduList;
+ struct list_head TsduList;
ULONG NumOfTsdu;
ULONG TotalBytes;
KEVENT Event;
//
typedef struct ks_backlogs {
-
- cfs_list_t list; /* list to link the backlog connections */
- int num; /* number of backlogs in the list */
-
+ struct list_head list; /* list to link the backlog connections */
+ int num; /* number of backlogs in the list */
} ks_backlogs_t;
typedef struct ks_daemon {
-
- ks_tconn_t * tconn; /* the listener connection object */
- unsigned short nbacklogs; /* number of listening backlog conns */
- unsigned short port; /* listening port number */
- int shutdown; /* daemon threads is to exit */
- cfs_list_t list; /* to be attached into ks_nal_data_t */
+ ks_tconn_t *tconn; /* the listener connection object */
+ unsigned short nbacklogs; /* number of listening backlog conns */
+ unsigned short port; /* listening port number */
+ int shutdown; /* daemon threads is to exit */
+ struct list_head list; /* to be attached into ks_nal_data_t */
} ks_daemon_t;
atomic_t kstc_refcount; /* reference count of ks_tconn_t */
- cfs_list_t kstc_list; /* linked to global ksocknal_data */
+ struct list_head kstc_list; /* linked to global ksocknal_data */
union {
int kstc_busy; /* referred by ConnectEventCallback ? */
int kstc_accepted; /* the connection is built ready ? */
- cfs_list_t kstc_link; /* linked to parent tdi connection */
- ks_tconn_t * kstc_parent; /* pointers to it's listener parent */
- } child;
+ struct list_head kstc_link; /* linked to parent tdi connection */
+ ks_tconn_t *kstc_parent; /* pointers to it's listener parent */
+ } child;
struct {
ks_tconn_info_t kstc_info; /* Connection Info if Connected */
HANDLE ksnd_pnp_handle; /* the handle for pnp changes */
spinlock_t ksnd_addrs_lock; /* serialize ip address list */
- LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */
- int ksnd_naddrs; /* number of the ip addresses */
+ LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */
+ int ksnd_naddrs; /* number of the ip addresses */
- /*
- * Tdilnd internal defintions
- */
-
- int ksnd_init; /* initialisation state */
+ /*
+ * Tdilnd internal definitions
+ */
+ int ksnd_init; /* initialisation state */
- TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */
+ TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */
spinlock_t ksnd_tconn_lock; /* tdi connections access lock*/
int ksnd_ntconns; /* number of tconns in list */
- cfs_list_t ksnd_tconns; /* tdi connections list */
+ struct list_head ksnd_tconns; /* tdi connections list */
struct kmem_cache *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/
event_t ksnd_tconn_exit; /* event signal by last tconn */
spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
- int ksnd_ntsdus; /* number of tsdu buffers allocated */
- ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */
- struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
-
- int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */
- cfs_list_t ksnd_freetsdus; /* List of the freed Tsdu buffer. */
+ int ksnd_ntsdus; /* number of tsdu buffers allocated */
+ ulong ksnd_tsdu_size; /* the size of a single tsdu buffer */
+ struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
- int ksnd_engine_nums; /* number of tcp sending engine threads */
- ks_engine_mgr_t *ksnd_engine_mgr; /* tcp sending engine structure */
+ int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */
+ struct list_head ksnd_freetsdus; /* List of the freed Tsdu buffer. */
+ int ksnd_engine_nums; /* number of tcp sending engine threads */
+ ks_engine_mgr_t *ksnd_engine_mgr; /* tcp sending engine structure */
} ks_tdi_data_t;
int
extern void zfree(zone_t zone, void *addr);
struct cfs_zone_nob {
- struct list_head *z_nob; /* Pointer to z_link */
- struct list_head z_link; /* Do NOT access it directly */
+ struct list_head *z_nob; /* Pointer to z_link */
+ struct list_head z_link; /* Do NOT access it directly */
};
static struct cfs_zone_nob cfs_zone_nob;
return NULL;
}
- cname = _MALLOC(strlen(name) + 1, M_TEMP, M_WAITOK);
- LASSERT(cname != NULL);
- mc->mc_cache = zinit(objsize, (KMEM_MAX_ZONE * objsize), 0, strcpy(cname, name));
- mc->mc_size = objsize;
- CFS_INIT_LIST_HEAD(&mc->mc_link);
- strncpy(mc->mc_name, name, 1 + strlen(name));
- return mc;
+ cname = _MALLOC(strlen(name) + 1, M_TEMP, M_WAITOK);
+ LASSERT(cname != NULL);
+ mc->mc_cache = zinit(objsize, (KMEM_MAX_ZONE * objsize), 0, strcpy(cname, name));
+ mc->mc_size = objsize;
+ INIT_LIST_HEAD(&mc->mc_link);
+ strncpy(mc->mc_name, name, 1 + strlen(name));
+ return mc;
}
void mem_cache_destroy(struct kmem_cache *mc)
{
- /*
- * zone can NOT be destroyed after creating,
- * so just keep it in list.
- *
- * We will not lost a zone after we unload
- * libcfs, it can be found by from libcfs.zone
- */
- return;
+ /*
+ * zone can NOT be destroyed after creating,
+ * so just keep it in list.
+ *
+ * We will not lose a zone after we unload
+ * libcfs; it can be found from libcfs.zone
+ */
+ return;
}
#define mem_cache_alloc(mc) zalloc((mc)->mc_cache)
assert(cfs_sysctl_isvalid());
- nob = _MALLOC(sizeof(struct cfs_zone_nob),
- M_TEMP, M_WAITOK | M_ZERO);
- CFS_INIT_LIST_HEAD(&nob->z_link);
- nob->z_nob = &nob->z_link;
- oid = cfs_alloc_sysctl_struct(NULL, OID_AUTO, CTLFLAG_RD | CTLFLAG_KERN,
- "zone", nob, sizeof(struct cfs_zone_nob));
- if (oid == NULL) {
- _FREE(nob, M_TEMP);
- return -ENOMEM;
- }
- sysctl_register_oid(oid);
-
- cfs_zone_nob.z_nob = nob->z_nob;
- }
+ nob = _MALLOC(sizeof(struct cfs_zone_nob),
+ M_TEMP, M_WAITOK | M_ZERO);
+ INIT_LIST_HEAD(&nob->z_link);
+ nob->z_nob = &nob->z_link;
+ oid = cfs_alloc_sysctl_struct(NULL, OID_AUTO, CTLFLAG_RD | CTLFLAG_KERN,
+ "zone", nob, sizeof(struct cfs_zone_nob));
+ if (oid == NULL) {
+ _FREE(nob, M_TEMP);
+ return -ENOMEM;
+ }
+ sysctl_register_oid(oid);
+
+ cfs_zone_nob.z_nob = nob->z_nob;
+ }
spin_lock_init(&cfs_zone_guard);
#endif
- CFS_INIT_LIST_HEAD(&page_death_row);
+ INIT_LIST_HEAD(&page_death_row);
spin_lock_init(&page_death_row_phylax);
raw_page_cache = kmem_cache_create("raw-page", PAGE_CACHE_SIZE,
- 0, 0, NULL);
+ 0, 0, NULL);
return 0;
}
struct cfs_symbol *sym = NULL;
struct cfs_symbol *new = NULL;
- MALLOC(new, struct cfs_symbol *, sizeof(struct cfs_symbol), M_TEMP, M_WAITOK|M_ZERO);
- strncpy(new->name, name, CFS_SYMBOL_LEN);
- new->value = (void *)value;
- new->ref = 0;
- CFS_INIT_LIST_HEAD(&new->sym_list);
-
- down_write(&cfs_symbol_lock);
- list_for_each(walker, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
- if (!strcmp(sym->name, name)) {
- up_write(&cfs_symbol_lock);
- FREE(new, M_TEMP);
- return KERN_NAME_EXISTS;
- }
-
- }
- list_add_tail(&new->sym_list, &cfs_symbol_list);
- up_write(&cfs_symbol_lock);
-
- return KERN_SUCCESS;
+ MALLOC(new, struct cfs_symbol *, sizeof(struct cfs_symbol), M_TEMP, M_WAITOK|M_ZERO);
+ strncpy(new->name, name, CFS_SYMBOL_LEN);
+ new->value = (void *)value;
+ new->ref = 0;
+ INIT_LIST_HEAD(&new->sym_list);
+
+ down_write(&cfs_symbol_lock);
+ list_for_each(walker, &cfs_symbol_list) {
+ sym = list_entry (walker, struct cfs_symbol, sym_list);
+ if (!strcmp(sym->name, name)) {
+ up_write(&cfs_symbol_lock);
+ FREE(new, M_TEMP);
+ return KERN_NAME_EXISTS;
+ }
+ }
+ list_add_tail(&new->sym_list, &cfs_symbol_list);
+ up_write(&cfs_symbol_lock);
+ return KERN_SUCCESS;
}
kern_return_t
cfs_symbol_unregister(const char *name)
{
- struct list_head *walker;
- struct list_head *nxt;
- struct cfs_symbol *sym = NULL;
-
- down_write(&cfs_symbol_lock);
- list_for_each_safe(walker, nxt, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
- if (!strcmp(sym->name, name)) {
- LASSERT(sym->ref == 0);
- list_del (&sym->sym_list);
- FREE(sym, M_TEMP);
- break;
- }
- }
- up_write(&cfs_symbol_lock);
+ struct list_head *walker;
+ struct list_head *nxt;
+ struct cfs_symbol *sym = NULL;
+
+ down_write(&cfs_symbol_lock);
+ list_for_each_safe(walker, nxt, &cfs_symbol_list) {
+ sym = list_entry(walker, struct cfs_symbol, sym_list);
+ if (!strcmp(sym->name, name)) {
+ LASSERT(sym->ref == 0);
+ list_del(&sym->sym_list);
+ FREE(sym, M_TEMP);
+ break;
+ }
+ }
+ up_write(&cfs_symbol_lock);
- return KERN_SUCCESS;
+ return KERN_SUCCESS;
}
void
cfs_symbol_init()
{
- CFS_INIT_LIST_HEAD(&cfs_symbol_list);
- init_rwsem(&cfs_symbol_lock);
+ INIT_LIST_HEAD(&cfs_symbol_list);
+ init_rwsem(&cfs_symbol_lock);
}
void
cfs_symbol_fini()
{
- struct list_head *walker;
- struct cfs_symbol *sym = NULL;
+ struct list_head *walker;
+ struct cfs_symbol *sym = NULL;
- down_write(&cfs_symbol_lock);
- list_for_each(walker, &cfs_symbol_list) {
- sym = list_entry (walker, struct cfs_symbol, sym_list);
- LASSERT(sym->ref == 0);
- list_del (&sym->sym_list);
- FREE(sym, M_TEMP);
- }
- up_write(&cfs_symbol_lock);
+ down_write(&cfs_symbol_lock);
+ list_for_each(walker, &cfs_symbol_list) {
+ sym = list_entry(walker, struct cfs_symbol, sym_list);
+ LASSERT(sym->ref == 0);
+ list_del(&sym->sym_list);
+ FREE(sym, M_TEMP);
+ }
+ up_write(&cfs_symbol_lock);
- fini_rwsem(&cfs_symbol_lock);
- return;
+ fini_rwsem(&cfs_symbol_lock);
+ return;
}
struct kernel_thread_arg
SLASSERT(chan != NULL);
kspin_init(&chan->guard);
- CFS_INIT_LIST_HEAD(&chan->waiters);
+ INIT_LIST_HEAD(&chan->waiters);
ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}
{
SLASSERT(link != NULL);
- CFS_INIT_LIST_HEAD(&link->linkage);
+ INIT_LIST_HEAD(&link->linkage);
link->flags = 0;
link->event = current_thread();
link->hits = 0;
* debugging check for recursive call to libcfs_debug_msg()
*/
if (trace_owner == current_thread()) {
- /*
- * Cannot assert here.
- */
+ /*
+ * Cannot assert here.
+ */
printk(KERN_EMERG "recursive call to %s", __FUNCTION__);
/*
- * "The death of God left the angels in a strange position."
+ * "The death of God left the angels in a strange position."
*/
cfs_enter_debugger();
}
tcd = &trace_data[0].tcd;
- CFS_INIT_LIST_HEAD(&pages);
+ INIT_LIST_HEAD(&pages);
if (get_preemption_level() == 0)
nr_pages = trace_refill_stock(tcd, GFP_IOFS, &pages);
else
* new element is always added to head of hlist
*/
typedef struct {
- cfs_hlist_head_t hh_head; /**< entries list */
+ struct hlist_head hh_head; /**< entries list */
} cfs_hash_head_t;
static int
cfs_hash_hh_hhead_size(cfs_hash_t *hs)
{
- return sizeof(cfs_hash_head_t);
+ return sizeof(cfs_hash_head_t);
}
-static cfs_hlist_head_t *
+static struct hlist_head *
cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
- cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
+ cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].hh_head;
+ return &head[bd->bd_offset].hh_head;
}
static int
cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
- return -1; /* unknown depth */
+ hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
+ return -1; /* unknown depth */
}
static int
cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hlist_del_init(hnode);
- return -1; /* unknown depth */
+ hlist_del_init(hnode);
+ return -1; /* unknown depth */
}
/**
* new element is always added to head of hlist
*/
typedef struct {
- cfs_hlist_head_t hd_head; /**< entries list */
- unsigned int hd_depth; /**< list length */
+ struct hlist_head hd_head; /**< entries list */
+ unsigned int hd_depth; /**< list length */
} cfs_hash_head_dep_t;
static int
cfs_hash_hd_hhead_size(cfs_hash_t *hs)
{
- return sizeof(cfs_hash_head_dep_t);
+ return sizeof(cfs_hash_head_dep_t);
}
-static cfs_hlist_head_t *
+static struct hlist_head *
cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
- cfs_hash_head_dep_t *head;
+ cfs_hash_head_dep_t *head;
- head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].hd_head;
+ head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
+ return &head[bd->bd_offset].hd_head;
}
static int
cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
- cfs_hash_head_dep_t, hd_head);
- cfs_hlist_add_head(hnode, &hh->hd_head);
- return ++hh->hd_depth;
+ cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
+ cfs_hash_head_dep_t, hd_head);
+ hlist_add_head(hnode, &hh->hd_head);
+ return ++hh->hd_depth;
}
static int
cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
- cfs_hash_head_dep_t, hd_head);
- cfs_hlist_del_init(hnode);
- return --hh->hd_depth;
+ cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
+ cfs_hash_head_dep_t, hd_head);
+ hlist_del_init(hnode);
+ return --hh->hd_depth;
}
/**
* new element is always added to tail of hlist
*/
typedef struct {
- cfs_hlist_head_t dh_head; /**< entries list */
- cfs_hlist_node_t *dh_tail; /**< the last entry */
+ struct hlist_head dh_head; /**< entries list */
+ struct hlist_node *dh_tail; /**< the last entry */
} cfs_hash_dhead_t;
static int
cfs_hash_dh_hhead_size(cfs_hash_t *hs)
{
- return sizeof(cfs_hash_dhead_t);
+ return sizeof(cfs_hash_dhead_t);
}
-static cfs_hlist_head_t *
+static struct hlist_head *
cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
- cfs_hash_dhead_t *head;
+ cfs_hash_dhead_t *head;
- head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].dh_head;
+ head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
+ return &head[bd->bd_offset].dh_head;
}
static int
cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
- cfs_hash_dhead_t, dh_head);
+ cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
+ cfs_hash_dhead_t, dh_head);
- if (dh->dh_tail != NULL) /* not empty */
- cfs_hlist_add_after(dh->dh_tail, hnode);
- else /* empty list */
- cfs_hlist_add_head(hnode, &dh->dh_head);
- dh->dh_tail = hnode;
- return -1; /* unknown depth */
+ if (dh->dh_tail != NULL) /* not empty */
+ hlist_add_after(dh->dh_tail, hnode);
+ else /* empty list */
+ hlist_add_head(hnode, &dh->dh_head);
+ dh->dh_tail = hnode;
+ return -1; /* unknown depth */
}
static int
cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnd)
+ struct hlist_node *hnd)
{
- cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
- cfs_hash_dhead_t, dh_head);
+ cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
+ cfs_hash_dhead_t, dh_head);
- if (hnd->next == NULL) { /* it's the tail */
- dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
- container_of(hnd->pprev, cfs_hlist_node_t, next);
- }
- cfs_hlist_del_init(hnd);
- return -1; /* unknown depth */
+ if (hnd->next == NULL) { /* it's the tail */
+ dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
+ container_of(hnd->pprev, struct hlist_node, next);
+ }
+ hlist_del_init(hnd);
+ return -1; /* unknown depth */
}
/**
* new element is always added to tail of hlist
*/
typedef struct {
- cfs_hlist_head_t dd_head; /**< entries list */
- cfs_hlist_node_t *dd_tail; /**< the last entry */
- unsigned int dd_depth; /**< list length */
+ struct hlist_head dd_head; /**< entries list */
+ struct hlist_node *dd_tail; /**< the last entry */
+ unsigned int dd_depth; /**< list length */
} cfs_hash_dhead_dep_t;
static int
cfs_hash_dd_hhead_size(cfs_hash_t *hs)
{
- return sizeof(cfs_hash_dhead_dep_t);
+ return sizeof(cfs_hash_dhead_dep_t);
}
-static cfs_hlist_head_t *
+static struct hlist_head *
cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
- cfs_hash_dhead_dep_t *head;
+ cfs_hash_dhead_dep_t *head;
- head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
- return &head[bd->bd_offset].dd_head;
+ head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
+ return &head[bd->bd_offset].dd_head;
}
static int
cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
- cfs_hash_dhead_dep_t, dd_head);
+ cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
+ cfs_hash_dhead_dep_t, dd_head);
- if (dh->dd_tail != NULL) /* not empty */
- cfs_hlist_add_after(dh->dd_tail, hnode);
- else /* empty list */
- cfs_hlist_add_head(hnode, &dh->dd_head);
- dh->dd_tail = hnode;
- return ++dh->dd_depth;
+ if (dh->dd_tail != NULL) /* not empty */
+ hlist_add_after(dh->dd_tail, hnode);
+ else /* empty list */
+ hlist_add_head(hnode, &dh->dd_head);
+ dh->dd_tail = hnode;
+ return ++dh->dd_depth;
}
static int
cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnd)
+ struct hlist_node *hnd)
{
- cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
- cfs_hash_dhead_dep_t, dd_head);
+ cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
+ cfs_hash_dhead_dep_t, dd_head);
- if (hnd->next == NULL) { /* it's the tail */
- dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
- container_of(hnd->pprev, cfs_hlist_node_t, next);
- }
- cfs_hlist_del_init(hnd);
- return --dh->dd_depth;
+ if (hnd->next == NULL) { /* it's the tail */
+ dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
+ container_of(hnd->pprev, struct hlist_node, next);
+ }
+ hlist_del_init(hnd);
+ return --dh->dd_depth;
}
static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- int rc;
+ int rc;
rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
cfs_hash_bd_dep_record(hs, bd, rc);
void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
hs->hs_hops->hop_hnode_del(hs, bd, hnode);
void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
- cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode)
+ cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
{
cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;
-static cfs_hlist_node_t *
+static struct hlist_node *
cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- const void *key, cfs_hlist_node_t *hnode,
- cfs_hash_lookup_intent_t intent)
+ const void *key, struct hlist_node *hnode,
+ cfs_hash_lookup_intent_t intent)
{
- cfs_hlist_head_t *hhead = cfs_hash_bd_hhead(hs, bd);
- cfs_hlist_node_t *ehnode;
- cfs_hlist_node_t *match;
- int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
+ struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
+ struct hlist_node *ehnode;
+ struct hlist_node *match;
+ int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
- /* with this function, we can avoid a lot of useless refcount ops,
- * which are expensive atomic operations most time. */
- match = intent_add ? NULL : hnode;
- cfs_hlist_for_each(ehnode, hhead) {
- if (!cfs_hash_keycmp(hs, key, ehnode))
- continue;
+ /* with this function, we can avoid a lot of useless refcount ops,
+ * which are expensive atomic operations most of the time. */
+ match = intent_add ? NULL : hnode;
+ hlist_for_each(ehnode, hhead) {
+ if (!cfs_hash_keycmp(hs, key, ehnode))
+ continue;
if (match != NULL && match != ehnode) /* can't match */
continue;
return hnode;
}
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
- return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
- CFS_HS_LOOKUP_IT_FIND);
+ return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
+ CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
- CFS_HS_LOOKUP_IT_PEEK);
+ CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- const void *key, cfs_hlist_node_t *hnode,
+ const void *key, struct hlist_node *hnode,
int noref)
{
- return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
- CFS_HS_LOOKUP_IT_ADD |
- (!noref * CFS_HS_LOOKUP_MASK_REF));
+ return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
+ CFS_HS_LOOKUP_IT_ADD |
+ (!noref * CFS_HS_LOOKUP_MASK_REF));
}
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- const void *key, cfs_hlist_node_t *hnode)
+ const void *key, struct hlist_node *hnode)
{
- /* hnode can be NULL, we find the first item with @key */
- return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
- CFS_HS_LOOKUP_IT_FINDDEL);
+ /* hnode can be NULL, we find the first item with @key */
+ return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
+ CFS_HS_LOOKUP_IT_FINDDEL);
}
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
}
}
-static cfs_hlist_node_t *
+static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
- unsigned n, const void *key)
+ unsigned n, const void *key)
{
- cfs_hlist_node_t *ehnode;
- unsigned i;
+ struct hlist_node *ehnode;
+ unsigned i;
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
- CFS_HS_LOOKUP_IT_FIND);
- if (ehnode != NULL)
- return ehnode;
- }
- return NULL;
+ cfs_hash_for_each_bd(bds, n, i) {
+ ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
+ CFS_HS_LOOKUP_IT_FIND);
+ if (ehnode != NULL)
+ return ehnode;
+ }
+ return NULL;
}
-static cfs_hlist_node_t *
+static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
- cfs_hash_bd_t *bds, unsigned n, const void *key,
- cfs_hlist_node_t *hnode, int noref)
+ cfs_hash_bd_t *bds, unsigned n, const void *key,
+ struct hlist_node *hnode, int noref)
{
- cfs_hlist_node_t *ehnode;
- int intent;
- unsigned i;
+ struct hlist_node *ehnode;
+ int intent;
+ unsigned i;
LASSERT(hnode != NULL);
intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
return hnode;
}
-static cfs_hlist_node_t *
+static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
- unsigned n, const void *key,
- cfs_hlist_node_t *hnode)
+ unsigned n, const void *key,
+ struct hlist_node *hnode)
{
- cfs_hlist_node_t *ehnode;
- unsigned i;
+ struct hlist_node *ehnode;
+ unsigned i;
- cfs_hash_for_each_bd(bds, n, i) {
- ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
- CFS_HS_LOOKUP_IT_FINDDEL);
- if (ehnode != NULL)
- return ehnode;
- }
- return NULL;
+ cfs_hash_for_each_bd(bds, n, i) {
+ ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
+ CFS_HS_LOOKUP_IT_FINDDEL);
+ if (ehnode != NULL)
+ return ehnode;
+ }
+ return NULL;
}
static void
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
const void *key)
{
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
- const void *key, cfs_hlist_node_t *hnode,
- int noref)
+ const void *key, struct hlist_node *hnode,
+ int noref)
{
- return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
- hnode, noref);
+ return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
+ hnode, noref);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
-cfs_hlist_node_t *
+struct hlist_node *
cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
- const void *key, cfs_hlist_node_t *hnode)
+ const void *key, struct hlist_node *hnode)
{
- return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
+ return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
min(old_size, new_size) * sizeof(*old_bkts));
}
- for (i = old_size; i < new_size; i++) {
- cfs_hlist_head_t *hhead;
- cfs_hash_bd_t bd;
+ for (i = old_size; i < new_size; i++) {
+ struct hlist_head *hhead;
+ cfs_hash_bd_t bd;
LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
if (new_bkts[i] == NULL) {
return NULL;
}
- new_bkts[i]->hsb_index = i;
- new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
- new_bkts[i]->hsb_depmax = -1; /* unknown */
- bd.bd_bucket = new_bkts[i];
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
- CFS_INIT_HLIST_HEAD(hhead);
+ new_bkts[i]->hsb_index = i;
+ new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
+ new_bkts[i]->hsb_depmax = -1; /* unknown */
+ bd.bd_bucket = new_bkts[i];
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
+ INIT_HLIST_HEAD(hhead);
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_no_bktlock(hs))
static void
cfs_hash_destroy(cfs_hash_t *hs)
{
- cfs_hlist_node_t *hnode;
- cfs_hlist_node_t *pos;
- cfs_hash_bd_t bd;
- int i;
- ENTRY;
+ struct hlist_node *hnode;
+ struct hlist_node *pos;
+ cfs_hash_bd_t bd;
+ int i;
+ ENTRY;
- LASSERT(hs != NULL);
- LASSERT(!cfs_hash_is_exiting(hs) &&
- !cfs_hash_is_iterating(hs));
+ LASSERT(hs != NULL);
+ LASSERT(!cfs_hash_is_exiting(hs) &&
+ !cfs_hash_is_iterating(hs));
/**
* prohibit further rehashes, don't need any lock because
LASSERT(hs->hs_buckets != NULL &&
hs->hs_rehash_buckets == NULL);
- cfs_hash_for_each_bucket(hs, &bd, i) {
- cfs_hlist_head_t *hhead;
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct hlist_head *hhead;
- LASSERT(bd.bd_bucket != NULL);
- /* no need to take this lock, just for consistent code */
- cfs_hash_bd_lock(hs, &bd, 1);
+ LASSERT(bd.bd_bucket != NULL);
+ /* no need to take this lock, just for consistent code */
+ cfs_hash_bd_lock(hs, &bd, 1);
cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- cfs_hlist_for_each_safe(hnode, pos, hhead) {
- LASSERTF(!cfs_hash_with_assert_empty(hs),
- "hash %s bucket %u(%u) is not "
- " empty: %u items left\n",
- hs->hs_name, bd.bd_bucket->hsb_index,
- bd.bd_offset, bd.bd_bucket->hsb_count);
- /* can't assert key valicate, because we
- * can interrupt rehash */
- cfs_hash_bd_del_locked(hs, &bd, hnode);
- cfs_hash_exit(hs, hnode);
- }
- }
+ hlist_for_each_safe(hnode, pos, hhead) {
+ LASSERTF(!cfs_hash_with_assert_empty(hs),
+ "hash %s bucket %u(%u) is not "
+ " empty: %u items left\n",
+ hs->hs_name, bd.bd_bucket->hsb_index,
+ bd.bd_offset, bd.bd_bucket->hsb_count);
+ /* can't assert key validity, because we
+ * can interrupt rehash */
+ cfs_hash_bd_del_locked(hs, &bd, hnode);
+ cfs_hash_exit(hs, hnode);
+ }
+ }
LASSERT(bd.bd_bucket->hsb_count == 0);
cfs_hash_bd_unlock(hs, &bd, 1);
cond_resched();
* ops->hs_get function will be called when the item is added.
*/
void
-cfs_hash_add(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
+cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
cfs_hash_bd_t bd;
int bits;
- LASSERT(cfs_hlist_unhashed(hnode));
+ LASSERT(hlist_unhashed(hnode));
cfs_hash_lock(hs, 0);
cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
}
EXPORT_SYMBOL(cfs_hash_add);
-static cfs_hlist_node_t *
+static struct hlist_node *
cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode, int noref)
+ struct hlist_node *hnode, int noref)
{
- cfs_hlist_node_t *ehnode;
- cfs_hash_bd_t bds[2];
- int bits = 0;
+ struct hlist_node *ehnode;
+ cfs_hash_bd_t bds[2];
+ int bits = 0;
- LASSERT(cfs_hlist_unhashed(hnode));
+ LASSERT(hlist_unhashed(hnode));
cfs_hash_lock(hs, 0);
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
* Returns 0 on success or -EALREADY on key collisions.
*/
int
-cfs_hash_add_unique(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
+cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
- return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
- -EALREADY : 0;
+ return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
+ -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
*/
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
- cfs_hlist_node_t *hnode)
+ struct hlist_node *hnode)
{
- hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
+ hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
- return cfs_hash_object(hs, hnode);
+ return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
* on the removed object.
*/
void *
-cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
+cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
void *obj = NULL;
int bits = 0;
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
/* NB: do nothing if @hnode is not in hash table */
- if (hnode == NULL || !cfs_hlist_unhashed(hnode)) {
+ if (hnode == NULL || !hlist_unhashed(hnode)) {
if (bds[1].bd_bucket == NULL && hnode != NULL) {
cfs_hash_bd_del_locked(hs, &bds[0], hnode);
} else {
cfs_hash_lookup(cfs_hash_t *hs, const void *key)
{
void *obj = NULL;
- cfs_hlist_node_t *hnode;
+ struct hlist_node *hnode;
cfs_hash_bd_t bds[2];
cfs_hash_lock(hs, 0);
*/
static __u64
cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
- void *data, int remove_safe)
-{
- cfs_hlist_node_t *hnode;
- cfs_hlist_node_t *pos;
- cfs_hash_bd_t bd;
- __u64 count = 0;
- int excl = !!remove_safe;
- int loop = 0;
- int i;
- ENTRY;
-
- cfs_hash_for_each_enter(hs);
-
- cfs_hash_lock(hs, 0);
- LASSERT(!cfs_hash_is_rehashing(hs));
-
- cfs_hash_for_each_bucket(hs, &bd, i) {
- cfs_hlist_head_t *hhead;
-
- cfs_hash_bd_lock(hs, &bd, excl);
- if (func == NULL) { /* only glimpse size */
- count += bd.bd_bucket->hsb_count;
- cfs_hash_bd_unlock(hs, &bd, excl);
- continue;
- }
+ void *data, int remove_safe)
+{
+ struct hlist_node *hnode;
+ struct hlist_node *pos;
+ cfs_hash_bd_t bd;
+ __u64 count = 0;
+ int excl = !!remove_safe;
+ int loop = 0;
+ int i;
+ ENTRY;
+
+ cfs_hash_for_each_enter(hs);
+
+ cfs_hash_lock(hs, 0);
+ LASSERT(!cfs_hash_is_rehashing(hs));
+
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct hlist_head *hhead;
+
+ cfs_hash_bd_lock(hs, &bd, excl);
+ if (func == NULL) { /* only glimpse size */
+ count += bd.bd_bucket->hsb_count;
+ cfs_hash_bd_unlock(hs, &bd, excl);
+ continue;
+ }
- cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- cfs_hlist_for_each_safe(hnode, pos, hhead) {
- cfs_hash_bucket_validate(hs, &bd, hnode);
- count++;
- loop++;
- if (func(hs, &bd, hnode, data)) {
- cfs_hash_bd_unlock(hs, &bd, excl);
- goto out;
- }
- }
- }
- cfs_hash_bd_unlock(hs, &bd, excl);
- if (loop < CFS_HASH_LOOP_HOG)
- continue;
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
+ hlist_for_each_safe(hnode, pos, hhead) {
+ cfs_hash_bucket_validate(hs, &bd, hnode);
+ count++;
+ loop++;
+ if (func(hs, &bd, hnode, data)) {
+ cfs_hash_bd_unlock(hs, &bd, excl);
+ goto out;
+ }
+ }
+ }
+ cfs_hash_bd_unlock(hs, &bd, excl);
+ if (loop < CFS_HASH_LOOP_HOG)
+ continue;
loop = 0;
cfs_hash_unlock(hs, 0);
cond_resched();
cfs_hash_lock(hs, 0);
}
out:
- cfs_hash_unlock(hs, 0);
+ cfs_hash_unlock(hs, 0);
- cfs_hash_for_each_exit(hs);
- RETURN(count);
+ cfs_hash_for_each_exit(hs);
+ RETURN(count);
}
typedef struct {
static int
cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
cfs_hash_cond_arg_t *cond = data;
static int
cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+ struct hlist_node *hnode, void *data)
{
- *(int *)data = 0;
- return 1; /* return 1 to break the loop */
+ *(int *)data = 0;
+ return 1; /* return 1 to break the loop */
}
int
static int
cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
{
- cfs_hlist_node_t *hnode;
- cfs_hlist_node_t *tmp;
+ struct hlist_node *hnode;
+ struct hlist_node *tmp;
cfs_hash_bd_t bd;
__u32 version;
int count = 0;
cfs_hash_lock(hs, 0);
LASSERT(!cfs_hash_is_rehashing(hs));
- cfs_hash_for_each_bucket(hs, &bd, i) {
- cfs_hlist_head_t *hhead;
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct hlist_head *hhead;
cfs_hash_bd_lock(hs, &bd, 0);
version = cfs_hash_bd_version_get(&bd);
void
cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
- cfs_hash_for_each_cb_t func, void *data)
+ cfs_hash_for_each_cb_t func, void *data)
{
- cfs_hlist_head_t *hhead;
- cfs_hlist_node_t *hnode;
- cfs_hash_bd_t bd;
+ struct hlist_head *hhead;
+ struct hlist_node *hnode;
+ cfs_hash_bd_t bd;
cfs_hash_for_each_enter(hs);
cfs_hash_lock(hs, 0);
if (hindex >= CFS_HASH_NHLIST(hs))
goto out;
- cfs_hash_bd_index_set(hs, hindex, &bd);
+ cfs_hash_bd_index_set(hs, hindex, &bd);
- cfs_hash_bd_lock(hs, &bd, 0);
- hhead = cfs_hash_bd_hhead(hs, &bd);
- cfs_hlist_for_each(hnode, hhead) {
- if (func(hs, &bd, hnode, data))
- break;
- }
- cfs_hash_bd_unlock(hs, &bd, 0);
- out:
- cfs_hash_unlock(hs, 0);
- cfs_hash_for_each_exit(hs);
+ cfs_hash_bd_lock(hs, &bd, 0);
+ hhead = cfs_hash_bd_hhead(hs, &bd);
+ hlist_for_each(hnode, hhead) {
+ if (func(hs, &bd, hnode, data))
+ break;
+ }
+ cfs_hash_bd_unlock(hs, &bd, 0);
+out:
+ cfs_hash_unlock(hs, 0);
+ cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
*/
void
cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
- cfs_hash_for_each_cb_t func, void *data)
+ cfs_hash_for_each_cb_t func, void *data)
{
- cfs_hlist_node_t *hnode;
- cfs_hash_bd_t bds[2];
- unsigned i;
+ struct hlist_node *hnode;
+ cfs_hash_bd_t bds[2];
+ unsigned i;
- cfs_hash_lock(hs, 0);
+ cfs_hash_lock(hs, 0);
- cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
+ cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
- cfs_hash_for_each_bd(bds, 2, i) {
- cfs_hlist_head_t *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
+ cfs_hash_for_each_bd(bds, 2, i) {
+ struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
- cfs_hlist_for_each(hnode, hlist) {
- cfs_hash_bucket_validate(hs, &bds[i], hnode);
+ hlist_for_each(hnode, hlist) {
+ cfs_hash_bucket_validate(hs, &bds[i], hnode);
- if (cfs_hash_keycmp(hs, key, hnode)) {
- if (func(hs, &bds[i], hnode, data))
- break;
- }
- }
- }
+ if (cfs_hash_keycmp(hs, key, hnode)) {
+ if (func(hs, &bds[i], hnode, data))
+ break;
+ }
+ }
+ }
- cfs_hash_dual_bd_unlock(hs, bds, 0);
- cfs_hash_unlock(hs, 0);
+ cfs_hash_dual_bd_unlock(hs, bds, 0);
+ cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
static int
cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
{
- cfs_hash_bd_t new;
- cfs_hlist_head_t *hhead;
- cfs_hlist_node_t *hnode;
- cfs_hlist_node_t *pos;
- void *key;
- int c = 0;
-
- /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
- cfs_hash_bd_for_each_hlist(hs, old, hhead) {
- cfs_hlist_for_each_safe(hnode, pos, hhead) {
- key = cfs_hash_key(hs, hnode);
- LASSERT(key != NULL);
- /* Validate hnode is in the correct bucket. */
- cfs_hash_bucket_validate(hs, old, hnode);
- /*
- * Delete from old hash bucket; move to new bucket.
- * ops->hs_key must be defined.
- */
- cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
- hs->hs_rehash_bits, key, &new);
- cfs_hash_bd_move_locked(hs, old, &new, hnode);
- c++;
- }
- }
-
- return c;
+ cfs_hash_bd_t new;
+ struct hlist_head *hhead;
+ struct hlist_node *hnode;
+ struct hlist_node *pos;
+ void *key;
+ int c = 0;
+
+ /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
+ cfs_hash_bd_for_each_hlist(hs, old, hhead) {
+ hlist_for_each_safe(hnode, pos, hhead) {
+ key = cfs_hash_key(hs, hnode);
+ LASSERT(key != NULL);
+ /* Validate hnode is in the correct bucket. */
+ cfs_hash_bucket_validate(hs, old, hnode);
+ /*
+ * Delete from old hash bucket; move to new bucket.
+ * ops->hs_key must be defined.
+ */
+ cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
+ hs->hs_rehash_bits, key, &new);
+ cfs_hash_bd_move_locked(hs, old, &new, hnode);
+ c++;
+ }
+ }
+ return c;
}
static int
* not be called.
*/
void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
- void *new_key, cfs_hlist_node_t *hnode)
+ void *new_key, struct hlist_node *hnode)
{
cfs_hash_bd_t bds[3];
cfs_hash_bd_t old_bds[2];
cfs_hash_bd_t new_bd;
- LASSERT(!cfs_hlist_unhashed(hnode));
+ LASSERT(!hlist_unhashed(hnode));
cfs_hash_lock(hs, 0);
* group from any fs */
/** A single group registration has a uid and a file pointer */
struct kkuc_reg {
- cfs_list_t kr_chain;
+ struct list_head kr_chain;
int kr_uid;
struct file *kr_fp;
void *kr_data;
};
-static cfs_list_t kkuc_groups[KUC_GRP_MAX+1] = {};
+static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {};
/* Protect message sending against remove and adds */
static DECLARE_RWSEM(kg_sem);
down_write(&kg_sem);
if (kkuc_groups[group].next == NULL)
- CFS_INIT_LIST_HEAD(&kkuc_groups[group]);
- cfs_list_add(&reg->kr_chain, &kkuc_groups[group]);
+ INIT_LIST_HEAD(&kkuc_groups[group]);
+ list_add(&reg->kr_chain, &kkuc_groups[group]);
up_write(&kg_sem);
CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
}
down_write(&kg_sem);
- cfs_list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
- if ((uid == 0) || (uid == reg->kr_uid)) {
- cfs_list_del(&reg->kr_chain);
- CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
- reg->kr_uid, reg->kr_fp, group);
- if (reg->kr_fp != NULL)
+ list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
+ if ((uid == 0) || (uid == reg->kr_uid)) {
+ list_del(&reg->kr_chain);
+ CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
+ reg->kr_uid, reg->kr_fp, group);
+ if (reg->kr_fp != NULL)
fput(reg->kr_fp);
if (pdata != NULL)
*pdata = reg->kr_data;
}
up_write(&kg_sem);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(libcfs_kkuc_group_rem);
ENTRY;
down_read(&kg_sem);
- cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
+ list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
if (reg->kr_fp != NULL) {
rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
if (rc == 0)
int rc = 0;
ENTRY;
- if (group > KUC_GRP_MAX) {
- CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
- RETURN(-EINVAL);
- }
+ if (group > KUC_GRP_MAX) {
+ CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
+ RETURN(-EINVAL);
+ }
- /* no link for this group */
- if (kkuc_groups[group].next == NULL)
- RETURN(0);
+ /* no link for this group */
+ if (kkuc_groups[group].next == NULL)
+ RETURN(0);
down_read(&kg_sem);
- cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL) {
- rc = cb_func(reg->kr_data, cb_arg);
- }
- }
+ list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
+ if (reg->kr_fp != NULL) {
+ rc = cb_func(reg->kr_data, cb_arg);
+ }
+ }
up_read(&kg_sem);
- RETURN(rc);
+ RETURN(rc);
}
EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
{
struct cfs_range_expr *expr;
- cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
if (value >= expr->re_lo && value <= expr->re_hi &&
((value - expr->re_lo) % expr->re_stride) == 0)
return 1;
int count = 0;
int i;
- cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
if (((i - expr->re_lo) % expr->re_stride) == 0)
count++;
return -ENOMEM;
count = 0;
- cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
for (i = expr->re_lo; i <= expr->re_hi; i++) {
if (((i - expr->re_lo) % expr->re_stride) == 0)
val[count++] = i;
void
cfs_expr_list_free(struct cfs_expr_list *expr_list)
{
- while (!cfs_list_empty(&expr_list->el_exprs)) {
+ while (!list_empty(&expr_list->el_exprs)) {
struct cfs_range_expr *expr;
- expr = cfs_list_entry(expr_list->el_exprs.next,
+ expr = list_entry(expr_list->el_exprs.next,
struct cfs_range_expr, re_link),
- cfs_list_del(&expr->re_link);
+ list_del(&expr->re_link);
LIBCFS_FREE(expr, sizeof(*expr));
}
{
struct cfs_range_expr *expr;
- cfs_list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
CDEBUG(D_WARNING, "%d-%d/%d\n",
expr->re_lo, expr->re_hi, expr->re_stride);
}
src.ls_str = str;
src.ls_len = len;
- CFS_INIT_LIST_HEAD(&expr_list->el_exprs);
+ INIT_LIST_HEAD(&expr_list->el_exprs);
if (src.ls_str[0] == '[' &&
src.ls_str[src.ls_len - 1] == ']') {
if (rc != 0)
break;
- cfs_list_add_tail(&expr->re_link,
+ list_add_tail(&expr->re_link,
&expr_list->el_exprs);
}
} else {
rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
if (rc == 0) {
- cfs_list_add_tail(&expr->re_link,
+ list_add_tail(&expr->re_link,
&expr_list->el_exprs);
}
}
* \retval none
*/
void
-cfs_expr_list_free_list(cfs_list_t *list)
+cfs_expr_list_free_list(struct list_head *list)
{
struct cfs_expr_list *el;
- while (!cfs_list_empty(list)) {
- el = cfs_list_entry(list->next,
+ while (!list_empty(list)) {
+ el = list_entry(list->next,
struct cfs_expr_list, el_link);
- cfs_list_del(&el->el_link);
+ list_del(&el->el_link);
cfs_expr_list_free(el);
}
}
EXPORT_SYMBOL(cfs_expr_list_free_list);
int
-cfs_ip_addr_parse(char *str, int len, cfs_list_t *list)
+cfs_ip_addr_parse(char *str, int len, struct list_head *list)
{
struct cfs_expr_list *el;
struct cfs_lstr src;
if (rc != 0)
goto out;
- cfs_list_add_tail(&el->el_link, list);
+ list_add_tail(&el->el_link, list);
i++;
}
* \retval 0 otherwise
*/
int
-cfs_ip_addr_match(__u32 addr, cfs_list_t *list)
+cfs_ip_addr_match(__u32 addr, struct list_head *list)
{
struct cfs_expr_list *el;
int i = 0;
- cfs_list_for_each_entry_reverse(el, list, el_link) {
+ list_for_each_entry_reverse(el, list, el_link) {
if (!cfs_expr_list_match(addr & 0xff, el))
return 0;
addr >>= 8;
EXPORT_SYMBOL(cfs_ip_addr_match);
void
-cfs_ip_addr_free(cfs_list_t *list)
+cfs_ip_addr_free(struct list_head *list)
{
cfs_expr_list_free_list(list);
}
goto failed;
}
- cfs_list_for_each_entry(range, &el->el_exprs, re_link) {
+ list_for_each_entry(range, &el->el_exprs, re_link) {
for (i = range->re_lo; i <= range->re_hi; i++) {
if ((i - range->re_lo) % range->re_stride != 0)
continue;
if (!clear)
continue;
- for (j = 0; j < lwt_pages_per_cpu; j++) {
+ for (j = 0; j < lwt_pages_per_cpu; j++) {
memset(p->lwtp_events, 0, PAGE_CACHE_SIZE);
- p = cfs_list_entry (p->lwtp_list.next,
- lwt_page_t, lwtp_list);
- }
- }
+ p = list_entry(p->lwtp_list.next,
+ lwt_page_t, lwtp_list);
+ }
+ }
if (enable) {
lwt_enabled = 1;
bytes_per_page))
return -EFAULT;
- user_ptr = ((char *)user_ptr) + bytes_per_page;
- p = cfs_list_entry(p->lwtp_list.next,
- lwt_page_t, lwtp_list);
- }
- }
-
- return (0);
+ user_ptr = ((char *)user_ptr) + bytes_per_page;
+ p = list_entry(p->lwtp_list.next,
+ lwt_page_t, lwtp_list);
+ }
+ }
+ return (0);
}
int lwt_init ()
memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
if (j == 0) {
- CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
+ INIT_LIST_HEAD (&lwtp->lwtp_list);
lwt_cpus[i].lwtc_current_page = lwtp;
} else {
- cfs_list_add (&lwtp->lwtp_list,
- &lwt_cpus[i].lwtc_current_page->lwtp_list);
+ list_add(&lwtp->lwtp_list,
+ &lwt_cpus[i].lwtc_current_page->lwtp_list);
}
}
while (lwt_cpus[i].lwtc_current_page != NULL) {
lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
- if (cfs_list_empty (&lwtp->lwtp_list)) {
- lwt_cpus[i].lwtc_current_page = NULL;
- } else {
- lwt_cpus[i].lwtc_current_page =
- cfs_list_entry (lwtp->lwtp_list.next,
- lwt_page_t, lwtp_list);
-
- cfs_list_del (&lwtp->lwtp_list);
- }
-
- __free_page (lwtp->lwtp_page);
- LIBCFS_FREE (lwtp, sizeof (*lwtp));
- }
+ if (list_empty (&lwtp->lwtp_list)) {
+ lwt_cpus[i].lwtc_current_page = NULL;
+ } else {
+ lwt_cpus[i].lwtc_current_page =
+ list_entry(lwtp->lwtp_list.next,
+ lwt_page_t, lwtp_list);
+ list_del (&lwtp->lwtp_list);
+ }
+ __free_page (lwtp->lwtp_page);
+ LIBCFS_FREE (lwtp, sizeof (*lwtp));
+ }
}
EXPORT_SYMBOL(lwt_enabled);
}
static struct rw_semaphore ioctl_list_sem;
-static cfs_list_t ioctl_list;
+static struct list_head ioctl_list;
int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
{
- int rc = 0;
+ int rc = 0;
down_write(&ioctl_list_sem);
- if (!cfs_list_empty(&hand->item))
- rc = -EBUSY;
- else
- cfs_list_add_tail(&hand->item, &ioctl_list);
+ if (!list_empty(&hand->item))
+ rc = -EBUSY;
+ else
+ list_add_tail(&hand->item, &ioctl_list);
up_write(&ioctl_list_sem);
- return rc;
+ return rc;
}
EXPORT_SYMBOL(libcfs_register_ioctl);
int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
{
- int rc = 0;
+ int rc = 0;
down_write(&ioctl_list_sem);
- if (cfs_list_empty(&hand->item))
- rc = -ENOENT;
- else
- cfs_list_del_init(&hand->item);
+ if (list_empty(&hand->item))
+ rc = -ENOENT;
+ else
+ list_del_init(&hand->item);
up_write(&ioctl_list_sem);
- return rc;
+ return rc;
}
EXPORT_SYMBOL(libcfs_deregister_ioctl);
}
default: {
- struct libcfs_ioctl_handler *hand;
- err = -EINVAL;
+ struct libcfs_ioctl_handler *hand;
+
+ err = -EINVAL;
down_read(&ioctl_list_sem);
- cfs_list_for_each_entry_typed(hand, &ioctl_list,
- struct libcfs_ioctl_handler, item) {
- err = hand->handle_ioctl(cmd, data);
- if (err != -EINVAL) {
- if (err == 0)
- err = libcfs_ioctl_popdata(arg,
- data, sizeof (*data));
- break;
- }
- }
+ list_for_each_entry(hand, &ioctl_list, item) {
+ err = hand->handle_ioctl(cmd, data);
+ if (err != -EINVAL) {
+ if (err == 0)
+ err = libcfs_ioctl_popdata(arg,
+ data, sizeof (*data));
+ break;
+ }
+ }
up_read(&ioctl_list_sem);
- break;
- }
- }
+ break;
+ }
+ }
- RETURN(err);
+ RETURN(err);
}
static int libcfs_ioctl(struct cfs_psdev_file *pfile,
init_rwsem(&cfs_tracefile_sem);
mutex_init(&cfs_trace_thread_mutex);
init_rwsem(&ioctl_list_sem);
- CFS_INIT_LIST_HEAD(&ioctl_list);
+ INIT_LIST_HEAD(&ioctl_list);
init_waitqueue_head(&cfs_race_waitq);
rc = libcfs_debug_init(5 * 1024 * 1024);
static void libcfs_decnum_addr2str(__u32 addr, char *str);
static void libcfs_hexnum_addr2str(__u32 addr, char *str);
static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr);
-static int libcfs_num_parse(char *str, int len, cfs_list_t *list);
-static int libcfs_num_match(__u32 addr, cfs_list_t *list);
+static int libcfs_num_parse(char *str, int len, struct list_head *list);
+static int libcfs_num_match(__u32 addr, struct list_head *list);
struct netstrfns {
- int nf_type;
- char *nf_name;
- char *nf_modname;
- void (*nf_addr2str)(__u32 addr, char *str);
- int (*nf_str2addr)(const char *str, int nob, __u32 *addr);
- int (*nf_parse_addrlist)(char *str, int len,
- cfs_list_t *list);
- int (*nf_match_addr)(__u32 addr, cfs_list_t *list);
+ int nf_type;
+ char *nf_name;
+ char *nf_modname;
+ void (*nf_addr2str)(__u32 addr, char *str);
+ int (*nf_str2addr)(const char *str, int nob, __u32 *addr);
+ int (*nf_parse_addrlist)(char *str, int len,
+ struct list_head *list);
+ int (*nf_match_addr)(__u32 addr, struct list_head *list);
};
static struct netstrfns libcfs_netstrfns[] = {
* One of this is created for each \<net\> parsed.
*/
struct nidrange {
- /**
- * Link to list of this structures which is built on nid range
- * list parsing.
- */
- cfs_list_t nr_link;
- /**
- * List head for addrrange::ar_link.
- */
- cfs_list_t nr_addrranges;
- /**
- * Flag indicating that *@<net> is found.
- */
- int nr_all;
- /**
- * Pointer to corresponding element of libcfs_netstrfns.
- */
- struct netstrfns *nr_netstrfns;
- /**
- * Number of network. E.g. 5 if \<net\> is "elan5".
- */
- int nr_netnum;
+ /**
+ * Link to list of this structures which is built on nid range
+ * list parsing.
+ */
+ struct list_head nr_link;
+ /**
+ * List head for addrrange::ar_link.
+ */
+ struct list_head nr_addrranges;
+ /**
+ * Flag indicating that *@<net> is found.
+ */
+ int nr_all;
+ /**
+ * Pointer to corresponding element of libcfs_netstrfns.
+ */
+ struct netstrfns *nr_netstrfns;
+ /**
+ * Number of network. E.g. 5 if \<net\> is "elan5".
+ */
+ int nr_netnum;
};
/**
* Structure to represent \<addrrange\> token of the syntax.
*/
struct addrrange {
- /**
- * Link to nidrange::nr_addrranges.
- */
- cfs_list_t ar_link;
- /**
+ /**
+ * Link to nidrange::nr_addrranges.
+ */
+ struct list_head ar_link;
+ /**
* List head for cfs_expr_list::el_list.
- */
- cfs_list_t ar_numaddr_ranges;
+ */
+ struct list_head ar_numaddr_ranges;
};
/**
* \retval errno otherwise
*/
static int
-libcfs_num_parse(char *str, int len, cfs_list_t *list)
+libcfs_num_parse(char *str, int len, struct list_head *list)
{
struct cfs_expr_list *el;
int rc;
rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
if (rc == 0)
- cfs_list_add_tail(&el->el_link, list);
+ list_add_tail(&el->el_link, list);
return rc;
}
static int
parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange)
{
- struct addrrange *addrrange;
+ struct addrrange *addrrange;
- if (src->ls_len == 1 && src->ls_str[0] == '*') {
- nidrange->nr_all = 1;
- return 1;
- }
+ if (src->ls_len == 1 && src->ls_str[0] == '*') {
+ nidrange->nr_all = 1;
+ return 1;
+ }
- LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
- if (addrrange == NULL)
- return 0;
- cfs_list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
- CFS_INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
+ LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
+ if (addrrange == NULL)
+ return 0;
+ list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
+ INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
- return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
- src->ls_len,
- &addrrange->ar_numaddr_ranges);
+ return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
+ src->ls_len,
+ &addrrange->ar_numaddr_ranges);
}
/**
*/
static struct nidrange *
add_nidrange(const struct cfs_lstr *src,
- cfs_list_t *nidlist)
+ struct list_head *nidlist)
{
struct netstrfns *nf;
struct nidrange *nr;
return NULL;
}
- cfs_list_for_each_entry(nr, nidlist, nr_link) {
- if (nr->nr_netstrfns != nf)
- continue;
- if (nr->nr_netnum != netnum)
- continue;
- return nr;
- }
+ list_for_each_entry(nr, nidlist, nr_link) {
+ if (nr->nr_netstrfns != nf)
+ continue;
+ if (nr->nr_netnum != netnum)
+ continue;
+ return nr;
+ }
- LIBCFS_ALLOC(nr, sizeof(struct nidrange));
- if (nr == NULL)
- return NULL;
- cfs_list_add_tail(&nr->nr_link, nidlist);
- CFS_INIT_LIST_HEAD(&nr->nr_addrranges);
- nr->nr_netstrfns = nf;
- nr->nr_all = 0;
- nr->nr_netnum = netnum;
+ LIBCFS_ALLOC(nr, sizeof(struct nidrange));
+ if (nr == NULL)
+ return NULL;
+ list_add_tail(&nr->nr_link, nidlist);
+ INIT_LIST_HEAD(&nr->nr_addrranges);
+ nr->nr_netstrfns = nf;
+ nr->nr_all = 0;
+ nr->nr_netnum = netnum;
- return nr;
+ return nr;
}
/**
* \retval 0 otherwise
*/
static int
-parse_nidrange(struct cfs_lstr *src, cfs_list_t *nidlist)
+parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
{
struct cfs_lstr addrrange;
struct cfs_lstr net;
* \retval none
*/
static void
-free_addrranges(cfs_list_t *list)
+free_addrranges(struct list_head *list)
{
- while (!cfs_list_empty(list)) {
+ while (!list_empty(list)) {
struct addrrange *ar;
- ar = cfs_list_entry(list->next, struct addrrange, ar_link);
+ ar = list_entry(list->next, struct addrrange, ar_link);
cfs_expr_list_free_list(&ar->ar_numaddr_ranges);
- cfs_list_del(&ar->ar_link);
+ list_del(&ar->ar_link);
LIBCFS_FREE(ar, sizeof(struct addrrange));
}
}
* \retval none
*/
void
-cfs_free_nidlist(cfs_list_t *list)
+cfs_free_nidlist(struct list_head *list)
{
- cfs_list_t *pos, *next;
- struct nidrange *nr;
-
- cfs_list_for_each_safe(pos, next, list) {
- nr = cfs_list_entry(pos, struct nidrange, nr_link);
- free_addrranges(&nr->nr_addrranges);
- cfs_list_del(pos);
- LIBCFS_FREE(nr, sizeof(struct nidrange));
- }
+ struct list_head *pos, *next;
+ struct nidrange *nr;
+
+ list_for_each_safe(pos, next, list) {
+ nr = list_entry(pos, struct nidrange, nr_link);
+ free_addrranges(&nr->nr_addrranges);
+ list_del(pos);
+ LIBCFS_FREE(nr, sizeof(struct nidrange));
+ }
}
/**
* \retval 0 otherwise
*/
int
-cfs_parse_nidlist(char *str, int len, cfs_list_t *nidlist)
+cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
{
struct cfs_lstr src;
struct cfs_lstr res;
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
- src.ls_str = str;
- src.ls_len = len;
- CFS_INIT_LIST_HEAD(nidlist);
- while (src.ls_str) {
+ src.ls_str = str;
+ src.ls_len = len;
+ INIT_LIST_HEAD(nidlist);
+ while (src.ls_str) {
rc = cfs_gettok(&src, ' ', &res);
- if (rc == 0) {
- cfs_free_nidlist(nidlist);
- RETURN(0);
- }
- rc = parse_nidrange(&res, nidlist);
- if (rc == 0) {
- cfs_free_nidlist(nidlist);
- RETURN(0);
- }
- }
- RETURN(1);
+ if (rc == 0) {
+ cfs_free_nidlist(nidlist);
+ RETURN(0);
+ }
+ rc = parse_nidrange(&res, nidlist);
+ if (rc == 0) {
+ cfs_free_nidlist(nidlist);
+ RETURN(0);
+ }
+ }
+ RETURN(1);
}
/*
* \retval 0 otherwise
*/
static int
-libcfs_num_match(__u32 addr, cfs_list_t *numaddr)
+libcfs_num_match(__u32 addr, struct list_head *numaddr)
{
struct cfs_expr_list *el;
- LASSERT(!cfs_list_empty(numaddr));
- el = cfs_list_entry(numaddr->next, struct cfs_expr_list, el_link);
+ LASSERT(!list_empty(numaddr));
+ el = list_entry(numaddr->next, struct cfs_expr_list, el_link);
return cfs_expr_list_match(addr, el);
}
* \retval 1 on match
* \retval 0 otherwises
*/
-int cfs_match_nid(lnet_nid_t nid, cfs_list_t *nidlist)
+int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
{
- struct nidrange *nr;
- struct addrrange *ar;
- ENTRY;
-
- cfs_list_for_each_entry(nr, nidlist, nr_link) {
- if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
- continue;
- if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
- continue;
- if (nr->nr_all)
- RETURN(1);
- cfs_list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
- if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
- &ar->ar_numaddr_ranges))
- RETURN(1);
- }
- RETURN(0);
+ struct nidrange *nr;
+ struct addrrange *ar;
+ ENTRY;
+
+ list_for_each_entry(nr, nidlist, nr_link) {
+ if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
+ continue;
+ if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
+ continue;
+ if (nr->nr_all)
+ RETURN(1);
+ list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
+ if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
+ &ar->ar_numaddr_ranges))
+ RETURN(1);
+ }
+ RETURN(0);
}
#ifdef __KERNEL__
atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd);
+ struct cfs_trace_cpu_data *tcd);
static inline struct cfs_trace_page *
-cfs_tage_from_list(cfs_list_t *list)
+cfs_tage_from_list(struct list_head *list)
{
- return cfs_list_entry(list, struct cfs_trace_page, linkage);
+ return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
- cfs_list_t *queue)
+ struct list_head *queue)
{
- __LASSERT(tage != NULL);
- __LASSERT(queue != NULL);
+ __LASSERT(tage != NULL);
+ __LASSERT(queue != NULL);
- cfs_list_move_tail(&tage->linkage, queue);
+ list_move_tail(&tage->linkage, queue);
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
- cfs_list_t *stock)
+ struct list_head *stock)
{
- int i;
+ int i;
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
+ /*
+ * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
- for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) {
- struct cfs_trace_page *tage;
+ for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) {
+ struct cfs_trace_page *tage;
- tage = cfs_tage_alloc(gfp);
- if (tage == NULL)
- break;
- cfs_list_add_tail(&tage->linkage, stock);
- }
- return i;
+ tage = cfs_tage_alloc(gfp);
+ if (tage == NULL)
+ break;
+ list_add_tail(&tage->linkage, stock);
+ }
+ return i;
}
/* return a page that has 'len' bytes left at the end */
struct cfs_trace_page *tage;
if (tcd->tcd_cur_pages > 0) {
- __LASSERT(!cfs_list_empty(&tcd->tcd_pages));
+ __LASSERT(!list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
if (tage->used + len <= PAGE_CACHE_SIZE)
return tage;
}
- if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
+ if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
if (tcd->tcd_cur_stock_pages > 0) {
tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
--tcd->tcd_cur_stock_pages;
- cfs_list_del_init(&tage->linkage);
+ list_del_init(&tage->linkage);
} else {
tage = cfs_tage_alloc(GFP_ATOMIC);
if (unlikely(tage == NULL)) {
tage->used = 0;
tage->cpu = smp_processor_id();
tage->type = tcd->tcd_type;
- cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
+ list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages++;
if (tcd->tcd_cur_pages > 8 && thread_running) {
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
- int pgcount = tcd->tcd_cur_pages / 10;
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ int pgcount = tcd->tcd_cur_pages / 10;
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
+ /*
+ * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
if (printk_ratelimit())
printk(KERN_WARNING "debug daemon buffer overflowed; "
- "discarding 10%% of pages (%d of %ld)\n",
- pgcount + 1, tcd->tcd_cur_pages);
+ "discarding 10%% of pages (%d of %ld)\n",
+ pgcount + 1, tcd->tcd_cur_pages);
- CFS_INIT_LIST_HEAD(&pc.pc_pages);
+ INIT_LIST_HEAD(&pc.pc_pages);
- cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
- struct cfs_trace_page, linkage) {
- if (pgcount-- == 0)
- break;
+ list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
+ if (pgcount-- == 0)
+ break;
- cfs_list_move_tail(&tage->linkage, &pc.pc_pages);
- tcd->tcd_cur_pages--;
- }
- put_pages_on_tcd_daemon_list(&pc, tcd);
+ list_move_tail(&tage->linkage, &pc.pc_pages);
+ tcd->tcd_cur_pages--;
+ }
+ put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static void
panic_collect_pages(struct page_collection *pc)
{
- /* Do the collect_pages job on a single CPU: assumes that all other
- * CPUs have been stopped during a panic. If this isn't true for some
- * arch, this will have to be implemented separately in each arch. */
- int i;
- int j;
- struct cfs_trace_cpu_data *tcd;
+ /* Do the collect_pages job on a single CPU: assumes that all other
+ * CPUs have been stopped during a panic. If this isn't true for some
+ * arch, this will have to be implemented separately in each arch. */
+ int i;
+ int j;
+ struct cfs_trace_cpu_data *tcd;
- CFS_INIT_LIST_HEAD(&pc->pc_pages);
+ INIT_LIST_HEAD(&pc->pc_pages);
- cfs_tcd_for_each(tcd, i, j) {
- cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
+ cfs_tcd_for_each(tcd, i, j) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ tcd->tcd_cur_pages = 0;
- if (pc->pc_want_daemon_pages) {
- cfs_list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
- }
- }
+ if (pc->pc_want_daemon_pages) {
+ list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
+ tcd->tcd_cur_daemon_pages = 0;
+ }
+ }
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
struct cfs_trace_cpu_data *tcd;
int i, cpu;
- cfs_for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- if (pc->pc_want_daemon_pages) {
- cfs_list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
- }
- }
- }
+ cfs_for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ tcd->tcd_cur_pages = 0;
+ if (pc->pc_want_daemon_pages) {
+ list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
+ tcd->tcd_cur_daemon_pages = 0;
+ }
+ }
+ }
}
static void collect_pages(struct page_collection *pc)
{
- CFS_INIT_LIST_HEAD(&pc->pc_pages);
+ INIT_LIST_HEAD(&pc->pc_pages);
- if (libcfs_panic_in_progress)
- panic_collect_pages(pc);
- else
- collect_pages_on_all_cpus(pc);
+ if (libcfs_panic_in_progress)
+ panic_collect_pages(pc);
+ else
+ collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
struct cfs_trace_cpu_data *tcd;
- cfs_list_t *cur_head;
+ struct list_head *cur_head;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
int i, cpu;
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cur_head = tcd->tcd_pages.next;
- cfs_list_for_each_entry_safe_typed(tage, tmp,
- &pc->pc_pages,
- struct cfs_trace_page,
- linkage) {
+ list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
+ linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- if (tage->cpu != cpu || tage->type != i)
- continue;
+ if (tage->cpu != cpu || tage->type != i)
+ continue;
- cfs_tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
- }
- }
- }
+ cfs_tage_to_tail(tage, cur_head);
+ tcd->tcd_cur_pages++;
+ }
+ }
+ }
}
static void put_pages_back(struct page_collection *pc)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
- cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
- struct cfs_trace_page, linkage) {
-
- __LASSERT_TAGE_INVARIANT(tage);
+ list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
+ __LASSERT_TAGE_INVARIANT(tage);
- if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
- continue;
+ if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
+ continue;
- cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
- tcd->tcd_cur_daemon_pages++;
+ cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
+ tcd->tcd_cur_daemon_pages++;
- if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
- struct cfs_trace_page *victim;
+ if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
+ struct cfs_trace_page *victim;
- __LASSERT(!cfs_list_empty(&tcd->tcd_daemon_pages));
- victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
+ __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
+ victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
__LASSERT_TAGE_INVARIANT(victim);
- cfs_list_del(&victim->linkage);
- cfs_tage_free(victim);
- tcd->tcd_cur_daemon_pages--;
- }
- }
+ list_del(&victim->linkage);
+ cfs_tage_free(victim);
+ tcd->tcd_cur_daemon_pages--;
+ }
+ }
}
static void put_pages_on_daemon_list(struct page_collection *pc)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct cfs_trace_page, linkage) {
+ pc.pc_want_daemon_pages = 1;
+ collect_pages(&pc);
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
char *p, *file, *fn;
struct page *page;
p += len;
}
- cfs_list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
}
int cfs_tracefile_dump_all_pages(char *filename)
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
- if (cfs_list_empty(&pc.pc_pages)) {
+ if (list_empty(&pc.pc_pages)) {
rc = 0;
goto close;
}
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO */
MMSPACE_OPEN;
- cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct cfs_trace_page, linkage) {
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
printk(KERN_WARNING "wanted to write %u but wrote "
"%d\n", tage->used, rc);
put_pages_back(&pc);
- __LASSERT(cfs_list_empty(&pc.pc_pages));
+ __LASSERT(list_empty(&pc.pc_pages));
break;
}
- cfs_list_del(&tage->linkage);
+ list_del(&tage->linkage);
cfs_tage_free(tage);
}
MMSPACE_CLOSE;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
- pc.pc_want_daemon_pages = 1;
- collect_pages(&pc);
- cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct cfs_trace_page, linkage) {
+ pc.pc_want_daemon_pages = 1;
+ collect_pages(&pc);
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- cfs_list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
pc.pc_want_daemon_pages = 0;
collect_pages(&pc);
- if (cfs_list_empty(&pc.pc_pages))
+ if (list_empty(&pc.pc_pages))
goto end_loop;
filp = NULL;
cfs_tracefile_read_unlock();
if (filp == NULL) {
put_pages_on_daemon_list(&pc);
- __LASSERT(cfs_list_empty(&pc.pc_pages));
+ __LASSERT(list_empty(&pc.pc_pages));
goto end_loop;
}
MMSPACE_OPEN;
- cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
- struct cfs_trace_page,
- linkage) {
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
static loff_t f_pos;
__LASSERT_TAGE_INVARIANT(tage);
printk(KERN_WARNING "wanted to write %u "
"but wrote %d\n", tage->used, rc);
put_pages_back(&pc);
- __LASSERT(cfs_list_empty(&pc.pc_pages));
+ __LASSERT(list_empty(&pc.pc_pages));
}
}
MMSPACE_CLOSE;
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
- if (!cfs_list_empty(&pc.pc_pages)) {
+ if (!list_empty(&pc.pc_pages)) {
int i;
printk(KERN_ALERT "Lustre: trace pages aren't "
printk(KERN_ERR "\n");
i = 0;
- cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
linkage)
printk(KERN_ERR "page %d belongs to cpu "
"%d\n", ++i, tage->cpu);
printk(KERN_ERR "There are %d pages unwritten\n",
i);
}
- __LASSERT(cfs_list_empty(&pc.pc_pages));
+ __LASSERT(list_empty(&pc.pc_pages));
end_loop:
if (atomic_read(&tctl->tctl_shutdown)) {
if (last_loop == 0) {
int cfs_tracefile_init(int max_pages)
{
- struct cfs_trace_cpu_data *tcd;
- int i;
- int j;
- int rc;
- int factor;
-
- rc = cfs_tracefile_init_arch();
- if (rc != 0)
- return rc;
-
- cfs_tcd_for_each(tcd, i, j) {
- /* tcd_pages_factor is initialized int tracefile_init_arch. */
- factor = tcd->tcd_pages_factor;
- CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
- CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages);
- CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
- tcd->tcd_cur_pages = 0;
- tcd->tcd_cur_stock_pages = 0;
- tcd->tcd_cur_daemon_pages = 0;
- tcd->tcd_max_pages = (max_pages * factor) / 100;
- LASSERT(tcd->tcd_max_pages > 0);
- tcd->tcd_shutting_down = 0;
- }
-
- return 0;
+ struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
+ int rc;
+ int factor;
+
+ rc = cfs_tracefile_init_arch();
+ if (rc != 0)
+ return rc;
+
+ cfs_tcd_for_each(tcd, i, j) {
+ /* tcd_pages_factor is initialized int tracefile_init_arch. */
+ factor = tcd->tcd_pages_factor;
+ INIT_LIST_HEAD(&tcd->tcd_pages);
+ INIT_LIST_HEAD(&tcd->tcd_stock_pages);
+ INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
+ tcd->tcd_cur_pages = 0;
+ tcd->tcd_cur_stock_pages = 0;
+ tcd->tcd_cur_daemon_pages = 0;
+ tcd->tcd_max_pages = (max_pages * factor) / 100;
+ LASSERT(tcd->tcd_max_pages > 0);
+ tcd->tcd_shutting_down = 0;
+ }
+ return 0;
}
static void trace_cleanup_on_all_cpus(void)
{
- struct cfs_trace_cpu_data *tcd;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
- int i, cpu;
-
- cfs_for_each_possible_cpu(cpu) {
- cfs_tcd_for_each_type_lock(tcd, i, cpu) {
- tcd->tcd_shutting_down = 1;
+ struct cfs_trace_cpu_data *tcd;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ int i, cpu;
- cfs_list_for_each_entry_safe_typed(tage, tmp,
- &tcd->tcd_pages,
- struct cfs_trace_page,
- linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ cfs_for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+ tcd->tcd_shutting_down = 1;
- cfs_list_del(&tage->linkage);
- cfs_tage_free(tage);
- }
+ list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
+ __LASSERT_TAGE_INVARIANT(tage);
- tcd->tcd_cur_pages = 0;
- }
- }
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
+ tcd->tcd_cur_pages = 0;
+ }
+ }
}
static void cfs_trace_cleanup(void)
{
struct page_collection pc;
- CFS_INIT_LIST_HEAD(&pc.pc_pages);
+ INIT_LIST_HEAD(&pc.pc_pages);
trace_cleanup_on_all_cpus();
/*
* pages with trace records not yet processed by tracefiled.
*/
- cfs_list_t tcd_pages;
+ struct list_head tcd_pages;
/* number of pages on ->tcd_pages */
- unsigned long tcd_cur_pages;
+ unsigned long tcd_cur_pages;
/*
* pages with trace records already processed by
* (put_pages_on_daemon_list()). LRU pages from this list are
* discarded when list grows too large.
*/
- cfs_list_t tcd_daemon_pages;
+ struct list_head tcd_daemon_pages;
/* number of pages on ->tcd_daemon_pages */
- unsigned long tcd_cur_daemon_pages;
+ unsigned long tcd_cur_daemon_pages;
/*
* Maximal number of pages allowed on ->tcd_pages and
* TCD_STOCK_PAGES pagesful are consumed by trace records all
* emitted in non-blocking contexts. Which is quite unlikely.
*/
- cfs_list_t tcd_stock_pages;
+ struct list_head tcd_stock_pages;
/* number of pages on ->tcd_stock_pages */
unsigned long tcd_cur_stock_pages;
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct page_collection {
- cfs_list_t pc_pages;
+ struct list_head pc_pages;
/*
* if this flag is set, collect_pages() will spill both
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
* only ->tcd_pages are spilled.
*/
- int pc_want_daemon_pages;
+ int pc_want_daemon_pages;
};
/* XXX nikita: this declaration is internal to tracefile.c and should probably
/*
* page itself
*/
- struct page *page;
+ struct page *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
*/
- cfs_list_t linkage;
+ struct list_head linkage;
/*
* number of bytes used within this page
*/
- unsigned int used;
+ unsigned int used;
/*
* cpu that owns this page
*/
- unsigned short cpu;
+ unsigned short cpu;
/*
* type(context) of this page
*/
- unsigned short type;
+ unsigned short type;
};
extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
- cfs_list_t *stock);
+ struct list_head *stock);
int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
return NULL;
UC_CACHE_SET_NEW(entry);
- CFS_INIT_LIST_HEAD(&entry->ue_hash);
+ INIT_LIST_HEAD(&entry->ue_hash);
entry->ue_key = key;
atomic_set(&entry->ue_refcount, 0);
init_waitqueue_head(&entry->ue_waitq);
static void free_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry)
{
- if (cache->uc_ops->free_entry)
- cache->uc_ops->free_entry(cache, entry);
+ if (cache->uc_ops->free_entry)
+ cache->uc_ops->free_entry(cache, entry);
- cfs_list_del(&entry->ue_hash);
- CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
- entry, entry->ue_key);
- LIBCFS_FREE(entry, sizeof(*entry));
+ list_del(&entry->ue_hash);
+ CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
+ entry, entry->ue_key);
+ LIBCFS_FREE(entry, sizeof(*entry));
}
static inline int upcall_compare(struct upcall_cache *cache,
UC_CACHE_SET_EXPIRED(entry);
}
- cfs_list_del_init(&entry->ue_hash);
+ list_del_init(&entry->ue_hash);
if (!atomic_read(&entry->ue_refcount))
free_entry(cache, entry);
return 1;
__u64 key, void *args)
{
struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
- cfs_list_t *head;
+ struct list_head *head;
wait_queue_t wait;
int rc, found;
ENTRY;
LASSERT(cache);
- head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+ head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
- found = 0;
+ found = 0;
spin_lock(&cache->uc_lock);
- cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
- /* check invalid & expired items */
- if (check_unlink_entry(cache, entry))
- continue;
- if (upcall_compare(cache, entry, key, args) == 0) {
- found = 1;
- break;
- }
- }
+ list_for_each_entry_safe(entry, next, head, ue_hash) {
+ /* check invalid & expired items */
+ if (check_unlink_entry(cache, entry))
+ continue;
+ if (upcall_compare(cache, entry, key, args) == 0) {
+ found = 1;
+ break;
+ }
+ }
- if (!found) {
- if (!new) {
+ if (!found) {
+ if (!new) {
spin_unlock(&cache->uc_lock);
- new = alloc_entry(cache, key, args);
- if (!new) {
- CERROR("fail to alloc entry\n");
- RETURN(ERR_PTR(-ENOMEM));
- }
- goto find_again;
- } else {
- cfs_list_add(&new->ue_hash, head);
- entry = new;
- }
- } else {
- if (new) {
- free_entry(cache, new);
- new = NULL;
- }
- cfs_list_move(&entry->ue_hash, head);
- }
- get_entry(entry);
+ new = alloc_entry(cache, key, args);
+ if (!new) {
+ CERROR("fail to alloc entry\n");
+ RETURN(ERR_PTR(-ENOMEM));
+ }
+ goto find_again;
+ } else {
+ list_add(&new->ue_hash, head);
+ entry = new;
+ }
+ } else {
+ if (new) {
+ free_entry(cache, new);
+ new = NULL;
+ }
+ list_move(&entry->ue_hash, head);
+ }
+ get_entry(entry);
/* acquire for new one */
if (UC_CACHE_IS_NEW(entry)) {
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
void *args)
{
- struct upcall_cache_entry *entry = NULL;
- cfs_list_t *head;
- int found = 0, rc = 0;
- ENTRY;
+ struct upcall_cache_entry *entry = NULL;
+ struct list_head *head;
+ int found = 0, rc = 0;
+ ENTRY;
- LASSERT(cache);
+ LASSERT(cache);
- head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+ head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
spin_lock(&cache->uc_lock);
- cfs_list_for_each_entry(entry, head, ue_hash) {
- if (downcall_compare(cache, entry, key, args) == 0) {
- found = 1;
- get_entry(entry);
- break;
- }
- }
+ list_for_each_entry(entry, head, ue_hash) {
+ if (downcall_compare(cache, entry, key, args) == 0) {
+ found = 1;
+ get_entry(entry);
+ break;
+ }
+ }
if (!found) {
CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
cache->uc_name, entry, entry->ue_key);
out:
- if (rc) {
- UC_CACHE_SET_INVALID(entry);
- cfs_list_del_init(&entry->ue_hash);
- }
- UC_CACHE_CLEAR_ACQUIRING(entry);
+ if (rc) {
+ UC_CACHE_SET_INVALID(entry);
+ list_del_init(&entry->ue_hash);
+ }
+ UC_CACHE_CLEAR_ACQUIRING(entry);
spin_unlock(&cache->uc_lock);
wake_up_all(&entry->ue_waitq);
put_entry(cache, entry);
spin_lock(&cache->uc_lock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
- cfs_list_for_each_entry_safe(entry, next,
+ list_for_each_entry_safe(entry, next,
&cache->uc_hashtable[i], ue_hash) {
if (!force && atomic_read(&entry->ue_refcount)) {
UC_CACHE_SET_EXPIRED(entry);
void upcall_cache_flush_idle(struct upcall_cache *cache)
{
- cache_flush(cache, 0);
+ cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);
void upcall_cache_flush_all(struct upcall_cache *cache)
{
- cache_flush(cache, 1);
+ cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
- cfs_list_t *head;
+ struct list_head *head;
struct upcall_cache_entry *entry;
int found = 0;
ENTRY;
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
spin_lock(&cache->uc_lock);
- cfs_list_for_each_entry(entry, head, ue_hash) {
+ list_for_each_entry(entry, head, ue_hash) {
if (upcall_compare(cache, entry, key, args) == 0) {
found = 1;
break;
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
struct upcall_cache_ops *ops)
{
- struct upcall_cache *cache;
- int i;
- ENTRY;
+ struct upcall_cache *cache;
+ int i;
+ ENTRY;
- LIBCFS_ALLOC(cache, sizeof(*cache));
- if (!cache)
- RETURN(ERR_PTR(-ENOMEM));
+ LIBCFS_ALLOC(cache, sizeof(*cache));
+ if (!cache)
+ RETURN(ERR_PTR(-ENOMEM));
spin_lock_init(&cache->uc_lock);
rwlock_init(&cache->uc_upcall_rwlock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
- strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
- /* upcall pathname proc tunable */
- strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
- cache->uc_entry_expire = 20 * 60;
- cache->uc_acquire_expire = 30;
- cache->uc_ops = ops;
-
- RETURN(cache);
+ INIT_LIST_HEAD(&cache->uc_hashtable[i]);
+ strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
+ /* upcall pathname proc tunable */
+ strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
+ cache->uc_entry_expire = 20 * 60;
+ cache->uc_acquire_expire = 30;
+ cache->uc_ops = ops;
+
+ RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
void cfs_init_timer(struct timer_list *t)
{
- CFS_INIT_LIST_HEAD(&t->tl_list);
+ INIT_LIST_HEAD(&t->tl_list);
}
void cfs_timer_init(struct timer_list *l, cfs_timer_func_t *func, void *arg)
{
- CFS_INIT_LIST_HEAD(&l->tl_list);
+ INIT_LIST_HEAD(&l->tl_list);
l->function = func;
l->data = (ulong_ptr_t)arg;
return;
#include "tracefile.h"
struct lc_watchdog {
- spinlock_t lcw_lock; /* check or change lcw_list */
- int lcw_refcount; /* must hold lcw_pending_timers_lock */
- struct timer_list lcw_timer; /* kernel timer */
- cfs_list_t lcw_list; /* chain on pending list */
- cfs_time_t lcw_last_touched; /* last touched stamp */
- struct task_struct *lcw_task; /* owner task */
- void (*lcw_callback)(pid_t, void *);
- void *lcw_data;
+ spinlock_t lcw_lock; /* check or change lcw_list */
+ int lcw_refcount; /* must hold lcw_pending_timers_lock */
+ struct timer_list lcw_timer; /* kernel timer */
+ struct list_head lcw_list; /* chain on pending list */
+ cfs_time_t lcw_last_touched;/* last touched stamp */
+ struct task_struct *lcw_task; /* owner task */
+ void (*lcw_callback)(pid_t, void *);
+ void *lcw_data;
- pid_t lcw_pid;
+ pid_t lcw_pid;
enum {
LC_WATCHDOG_DISABLED,
*/
/* BH lock! */
static DEFINE_SPINLOCK(lcw_pending_timers_lock);
-static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers);
+static struct list_head lcw_pending_timers = LIST_HEAD_INIT(lcw_pending_timers);
/* Last time a watchdog expired */
static cfs_time_t lcw_last_watchdog_time;
lcw->lcw_state = LC_WATCHDOG_EXPIRED;
spin_lock_bh(&lcw->lcw_lock);
- LASSERT(cfs_list_empty(&lcw->lcw_list));
+ LASSERT(list_empty(&lcw->lcw_list));
spin_lock_bh(&lcw_pending_timers_lock);
lcw->lcw_refcount++; /* +1 for pending list */
- cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
+ list_add(&lcw->lcw_list, &lcw_pending_timers);
wake_up(&lcw_event_waitq);
spin_unlock_bh(&lcw_pending_timers_lock);
return 1;
spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
+ rc = !list_empty(&lcw_pending_timers);
spin_unlock_bh(&lcw_pending_timers_lock);
return rc;
}
{
int rc = 0;
struct lc_watchdog *lcw;
- CFS_LIST_HEAD (zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
ENTRY;
CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
+ rc = !list_empty(&lcw_pending_timers);
spin_unlock_bh(&lcw_pending_timers_lock);
if (rc) {
CERROR("pending timers list was not empty at "
}
spin_lock_bh(&lcw_pending_timers_lock);
- while (!cfs_list_empty(&lcw_pending_timers)) {
- int is_dumplog;
-
- lcw = cfs_list_entry(lcw_pending_timers.next,
- struct lc_watchdog, lcw_list);
- /* +1 ref for callback to make sure lwc wouldn't be
- * deleted after releasing lcw_pending_timers_lock */
- lcw->lcw_refcount++;
+ while (!list_empty(&lcw_pending_timers)) {
+ int is_dumplog;
+
+ lcw = list_entry(lcw_pending_timers.next,
+ struct lc_watchdog, lcw_list);
+ /* +1 ref for callback to make sure lcw wouldn't be
+ * deleted after releasing lcw_pending_timers_lock */
+ lcw->lcw_refcount++;
spin_unlock_bh(&lcw_pending_timers_lock);
/* lock ordering */
spin_lock_bh(&lcw->lcw_lock);
spin_lock_bh(&lcw_pending_timers_lock);
- if (cfs_list_empty(&lcw->lcw_list)) {
+ if (list_empty(&lcw->lcw_list)) {
/* already removed from pending list */
lcw->lcw_refcount--; /* -1 ref for callback */
if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
+ list_add(&lcw->lcw_list, &zombies);
spin_unlock_bh(&lcw->lcw_lock);
- /* still hold lcw_pending_timers_lock */
- continue;
- }
+ /* still hold lcw_pending_timers_lock */
+ continue;
+ }
- cfs_list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
+ list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
spin_unlock_bh(&lcw_pending_timers_lock);
spin_unlock_bh(&lcw->lcw_lock);
spin_lock_bh(&lcw_pending_timers_lock);
lcw->lcw_refcount--; /* -1 ref for callback */
if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
+ list_add(&lcw->lcw_list, &zombies);
}
spin_unlock_bh(&lcw_pending_timers_lock);
- while (!cfs_list_empty(&zombies)) {
- lcw = cfs_list_entry(zombies.next,
+ while (!list_empty(&zombies)) {
+ lcw = list_entry(zombies.next,
struct lc_watchdog, lcw_list);
- cfs_list_del_init(&lcw->lcw_list);
+ list_del_init(&lcw->lcw_list);
LIBCFS_FREE(lcw, sizeof(*lcw));
}
}
lcw->lcw_data = data;
lcw->lcw_state = LC_WATCHDOG_DISABLED;
- CFS_INIT_LIST_HEAD(&lcw->lcw_list);
- cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
+ INIT_LIST_HEAD(&lcw->lcw_list);
+ cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
mutex_lock(&lcw_refcount_mutex);
if (++lcw_refcount == 1)
static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
{
spin_lock_bh(&lcw->lcw_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ if (unlikely(!list_empty(&lcw->lcw_list))) {
spin_lock_bh(&lcw_pending_timers_lock);
- cfs_list_del_init(&lcw->lcw_list);
+ list_del_init(&lcw->lcw_list);
lcw->lcw_refcount--; /* -1 ref for pending list */
spin_unlock_bh(&lcw_pending_timers_lock);
}
spin_lock_bh(&lcw->lcw_lock);
spin_lock_bh(&lcw_pending_timers_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
- cfs_list_del_init(&lcw->lcw_list);
+ if (unlikely(!list_empty(&lcw->lcw_list))) {
+ list_del_init(&lcw->lcw_list);
lcw->lcw_refcount--; /* -1 ref for pending list */
}
}
spinlock_t shrinker_guard = {0};
-CFS_LIST_HEAD(shrinker_hdr);
+struct list_head shrinker_hdr = LIST_HEAD_INIT(shrinker_hdr);
struct timer_list shrinker_timer = {0};
struct shrinker *set_shrinker(int seeks, shrink_callback cb)
s->seeks = seeks;
s->nr = 2;
spin_lock(&shrinker_guard);
- cfs_list_add(&s->list, &shrinker_hdr);
+ list_add(&s->list, &shrinker_hdr);
spin_unlock(&shrinker_guard);
}
struct shrinker *tmp;
spin_lock(&shrinker_guard);
#if TRUE
- cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
- struct shrinker, list) {
+ list_for_each_entry(tmp, &shrinker_hdr, list) {
if (tmp == s) {
- cfs_list_del(&tmp->list);
+ list_del(&tmp->list);
break;
}
}
#else
- cfs_list_del(&s->list);
+ list_del(&s->list);
#endif
spin_unlock(&shrinker_guard);
kfree(s);
struct shrinker *s;
spin_lock(&shrinker_guard);
- cfs_list_for_each_entry_typed(s, &shrinker_hdr,
- struct shrinker, list) {
+ list_for_each_entry(s, &shrinker_hdr, list) {
s->cb(s->nr, __GFP_FS);
}
spin_unlock(&shrinker_guard);
static DECLARE_RWSEM(cfs_symbol_lock);
-CFS_LIST_HEAD(cfs_symbol_list);
+struct list_head cfs_symbol_list = LIST_HEAD_INIT(cfs_symbol_list);
int libcfs_is_mp_system = FALSE;
void *
cfs_symbol_get(const char *name)
{
- cfs_list_t *walker;
+ struct list_head *walker;
struct cfs_symbol *sym = NULL;
down_read(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ list_for_each(walker, &cfs_symbol_list) {
+ sym = list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
sym->ref ++;
break;
void
cfs_symbol_put(const char *name)
{
- cfs_list_t *walker;
+ struct list_head *walker;
struct cfs_symbol *sym = NULL;
down_read(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ list_for_each(walker, &cfs_symbol_list) {
+ sym = list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
LASSERT(sym->ref > 0);
sym->ref--;
int
cfs_symbol_register(const char *name, const void *value)
{
- cfs_list_t *walker;
+ struct list_head *walker;
struct cfs_symbol *sym = NULL;
struct cfs_symbol *new = NULL;
strncpy(new->name, name, CFS_SYMBOL_LEN);
new->value = (void *)value;
new->ref = 0;
- CFS_INIT_LIST_HEAD(&new->sym_list);
+ INIT_LIST_HEAD(&new->sym_list);
down_write(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ list_for_each(walker, &cfs_symbol_list) {
+ sym = list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
up_write(&cfs_symbol_lock);
kfree(new);
return 0; /* alreay registerred */
}
}
- cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+ list_add_tail(&new->sym_list, &cfs_symbol_list);
up_write(&cfs_symbol_lock);
return 0;
void
cfs_symbol_unregister(const char *name)
{
- cfs_list_t *walker;
- cfs_list_t *nxt;
+ struct list_head *walker;
+ struct list_head *nxt;
struct cfs_symbol *sym = NULL;
down_write(&cfs_symbol_lock);
- cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ list_for_each_safe(walker, nxt, &cfs_symbol_list) {
+ sym = list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
LASSERT(sym->ref == 0);
- cfs_list_del (&sym->sym_list);
+ list_del (&sym->sym_list);
kfree(sym);
break;
}
void
cfs_symbol_clean()
{
- cfs_list_t *walker;
+ struct list_head *walker;
struct cfs_symbol *sym = NULL;
down_write(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ list_for_each(walker, &cfs_symbol_list) {
+ sym = list_entry (walker, struct cfs_symbol, sym_list);
LASSERT(sym->ref == 0);
- cfs_list_del (&sym->sym_list);
+ list_del (&sym->sym_list);
kfree(sym);
}
up_write(&cfs_symbol_lock);
cfs_proc_entry_t * root = NULL;
memset(&(root_table_header), 0, sizeof(struct ctl_table_header));
- CFS_INIT_LIST_HEAD(&(root_table_header.ctl_entry));
+ INIT_LIST_HEAD(&(root_table_header.ctl_entry));
INIT_PROCFS_LOCK();
proc_entry_cache = kmem_cache_create(NULL, sizeof(cfs_proc_entry_t),
int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
void *newval, size_t newlen)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
if (nlen <= 0 || nlen >= CTL_MAXNAME)
return -ENOTDIR;
tmp = &root_table_header.ctl_entry;
do {
struct ctl_table_header *head =
- cfs_list_entry(tmp, struct ctl_table_header, ctl_entry);
+ list_entry(tmp, struct ctl_table_header, ctl_entry);
void *context = NULL;
int error = parse_table(name, nlen, oldval, oldlenp,
newval, newlen, head->ctl_table,
return NULL;
tmp->ctl_table = table;
- CFS_INIT_LIST_HEAD(&tmp->ctl_entry);
- cfs_list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
+ INIT_LIST_HEAD(&tmp->ctl_entry);
+ list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
#ifdef CONFIG_PROC_FS
register_proc_table(table, cfs_proc_sys);
#endif
*/
void unregister_sysctl_table(struct ctl_table_header * header)
{
- cfs_list_del(&header->ctl_entry);
+ list_del(&header->ctl_entry);
#ifdef CONFIG_PROC_FS
unregister_proc_table(header->ctl_table, cfs_proc_sys);
#endif
}
EXPORT_SYMBOL(seq_puts);
-cfs_list_t *seq_list_start(cfs_list_t *head, loff_t pos)
+struct list_head *seq_list_start(struct list_head *head, loff_t pos)
{
- cfs_list_t *lh;
+ struct list_head *lh;
- cfs_list_for_each(lh, head)
+ list_for_each(lh, head)
if (pos-- == 0)
return lh;
EXPORT_SYMBOL(seq_list_start);
-cfs_list_t *seq_list_start_head(cfs_list_t *head,
+struct list_head *seq_list_start_head(struct list_head *head,
loff_t pos)
{
if (!pos)
EXPORT_SYMBOL(seq_list_start_head);
-cfs_list_t *seq_list_next(void *v, cfs_list_t *head,
+struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos)
{
- cfs_list_t *lh;
+ struct list_head *lh;
- lh = ((cfs_list_t *)v)->next;
+ lh = ((struct list_head *)v)->next;
++*ppos;
return lh == head ? NULL : lh;
}
{
waitq->magic = CFS_WAITQ_MAGIC;
waitq->flags = 0;
- CFS_INIT_LIST_HEAD(&(waitq->waiters));
+ INIT_LIST_HEAD(&(waitq->waiters));
spin_lock_init(&(waitq->guard));
}
atomic_inc(&slot->count);
- CFS_INIT_LIST_HEAD(&(link->waitq[0].link));
- CFS_INIT_LIST_HEAD(&(link->waitq[1].link));
+ INIT_LIST_HEAD(&(link->waitq[0].link));
+ INIT_LIST_HEAD(&(link->waitq[1].link));
link->waitq[0].waitl = link->waitq[1].waitl = link;
}
LASSERT(link->waitq[waitqid].waitq == NULL);
link->waitq[waitqid].waitq = waitq;
if (link->flags & CFS_WAITQ_EXCLUSIVE) {
- cfs_list_add_tail(&link->waitq[waitqid].link, &waitq->waiters);
+ list_add_tail(&link->waitq[waitqid].link, &waitq->waiters);
} else {
- cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
+ list_add(&link->waitq[waitqid].link, &waitq->waiters);
}
spin_unlock(&(waitq->guard));
}
if (i < CFS_WAITQ_CHANNELS) {
link->waitq[i].waitq = NULL;
- cfs_list_del_init(&link->waitq[i].link);
+ list_del_init(&link->waitq[i].link);
} else {
cfs_enter_debugger();
}
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
spin_lock(&waitq->guard);
- cfs_list_for_each_entry_typed(scan, &waitq->waiters,
- cfs_waitlink_channel_t,
+ list_for_each_entry(scan, &waitq->waiters,
link) {
wait_queue_t *waitl = scan->waitl;
spin_lock(&(ks_data.ksnd_tsdu_lock));
- if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
+ if (!list_empty (&(ks_data.ksnd_freetsdus))) {
LASSERT(ks_data.ksnd_nfreetsdus > 0);
- KsTsdu = cfs_list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
- cfs_list_del(&(KsTsdu->Link));
+ KsTsdu = list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
+ list_del(&(KsTsdu->Link));
ks_data.ksnd_nfreetsdus--;
} else {
if (ks_data.ksnd_nfreetsdus > 128) {
KsFreeKsTsdu(KsTsdu);
} else {
- cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+ list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
ks_data.ksnd_nfreetsdus++;
}
spin_unlock(&(ks_data.ksnd_tsdu_lock));
*Length = 0;
- cfs_list_for_each_entry_typed(KsTsdu,
- &TsduMgr->TsduList,KS_TSDU, Link) {
+ list_for_each_entry(KsTsdu, &TsduMgr->TsduList, Link) {
ULONG start = 0;
LASSERT(TsduMgr->TotalBytes >= length);
- while (!cfs_list_empty(&TsduMgr->TsduList)) {
+ while (!list_empty(&TsduMgr->TsduList)) {
ULONG start = 0;
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
start = KsTsdu->StartOffset;
if (KsTsdu->StartOffset >= KsTsdu->LastOffset) {
/* remove KsTsdu from list */
- cfs_list_del(&KsTsdu->Link);
+ list_del(&KsTsdu->Link);
TsduMgr->NumOfTsdu--;
KsPutKsTsdu(KsTsdu);
}
/* retrieve the latest Tsdu buffer form TsduMgr
list if the list is not empty. */
- if (cfs_list_empty(&(TsduMgr->TsduList))) {
+ if (list_empty(&(TsduMgr->TsduList))) {
LASSERT(TsduMgr->NumOfTsdu == 0);
KsTsdu = NULL;
} else {
LASSERT(TsduMgr->NumOfTsdu > 0);
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
/* if this Tsdu does not contain enough space, we need
allocate a new Tsdu queue. */
if (NULL == KsTsdu) {
KsTsdu = KsAllocateKsTsdu();
if (NULL != KsTsdu) {
- cfs_list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
TsduMgr->NumOfTsdu++;
}
}
} else {
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
/* remove the KsTsdu from TsduMgr list to release the lock */
- cfs_list_del(&(KsTsdu->Link));
+ list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
while (length > BytesRecved) {
KsTsdu = NULL;
} else {
TsduMgr->NumOfTsdu++;
- cfs_list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
}
}
FALSE
);
- CFS_INIT_LIST_HEAD(
+ INIT_LIST_HEAD(
&(TsduMgr->TsduList)
);
KsRemoveTdiEngine(TsduMgr);
KeSetEvent(&(TsduMgr->Event), 0, FALSE);
- while (!cfs_list_empty(&TsduMgr->TsduList)) {
+ while (!list_empty(&TsduMgr->TsduList)) {
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
if (KsTsdu->StartOffset == KsTsdu->LastOffset) {
// KsTsdu is empty now, we need free it ...
//
- cfs_list_del(&(KsTsdu->Link));
+ list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
KsFreeKsTsdu(KsTsdu);
LASSERT(parent->kstc_type == kstt_listener);
LASSERT(parent->kstc_state == ksts_listening);
- if (cfs_list_empty(&(parent->listener.kstc_listening.list))) {
+ if (list_empty(&(parent->listener.kstc_listening.list))) {
child = NULL;
} else {
- cfs_list_t * tmp;
+ struct list_head * tmp;
/* check the listening queue and try to get a free connecton */
- cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ child = list_entry (tmp, ks_tconn_t, child.kstc_link);
spin_lock(&(child->kstc_lock));
if (!child->child.kstc_busy) {
/* attach it into global list in ks_data */
- cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
+ list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
ks_data.ksnd_ntconns++;
spin_unlock(&(ks_data.ksnd_tconn_lock));
spin_lock(&(ks_data.ksnd_tconn_lock));
/* remove it from the global list */
- cfs_list_del(&tconn->kstc_list);
+ list_del(&tconn->kstc_list);
ks_data.ksnd_ntconns--;
/* if this is the last tconn, it would be safe for
RtlInitUnicodeString(&(tconn->kstc_dev), TCP_DEVICE_NAME);
- CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list));
- CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list));
+ INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list));
+ INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list));
cfs_init_event( &(tconn->listener.kstc_accept_event),
TRUE,
if (tconn->child.kstc_queued) {
- cfs_list_del(&(tconn->child.kstc_link));
+ list_del(&(tconn->child.kstc_link));
if (tconn->child.kstc_queueno) {
if (!engs->queued) {
spin_lock(&engm->lock);
if (!engs->queued) {
- cfs_list_add_tail(&engs->link, &engm->list);
+ list_add_tail(&engs->link, &engm->list);
engs->queued = TRUE;
engs->tconn = tconn;
engs->emgr = engm;
LASSERT(engm != NULL);
spin_lock(&engm->lock);
if (engs->queued) {
- cfs_list_del(&engs->link);
+ list_del(&engs->link);
engs->queued = FALSE;
engs->tconn = NULL;
engs->emgr = NULL;
tflags = TDI_SEND_NON_BLOCKING;
}
- if (cfs_list_empty(&TsduMgr->TsduList)) {
+ if (list_empty(&TsduMgr->TsduList)) {
LASSERT(TsduMgr->TotalBytes == 0);
ks_unlock_tsdumgr(TsduMgr);
goto errorout;
{
ks_engine_mgr_t * engm = context;
ks_engine_slot_t * engs;
- cfs_list_t * list;
+ struct list_head * list;
ks_tconn_t * tconn;
cfs_set_thread_priority(31);
cfs_wait_event_internal(&engm->start, 0);
spin_lock(&engm->lock);
- if (cfs_list_empty(&engm->list)) {
+ if (list_empty(&engm->list)) {
spin_unlock(&engm->lock);
continue;
}
list = engm->list.next;
- cfs_list_del(list);
- engs = cfs_list_entry(list, ks_engine_slot_t, link);
+ list_del(list);
+ engs = list_entry(list, ks_engine_slot_t, link);
LASSERT(engs->emgr == engm);
LASSERT(engs->queued);
engs->emgr = NULL;
RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
spin_lock_init(&ks_data.ksnd_tconn_lock);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
+ INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t),
/* initialize tsdu related globals */
spin_lock_init(&ks_data.ksnd_tsdu_lock);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
+ INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size,
0, 0, NULL);
spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
+ INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
kthread_run(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], "");
}
ks_fini_tdi_data()
{
PKS_TSDU KsTsdu = NULL;
- cfs_list_t * list = NULL;
+ struct list_head * list = NULL;
int i;
/* clean up the pnp handler and address slots */
/* we need wait until all the tconn are freed */
spin_lock(&(ks_data.ksnd_tconn_lock));
- if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
+ if (list_empty(&(ks_data.ksnd_tconns))) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
spin_unlock(&(ks_data.ksnd_tconn_lock));
/* clean up all the tsud buffers in the free list */
spin_lock(&(ks_data.ksnd_tsdu_lock));
- cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
- KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
+ list_for_each (list, &ks_data.ksnd_freetsdus) {
+ KsTsdu = list_entry (list, KS_TSDU, Link);
kmem_cache_free(
ks_data.ksnd_tsdu_slab,
if (backlog) {
spin_lock(&backlog->kstc_lock);
/* attch it into the listing list of daemon */
- cfs_list_add( &backlog->child.kstc_link,
+ list_add( &backlog->child.kstc_link,
&parent->listener.kstc_listening.list );
parent->listener.kstc_listening.num++;
void
ks_stop_listen(ks_tconn_t *tconn)
{
- cfs_list_t * list;
+ struct list_head * list;
ks_tconn_t * backlog;
/* reset all tdi event callbacks to NULL */
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* cleanup all the listening backlog child connections */
- cfs_list_for_each (list, &(tconn->listener.kstc_listening.list)) {
- backlog = cfs_list_entry(list, ks_tconn_t, child.kstc_link);
+ list_for_each (list, &(tconn->listener.kstc_listening.list)) {
+ backlog = list_entry(list, ks_tconn_t, child.kstc_link);
/* destory and free it */
ks_put_tconn(backlog);
ks_tconn_t ** child
)
{
- cfs_list_t * tmp;
+ struct list_head * tmp;
ks_tconn_t * backlog = NULL;
ks_replenish_backlogs(parent, parent->listener.nbacklog);
/* check the listening queue and try to search the accepted connecton */
- cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ backlog = list_entry (tmp, ks_tconn_t, child.kstc_link);
spin_lock(&(backlog->kstc_lock));
LASSERT(backlog->kstc_state == ksts_connected);
LASSERT(backlog->child.kstc_busy);
- cfs_list_del(&(backlog->child.kstc_link));
- cfs_list_add(&(backlog->child.kstc_link),
+ list_del(&(backlog->child.kstc_link));
+ list_add(&(backlog->child.kstc_link),
&(parent->listener.kstc_accepted.list));
parent->listener.kstc_accepted.num++;
parent->listener.kstc_listening.num--;
#define CFS_WS_NAME_LEN 16
typedef struct cfs_wi_sched {
- cfs_list_t ws_list; /* chain on global list */
+ struct list_head ws_list; /* chain on global list */
#ifdef __KERNEL__
/** serialised workitems */
- spinlock_t ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
wait_queue_head_t ws_waitq;
#endif
/** concurrent workitems */
- cfs_list_t ws_runq;
+ struct list_head ws_runq;
/** rescheduled running-workitems, a workitem can be rescheduled
* while running in wi_action(), but we don't to execute it again
* unless it returns from wi_action(), so we put it on ws_rerunq
* while rescheduling, and move it to runq after it returns
* from wi_action() */
- cfs_list_t ws_rerunq;
+ struct list_head ws_rerunq;
/** CPT-table for this scheduler */
struct cfs_cpt_table *ws_cptab;
/** CPT id for affinity */
/** serialize */
spinlock_t wi_glock;
/** list of all schedulers */
- cfs_list_t wi_scheds;
+ struct list_head wi_scheds;
/** WI module is initialized */
int wi_init;
/** shutting down the whole WI module */
{
cfs_wi_sched_lock(sched);
if (sched->ws_stopping) {
- cfs_wi_sched_unlock(sched);
- return 0;
- }
+ cfs_wi_sched_unlock(sched);
+ return 0;
+ }
- if (!cfs_list_empty(&sched->ws_runq)) {
- cfs_wi_sched_unlock(sched);
- return 0;
- }
- cfs_wi_sched_unlock(sched);
- return 1;
+ if (!list_empty(&sched->ws_runq)) {
+ cfs_wi_sched_unlock(sched);
+ return 0;
+ }
+ cfs_wi_sched_unlock(sched);
+ return 1;
}
#else /* !__KERNEL__ */
LASSERT(wi->wi_running);
#endif
if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT(!cfs_list_empty(&wi->wi_list));
- cfs_list_del_init(&wi->wi_list);
+ LASSERT(!list_empty(&wi->wi_list));
+ list_del_init(&wi->wi_list);
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
}
- LASSERT(cfs_list_empty(&wi->wi_list));
+ LASSERT(list_empty(&wi->wi_list));
wi->wi_scheduled = 1; /* LBUG future schedule attempts */
cfs_wi_sched_unlock(sched);
rc = !(wi->wi_running);
if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT(!cfs_list_empty(&wi->wi_list));
- cfs_list_del_init(&wi->wi_list);
+ LASSERT(!list_empty(&wi->wi_list));
+ list_del_init(&wi->wi_list);
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- wi->wi_scheduled = 0;
- }
+ wi->wi_scheduled = 0;
+ }
- LASSERT (cfs_list_empty(&wi->wi_list));
+ LASSERT (list_empty(&wi->wi_list));
- cfs_wi_sched_unlock(sched);
- return rc;
+ cfs_wi_sched_unlock(sched);
+ return rc;
}
EXPORT_SYMBOL(cfs_wi_deschedule);
cfs_wi_sched_lock(sched);
if (!wi->wi_scheduled) {
- LASSERT (cfs_list_empty(&wi->wi_list));
+ LASSERT (list_empty(&wi->wi_list));
wi->wi_scheduled = 1;
sched->ws_nscheduled++;
if (!wi->wi_running) {
- cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+ list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
wake_up(&sched->ws_waitq);
#endif
} else {
- cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+ list_add(&wi->wi_list, &sched->ws_rerunq);
}
}
- LASSERT (!cfs_list_empty(&wi->wi_list));
+ LASSERT (!list_empty(&wi->wi_list));
cfs_wi_sched_unlock(sched);
return;
}
cfs_wi_sched_lock(sched);
while (!sched->ws_stopping) {
- int nloops = 0;
- int rc;
- cfs_workitem_t *wi;
-
- while (!cfs_list_empty(&sched->ws_runq) &&
- nloops < CFS_WI_RESCHED) {
- wi = cfs_list_entry(sched->ws_runq.next,
- cfs_workitem_t, wi_list);
+ int nloops = 0;
+ int rc;
+ cfs_workitem_t *wi;
+
+ while (!list_empty(&sched->ws_runq) &&
+ nloops < CFS_WI_RESCHED) {
+ wi = list_entry(sched->ws_runq.next,
+ cfs_workitem_t, wi_list);
LASSERT(wi->wi_scheduled && !wi->wi_running);
- cfs_list_del_init(&wi->wi_list);
+ list_del_init(&wi->wi_list);
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
if (rc != 0) /* WI should be dead, even be freed! */
continue;
- wi->wi_running = 0;
- if (cfs_list_empty(&wi->wi_list))
+ wi->wi_running = 0;
+ if (list_empty(&wi->wi_list))
continue;
LASSERT(wi->wi_scheduled);
- /* wi is rescheduled, should be on rerunq now, we
- * move it to runq so it can run action now */
- cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
+ /* wi is rescheduled, should be on rerunq now, we
+ * move it to runq so it can run action now */
+ list_move_tail(&wi->wi_list, &sched->ws_runq);
}
- if (!cfs_list_empty(&sched->ws_runq)) {
+ if (!list_empty(&sched->ws_runq)) {
cfs_wi_sched_unlock(sched);
/* don't sleep because some workitems still
* expect me to come back soon */
struct cfs_wi_sched *tmp;
/** rerunq is always empty for userspace */
- cfs_list_for_each_entry(tmp,
- &cfs_wi_data.wi_scheds, ws_list) {
- if (!cfs_list_empty(&tmp->ws_runq)) {
+ list_for_each_entry(tmp, &cfs_wi_data.wi_scheds, ws_list) {
+ if (!list_empty(&tmp->ws_runq)) {
sched = tmp;
break;
}
if (sched == NULL)
break;
- wi = cfs_list_entry(sched->ws_runq.next,
+ wi = list_entry(sched->ws_runq.next,
cfs_workitem_t, wi_list);
- cfs_list_del_init(&wi->wi_list);
+ list_del_init(&wi->wi_list);
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
return;
}
- LASSERT(!cfs_list_empty(&sched->ws_list));
+ LASSERT(!list_empty(&sched->ws_list));
sched->ws_stopping = 1;
spin_unlock(&cfs_wi_data.wi_glock);
}
}
- cfs_list_del(&sched->ws_list);
+ list_del(&sched->ws_list);
spin_unlock(&cfs_wi_data.wi_glock);
#endif
spin_lock_init(&sched->ws_lock);
init_waitqueue_head(&sched->ws_waitq);
#endif
- CFS_INIT_LIST_HEAD(&sched->ws_runq);
- CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
- CFS_INIT_LIST_HEAD(&sched->ws_list);
+ INIT_LIST_HEAD(&sched->ws_runq);
+ INIT_LIST_HEAD(&sched->ws_rerunq);
+ INIT_LIST_HEAD(&sched->ws_list);
#ifdef __KERNEL__
for (; nthrs > 0; nthrs--) {
spin_lock(&cfs_wi_data.wi_glock);
/* make up for cfs_wi_sched_destroy */
- cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+ list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
sched->ws_starting--;
spin_unlock(&cfs_wi_data.wi_glock);
}
#endif
spin_lock(&cfs_wi_data.wi_glock);
- cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+ list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
spin_unlock(&cfs_wi_data.wi_glock);
*sched_pp = sched;
memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
spin_lock_init(&cfs_wi_data.wi_glock);
- CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
+ INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
cfs_wi_data.wi_init = 1;
return 0;
#ifdef __KERNEL__
/* nobody should contend on this list */
- cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+ list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
sched->ws_stopping = 1;
wake_up_all(&sched->ws_waitq);
}
- cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+ list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads != 0) {
spin_unlock(&cfs_wi_data.wi_glock);
}
#endif
- while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
- sched = cfs_list_entry(cfs_wi_data.wi_scheds.next,
+ while (!list_empty(&cfs_wi_data.wi_scheds)) {
+ sched = list_entry(cfs_wi_data.wi_scheds.next,
struct cfs_wi_sched, ws_list);
- cfs_list_del(&sched->ws_list);
+ list_del(&sched->ws_list);
LIBCFS_FREE(sched, sizeof(*sched));
}
#include <libcfs/libcfs.h>
#include "ptlrpc_internal.h"
-/* XXX: This is just for liblustre. Remove the #if defined directive when the
- * "cfs_" prefix is dropped from cfs_list_head. */
-#if defined (__linux__) && defined(__KERNEL__)
extern struct list_head ptlrpc_all_services;
-#else
-extern struct cfs_list_head ptlrpc_all_services;
-#endif
/**
* NRS core object.
* registration/unregistration, and NRS core lprocfs operations.
*/
struct mutex nrs_mutex;
- /* XXX: This is just for liblustre. Remove the #if defined directive
- * when the * "cfs_" prefix is dropped from cfs_list_head. */
-#if defined (__linux__) && defined(__KERNEL__)
/**
* List of all policy descriptors registered with NRS core; protected
* by nrs_core::nrs_mutex.
*/
struct list_head nrs_policies;
-#else
- struct cfs_list_head nrs_policies;
-#endif
-
};
int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc);