static inline void *
lnet_freelist_alloc (lnet_freelist_t *fl)
{
- /* ALWAYS called with liblock held */
- lnet_freeobj_t *o;
+ /* ALWAYS called with liblock held */
+ lnet_freeobj_t *o;
- if (cfs_list_empty (&fl->fl_list))
- return (NULL);
+ if (list_empty(&fl->fl_list))
+ return NULL;
- o = cfs_list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
- cfs_list_del (&o->fo_list);
- return ((void *)&o->fo_contents);
+ o = list_entry(fl->fl_list.next, lnet_freeobj_t, fo_list);
+ list_del(&o->fo_list);
+ return (void *)&o->fo_contents;
}
static inline void
lnet_freelist_free (lnet_freelist_t *fl, void *obj)
{
- /* ALWAYS called with liblock held */
- lnet_freeobj_t *o = cfs_list_entry (obj, lnet_freeobj_t, fo_contents);
+ /* ALWAYS called with liblock held */
+ lnet_freeobj_t *o = list_entry(obj, lnet_freeobj_t, fo_contents);
- cfs_list_add (&o->fo_list, &fl->fl_list);
+ list_add(&o->fo_list, &fl->fl_list);
}
lnet_res_unlock(0);
if (md != NULL)
- CFS_INIT_LIST_HEAD(&md->md_list);
+ INIT_LIST_HEAD(&md->md_list);
return md;
}
LIBCFS_ALLOC(md, size);
- if (md != NULL) {
- /* Set here in case of early free */
- md->md_options = umd->options;
- md->md_niov = niov;
- CFS_INIT_LIST_HEAD(&md->md_list);
- }
+ if (md != NULL) {
+ /* Set here in case of early free */
+ md->md_options = umd->options;
+ md->md_niov = niov;
+ INIT_LIST_HEAD(&md->md_list);
+ }
- return (md);
+ return md;
}
static inline void
{
/* ALWAYS called with resource lock held */
/* NB: cookie is still useful, don't reset it */
- cfs_list_del(&lh->lh_hash_chain);
+ list_del(&lh->lh_hash_chain);
}
static inline void
return hash_long(nid, LNET_PEER_HASH_BITS);
}
-static inline cfs_list_t *
+static inline struct list_head *
lnet_net2rnethash(__u32 net)
{
return &the_lnet.ln_remote_nets_hash[(LNET_NETNUM(net) +
}
/* match-table functions */
-cfs_list_t *lnet_mt_match_head(struct lnet_match_table *mtable,
+struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable,
lnet_process_id_t id, __u64 mbits);
struct lnet_match_table *lnet_mt_of_attach(unsigned int index,
lnet_process_id_t id, __u64 mbits,
/* portals match/attach functions */
void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
- cfs_list_t *matches, cfs_list_t *drops);
+ struct list_head *matches, struct list_head *drops);
void lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md);
int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg);
lnet_msg_t *lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *get_msg);
void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
-void lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason);
-void lnet_recv_delayed_msg_list(cfs_list_t *head);
+void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
+void lnet_recv_delayed_msg_list(struct list_head *head);
int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt);
void lnet_msg_container_cleanup(struct lnet_msg_container *container);
int lnet_ping(lnet_process_id_t id, int timeout_ms,
lnet_process_id_t *ids, int n_ids);
-int lnet_parse_ip2nets (char **networksp, char *ip2nets);
-int lnet_parse_routes (char *route_str, int *im_a_router);
-int lnet_parse_networks (cfs_list_t *nilist, char *networks);
+int lnet_parse_ip2nets(char **networksp, char *ip2nets);
+int lnet_parse_routes(char *route_str, int *im_a_router);
+int lnet_parse_networks(struct list_head *nilist, char *networks);
int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
struct lnet_libmd;
typedef struct lnet_msg {
- cfs_list_t msg_activelist;
- cfs_list_t msg_list; /* Q for credits/MD */
+ struct list_head msg_activelist;
+ struct list_head msg_list; /* Q for credits/MD */
- lnet_process_id_t msg_target;
+ lnet_process_id_t msg_target;
/* where is it from, it's only for building event */
lnet_nid_t msg_from;
__u32 msg_type;
typedef struct lnet_libhandle {
- cfs_list_t lh_hash_chain;
- __u64 lh_cookie;
+ struct list_head lh_hash_chain;
+ __u64 lh_cookie;
} lnet_libhandle_t;
#define lh_entry(ptr, type, member) \
((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
typedef struct lnet_eq {
- cfs_list_t eq_list;
+ struct list_head eq_list;
lnet_libhandle_t eq_lh;
lnet_seq_t eq_enq_seq;
lnet_seq_t eq_deq_seq;
} lnet_eq_t;
typedef struct lnet_me {
- cfs_list_t me_list;
- lnet_libhandle_t me_lh;
- lnet_process_id_t me_match_id;
- unsigned int me_portal;
- unsigned int me_pos; /* hash offset in mt_hash */
- __u64 me_match_bits;
- __u64 me_ignore_bits;
- lnet_unlink_t me_unlink;
- struct lnet_libmd *me_md;
+ struct list_head me_list;
+ lnet_libhandle_t me_lh;
+ lnet_process_id_t me_match_id;
+ unsigned int me_portal;
+ unsigned int me_pos; /* hash offset in mt_hash */
+ __u64 me_match_bits;
+ __u64 me_ignore_bits;
+ lnet_unlink_t me_unlink;
+ struct lnet_libmd *me_md;
} lnet_me_t;
typedef struct lnet_libmd {
- cfs_list_t md_list;
- lnet_libhandle_t md_lh;
- lnet_me_t *md_me;
- char *md_start;
- unsigned int md_offset;
- unsigned int md_length;
- unsigned int md_max_size;
- int md_threshold;
- int md_refcount;
- unsigned int md_options;
- unsigned int md_flags;
- void *md_user_ptr;
- lnet_eq_t *md_eq;
- unsigned int md_niov; /* # frags */
- union {
- struct iovec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
- } md_iov;
+ struct list_head md_list;
+ lnet_libhandle_t md_lh;
+ lnet_me_t *md_me;
+ char *md_start;
+ unsigned int md_offset;
+ unsigned int md_length;
+ unsigned int md_max_size;
+ int md_threshold;
+ int md_refcount;
+ unsigned int md_options;
+ unsigned int md_flags;
+ void *md_user_ptr;
+ lnet_eq_t *md_eq;
+ unsigned int md_niov; /* # frags */
+ union {
+ struct iovec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
+ } md_iov;
} lnet_libmd_t;
#define LNET_MD_FLAG_ZOMBIE (1 << 0)
#ifdef LNET_USE_LIB_FREELIST
typedef struct
{
- void *fl_objs; /* single contiguous array of objects */
- int fl_nobjs; /* the number of them */
- int fl_objsize; /* the size (including overhead) of each of them */
- cfs_list_t fl_list; /* where they are enqueued */
+ /* single contiguous array of objects */
+ void *fl_objs;
+ /* the number of them */
+ int fl_nobjs;
+ /* the size (including overhead) of each of them */
+ int fl_objsize;
+ /* where they are enqueued */
+ struct list_head fl_list;
} lnet_freelist_t;
typedef struct
{
- cfs_list_t fo_list; /* enqueue on fl_list */
- void *fo_contents; /* aligned contents */
+ struct list_head fo_list; /* enqueue on fl_list */
+ void *fo_contents; /* aligned contents */
} lnet_freeobj_t;
#endif
typedef struct {
- /* info about peers we are trying to fail */
- cfs_list_t tp_list; /* ln_test_peers */
- lnet_nid_t tp_nid; /* matching nid */
- unsigned int tp_threshold; /* # failures to simulate */
+ /* info about peers we are trying to fail */
+ struct list_head tp_list; /* ln_test_peers */
+ lnet_nid_t tp_nid; /* matching nid */
+ unsigned int tp_threshold; /* # failures to simulate */
} lnet_test_peer_t;
#define LNET_COOKIE_TYPE_MD 1
typedef struct lnet_lnd
{
- /* fields managed by portals */
- cfs_list_t lnd_list; /* stash in the LND table */
- int lnd_refcount; /* # active instances */
+ /* fields managed by portals */
+ struct list_head lnd_list; /* stash in the LND table */
+ int lnd_refcount; /* # active instances */
/* fields initialised by the LND */
unsigned int lnd_type;
int tq_credits; /* # tx credits free */
int tq_credits_min; /* lowest it's been */
int tq_credits_max; /* total # tx credits */
- cfs_list_t tq_delayed; /* delayed TXs */
+ struct list_head tq_delayed; /* delayed TXs */
};
#define LNET_MAX_INTERFACES 16
pthread_mutex_t ni_lock;
# endif
#endif
- cfs_list_t ni_list; /* chain on ln_nis */
- cfs_list_t ni_cptlist; /* chain on ln_nis_cpt */
+ struct list_head ni_list; /* chain on ln_nis */
+ struct list_head ni_cptlist; /* chain on ln_nis_cpt */
int ni_maxtxcredits; /* # tx credits */
/* # per-peer send credits */
int ni_peertxcredits;
#define LNET_PINGINFO_SIZE offsetof(lnet_ping_info_t, pi_ni[LNET_MAX_RTR_NIS])
typedef struct {
/* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
- cfs_list_t rcd_list;
+ struct list_head rcd_list;
lnet_handle_md_t rcd_mdh; /* ping buffer MD */
struct lnet_peer *rcd_gateway; /* reference to gateway */
lnet_ping_info_t *rcd_pinginfo; /* ping buffer */
} lnet_rc_data_t;
typedef struct lnet_peer {
- cfs_list_t lp_hashlist; /* chain on peer hash */
- cfs_list_t lp_txq; /* messages blocking for tx credits */
- cfs_list_t lp_rtrq; /* messages blocking for router credits */
- cfs_list_t lp_rtr_list; /* chain on router list */
- int lp_txcredits; /* # tx credits available */
- int lp_mintxcredits; /* low water mark */
- int lp_rtrcredits; /* # router credits */
- int lp_minrtrcredits; /* low water mark */
- unsigned int lp_alive:1; /* alive/dead? */
- unsigned int lp_notify:1; /* notification outstanding? */
- unsigned int lp_notifylnd:1; /* outstanding notification for LND? */
- unsigned int lp_notifying:1; /* some thread is handling notification */
- unsigned int lp_ping_notsent; /* SEND event outstanding from ping */
- int lp_alive_count; /* # times router went dead<->alive */
- long lp_txqnob; /* bytes queued for sending */
- cfs_time_t lp_timestamp; /* time of last aliveness news */
- cfs_time_t lp_ping_timestamp; /* time of last ping attempt */
- cfs_time_t lp_ping_deadline; /* != 0 if ping reply expected */
- cfs_time_t lp_last_alive; /* when I was last alive */
- cfs_time_t lp_last_query; /* when lp_ni was queried last time */
- lnet_ni_t *lp_ni; /* interface peer is on */
- lnet_nid_t lp_nid; /* peer's NID */
- int lp_refcount; /* # refs */
+ /* chain on peer hash */
+ struct list_head lp_hashlist;
+ /* messages blocking for tx credits */
+ struct list_head lp_txq;
+ /* messages blocking for router credits */
+ struct list_head lp_rtrq;
+ /* chain on router list */
+ struct list_head lp_rtr_list;
+ /* # tx credits available */
+ int lp_txcredits;
+ /* low water mark */
+ int lp_mintxcredits;
+ /* # router credits */
+ int lp_rtrcredits;
+ /* low water mark */
+ int lp_minrtrcredits;
+ /* alive/dead? */
+ unsigned int lp_alive:1;
+ /* notification outstanding? */
+ unsigned int lp_notify:1;
+ /* outstanding notification for LND? */
+ unsigned int lp_notifylnd:1;
+ /* some thread is handling notification */
+ unsigned int lp_notifying:1;
+ /* SEND event outstanding from ping */
+ unsigned int lp_ping_notsent;
+ /* # times router went dead<->alive */
+ int lp_alive_count;
+ /* bytes queued for sending */
+ long lp_txqnob;
+ /* time of last aliveness news */
+ cfs_time_t lp_timestamp;
+ /* time of last ping attempt */
+ cfs_time_t lp_ping_timestamp;
+ /* != 0 if ping reply expected */
+ cfs_time_t lp_ping_deadline;
+ /* when I was last alive */
+ cfs_time_t lp_last_alive;
+ /* when lp_ni was queried last time */
+ cfs_time_t lp_last_query;
+ /* interface peer is on */
+ lnet_ni_t *lp_ni;
+ lnet_nid_t lp_nid; /* peer's NID */
+ int lp_refcount; /* # refs */
int lp_cpt; /* CPT this peer attached on */
/* # refs from lnet_route_t::lr_gateway */
int lp_rtr_refcount;
/* returned RC ping features */
unsigned int lp_ping_feats;
- cfs_list_t lp_routes; /* routers on this peer */
+ struct list_head lp_routes; /* routers on this peer */
lnet_rc_data_t *lp_rcd; /* router checker state */
} lnet_peer_t;
struct lnet_peer_table {
int pt_version; /* /proc validity stamp */
int pt_number; /* # peers extant */
- cfs_list_t pt_deathrow; /* zombie peers */
- cfs_list_t *pt_hash; /* NID->peer hash */
+ struct list_head pt_deathrow; /* zombie peers */
+ struct list_head *pt_hash; /* NID->peer hash */
};
/* peer aliveness is enabled only on routers for peers in a network where the
(lp)->lp_ni->ni_peertimeout > 0)
typedef struct {
- cfs_list_t lr_list; /* chain on net */
- cfs_list_t lr_gwlist; /* chain on gateway */
+ struct list_head lr_list; /* chain on net */
+ struct list_head lr_gwlist; /* chain on gateway */
lnet_peer_t *lr_gateway; /* router node */
__u32 lr_net; /* remote network number */
int lr_seq; /* sequence for round-robin */
#define LNET_REMOTE_NETS_HASH_SIZE (1 << the_lnet.ln_remote_nets_hbits)
typedef struct {
- cfs_list_t lrn_list; /* chain on ln_remote_nets_hash */
- cfs_list_t lrn_routes; /* routes to me */
- __u32 lrn_net; /* my net number */
+ /* chain on ln_remote_nets_hash */
+ struct list_head lrn_list;
+ /* routes to me */
+ struct list_head lrn_routes;
+ /* my net number */
+ __u32 lrn_net;
} lnet_remotenet_t;
typedef struct {
- cfs_list_t rbp_bufs; /* my free buffer pool */
- cfs_list_t rbp_msgs; /* messages blocking for a buffer */
- int rbp_npages; /* # pages in each buffer */
- int rbp_nbuffers; /* # buffers */
- int rbp_credits; /* # free buffers / blocked messages */
- int rbp_mincredits; /* low water mark */
+ /* my free buffer pool */
+ struct list_head rbp_bufs;
+ /* messages blocking for a buffer */
+ struct list_head rbp_msgs;
+ /* # pages in each buffer */
+ int rbp_npages;
+ /* # buffers */
+ int rbp_nbuffers;
+ /* # free buffers / blocked messages */
+ int rbp_credits;
+ /* low water mark */
+ int rbp_mincredits;
} lnet_rtrbufpool_t;
typedef struct {
- cfs_list_t rb_list; /* chain on rbp_bufs */
- lnet_rtrbufpool_t *rb_pool; /* owning pool */
- lnet_kiov_t rb_kiov[0]; /* the buffer space */
+ struct list_head rb_list; /* chain on rbp_bufs */
+ lnet_rtrbufpool_t *rb_pool; /* owning pool */
+ lnet_kiov_t rb_kiov[0]; /* the buffer space */
} lnet_rtrbuf_t;
typedef struct {
unsigned int mt_enabled;
/* bitmap to flag whether MEs on mt_hash are exhausted or not */
__u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
- cfs_list_t *mt_mhash; /* matching hash */
+ struct list_head *mt_mhash; /* matching hash */
};
/* these are only useful for wildcard portal */
/* flags on this portal: lazy, unique... */
unsigned int ptl_options;
/* list of messages which are stealing buffer */
- cfs_list_t ptl_msg_stealing;
+ struct list_head ptl_msg_stealing;
/* messages blocking for MD */
- cfs_list_t ptl_msg_delayed;
+ struct list_head ptl_msg_delayed;
/* Match table for each CPT */
struct lnet_match_table **ptl_mtables;
/* spread rotor of incoming "PUT" */
struct lnet_res_container {
unsigned int rec_type; /* container type */
__u64 rec_lh_cookie; /* cookie generator */
- cfs_list_t rec_active; /* active resource list */
- cfs_list_t *rec_lh_hash; /* handle hash */
+ struct list_head rec_active; /* active resource list */
+ struct list_head *rec_lh_hash; /* handle hash */
#ifdef LNET_USE_LIB_FREELIST
lnet_freelist_t rec_freelist; /* freelist for resources */
#endif
/* max # threads finalizing */
int msc_nfinalizers;
/* msgs waiting to complete finalizing */
- cfs_list_t msc_finalizing;
- cfs_list_t msc_active; /* active message list */
+ struct list_head msc_finalizing;
+ struct list_head msc_active; /* active message list */
/* threads doing finalization */
void **msc_finalizers;
#ifdef LNET_USE_LIB_FREELIST
/* Event Queue container */
struct lnet_res_container ln_eq_container;
#ifdef __KERNEL__
- wait_queue_head_t ln_eq_waitq;
+ wait_queue_head_t ln_eq_waitq;
spinlock_t ln_eq_wait_lock;
#else
# ifndef HAVE_LIBPTHREAD
lnet_counters_t **ln_counters;
struct lnet_peer_table **ln_peer_tables;
/* failure simulation */
- cfs_list_t ln_test_peers;
+ struct list_head ln_test_peers;
- cfs_list_t ln_nis; /* LND instances */
+ struct list_head ln_nis; /* LND instances */
/* NIs bond on specific CPT(s) */
- cfs_list_t ln_nis_cpt;
+ struct list_head ln_nis_cpt;
/* dying LND instances */
- cfs_list_t ln_nis_zombie;
+ struct list_head ln_nis_zombie;
lnet_ni_t *ln_loni; /* the loopback NI */
/* NI to wait for events in */
lnet_ni_t *ln_eq_waitni;
/* remote networks with routes to them */
- cfs_list_t *ln_remote_nets_hash;
+ struct list_head *ln_remote_nets_hash;
/* validity stamp */
__u64 ln_remote_nets_version;
/* list of all known routers */
- cfs_list_t ln_routers;
+ struct list_head ln_routers;
/* validity stamp */
__u64 ln_routers_version;
/* percpt router buffer pools */
/* router checker's event queue */
lnet_handle_eq_t ln_rc_eqh;
/* rcd still pending on net */
- cfs_list_t ln_rcd_deathrow;
+ struct list_head ln_rcd_deathrow;
/* rcd ready for free */
- cfs_list_t ln_rcd_zombie;
+ struct list_head ln_rcd_zombie;
#ifdef __KERNEL__
/* serialise startup/shutdown */
struct semaphore ln_rc_signal;
/* uniquely identifies this ni in this epoch */
__u64 ln_interface_cookie;
/* registered LNDs */
- cfs_list_t ln_lnds;
+ struct list_head ln_lnds;
/* space for network names */
char *ln_network_tokens;
*** for list_batch command */
typedef struct {
- cfs_list_t rpe_link; /* link chain */
- lnet_process_id_t rpe_peer; /* peer's id */
- struct timeval rpe_stamp; /* time stamp of RPC */
- int rpe_state; /* peer's state */
- int rpe_rpc_errno; /* RPC errno */
+ struct list_head rpe_link; /* link chain */
+ lnet_process_id_t rpe_peer; /* peer's id */
+ struct timeval rpe_stamp; /* time stamp of RPC */
+ int rpe_state; /* peer's state */
+ int rpe_rpc_errno; /* RPC errno */
lst_sid_t rpe_sid; /* peer's session id */
int rpe_fwk_errno; /* framework errno */
char *lstio_dbg_namep; /* IN: name of group|batch */
int lstio_dbg_count; /* IN: # of test nodes to debug */
lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test nodes */
- cfs_list_t *lstio_dbg_resultp; /* OUT: list head of result buffer */
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_dbg_resultp;
} lstio_debug_args_t;
typedef struct {
#define LST_GROUP_RMND 3 /* delete nodes from the group */
typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_opc; /* IN: OPC */
- int lstio_grp_args; /* IN: arguments */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
- int lstio_grp_count; /* IN: # of nodes id */
- lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */
- cfs_list_t *lstio_grp_resultp; /* OUT: list head of result buffer */
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_opc; /* IN: OPC */
+ int lstio_grp_args; /* IN: arguments */
+ int lstio_grp_nmlen; /* IN: name length */
+ char *lstio_grp_namep; /* IN: group name */
+ int lstio_grp_count; /* IN: # of node IDs */
+ lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_grp_resultp;
} lstio_group_update_args_t;
typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
- int lstio_grp_count; /* IN: # of nodes */
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name length */
+ char *lstio_grp_namep; /* IN: group name */
+ int lstio_grp_count; /* IN: # of nodes */
/** OUT: session features */
- unsigned *lstio_grp_featp;
- lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */
- cfs_list_t *lstio_grp_resultp; /* OUT: list head of result buffer */
+ unsigned *lstio_grp_featp;
+ lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_grp_resultp;
} lstio_group_nodes_args_t;
typedef struct {
} lstio_batch_del_args_t;
typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_timeout; /* IN: timeout for the batch */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- cfs_list_t *lstio_bat_resultp; /* OUT: list head of result buffer */
+ /* IN: session key */
+ int lstio_bat_key;
+ /* IN: timeout for the batch */
+ int lstio_bat_timeout;
+ /* IN: name length */
+ int lstio_bat_nmlen;
+ /* IN: batch name */
+ char *lstio_bat_namep;
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_bat_resultp;
} lstio_batch_run_args_t;
typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_force; /* IN: abort unfinished test RPC */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- cfs_list_t *lstio_bat_resultp; /* OUT: list head of result buffer */
+ /* IN: session key */
+ int lstio_bat_key;
+ /* IN: abort unfinished test RPC */
+ int lstio_bat_force;
+ /* IN: name length */
+ int lstio_bat_nmlen;
+ /* IN: batch name */
+ char *lstio_bat_namep;
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_bat_resultp;
} lstio_batch_stop_args_t;
typedef struct {
- int lstio_bat_key; /* IN: session key */
- int lstio_bat_testidx; /* IN: test index */
- int lstio_bat_client; /* IN: is test client? */
- int lstio_bat_timeout; /* IN: timeout for waiting */
- int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- cfs_list_t *lstio_bat_resultp; /* OUT: list head of result buffer */
+ /* IN: session key */
+ int lstio_bat_key;
+ /* IN: test index */
+ int lstio_bat_testidx;
+ /* IN: is test client? */
+ int lstio_bat_client;
+ /* IN: timeout for waiting */
+ int lstio_bat_timeout;
+ /* IN: name length */
+ int lstio_bat_nmlen;
+ /* IN: batch name */
+ char *lstio_bat_namep;
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_bat_resultp;
} lstio_batch_query_args_t;
typedef struct {
/* add stat in session */
typedef struct {
- int lstio_sta_key; /* IN: session key */
- int lstio_sta_timeout; /* IN: timeout for stat requst */
- int lstio_sta_nmlen; /* IN: group name length */
- char *lstio_sta_namep; /* IN: group name */
- int lstio_sta_count; /* IN: # of pid */
- lnet_process_id_t *lstio_sta_idsp; /* IN: pid */
- cfs_list_t *lstio_sta_resultp; /* OUT: list head of result buffer */
+ /* IN: session key */
+ int lstio_sta_key;
+ /* IN: timeout for stat request */
+ int lstio_sta_timeout;
+ /* IN: group name length */
+ int lstio_sta_nmlen;
+ /* IN: group name */
+ char *lstio_sta_namep;
+ /* IN: # of PIDs */
+ int lstio_sta_count;
+ /* IN: PIDs */
+ lnet_process_id_t *lstio_sta_idsp;
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_sta_resultp;
} lstio_stat_args_t;
typedef enum {
int lstio_tes_dgrp_nmlen; /* IN: destination group name length */
char *lstio_tes_dgrp_name; /* IN: group name */
- int lstio_tes_param_len; /* IN: param buffer len */
- void *lstio_tes_param; /* IN: parameter for specified test:
- lstio_bulk_param_t,
- lstio_ping_param_t,
- ... more */
- int *lstio_tes_retp; /* OUT: private returned value */
- cfs_list_t *lstio_tes_resultp; /* OUT: list head of result buffer */
+ /* IN: param buffer len */
+ int lstio_tes_param_len;
+ /* IN: parameter for specified test:
+ lstio_bulk_param_t,
+ lstio_ping_param_t,
+ ... more */
+ void *lstio_tes_param;
+ /* OUT: private returned value */
+ int *lstio_tes_retp;
+ /* OUT: list head of result buffer */
+ struct list_head *lstio_tes_resultp;
} lstio_test_args_t;
typedef enum {
return -ENOMEM;
}
- memset(peer, 0, sizeof(*peer)); /* zero flags etc */
+ memset(peer, 0, sizeof(*peer)); /* zero flags etc */
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ peer->ibp_ni = ni;
+ peer->ibp_nid = nid;
+ peer->ibp_error = 0;
+ peer->ibp_last_alive = 0;
+ atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
- CFS_INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- CFS_INIT_LIST_HEAD(&peer->ibp_conns);
- CFS_INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
+ INIT_LIST_HEAD(&peer->ibp_conns);
+ INIT_LIST_HEAD(&peer->ibp_tx_queue);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT (net->ibn_shutdown == 0);
+ /* always called with a ref on ni, which prevents ni being shutdown */
+ LASSERT(net->ibn_shutdown == 0);
- /* npeers only grows with the global lock held */
+ /* npeers only grows with the global lock held */
atomic_inc(&net->ibn_npeers);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- *peerp = peer;
- return 0;
+ *peerp = peer;
+ return 0;
}
void
kiblnd_destroy_peer (kib_peer_t *peer)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer->ibp_ni->ni_data;
- LASSERT (net != NULL);
+ LASSERT(net != NULL);
LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT (!kiblnd_peer_active(peer));
- LASSERT (peer->ibp_connecting == 0);
- LASSERT (peer->ibp_accepting == 0);
- LASSERT (cfs_list_empty(&peer->ibp_conns));
- LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
-
- LIBCFS_FREE(peer, sizeof(*peer));
-
- /* NB a peer's connections keep a reference on their peer until
- * they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
- * zero. */
+ LASSERT(!kiblnd_peer_active(peer));
+ LASSERT(peer->ibp_connecting == 0);
+ LASSERT(peer->ibp_accepting == 0);
+ LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer->ibp_tx_queue));
+
+ LIBCFS_FREE(peer, sizeof(*peer));
+
+ /* NB a peer's connections keep a reference on their peer until
+ * they are destroyed, so we can be assured that _all_ state to do
+ * with this peer has been cleaned up when its refcount drops to
+ * zero. */
atomic_dec(&net->ibn_npeers);
}
kib_peer_t *
kiblnd_find_peer_locked (lnet_nid_t nid)
{
- /* the caller is responsible for accounting the additional reference
- * that this creates */
- cfs_list_t *peer_list = kiblnd_nid2peerlist(nid);
- cfs_list_t *tmp;
- kib_peer_t *peer;
+ /* the caller is responsible for accounting the additional reference
+ * that this creates */
+ struct list_head *peer_list = kiblnd_nid2peerlist(nid);
+ struct list_head *tmp;
+ kib_peer_t *peer;
- cfs_list_for_each (tmp, peer_list) {
+ list_for_each(tmp, peer_list) {
- peer = cfs_list_entry(tmp, kib_peer_t, ibp_list);
+ peer = list_entry(tmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns)); /* active conn */
+ LASSERT(peer->ibp_connecting > 0 || /* creating conns */
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns)); /* active conn */
- if (peer->ibp_nid != nid)
- continue;
+ if (peer->ibp_nid != nid)
+ continue;
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
+ CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
+ peer, libcfs_nid2str(nid),
atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
- }
- return NULL;
+ peer->ibp_version);
+ return peer;
+ }
+ return NULL;
}
void
kiblnd_unlink_peer_locked (kib_peer_t *peer)
{
- LASSERT (cfs_list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer->ibp_conns));
LASSERT (kiblnd_peer_active(peer));
- cfs_list_del_init(&peer->ibp_list);
+ list_del_init(&peer->ibp_list);
/* lose peerlist's ref */
kiblnd_peer_decref(peer);
}
kiblnd_get_peer_info (lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
- cfs_list_t *ptmp;
- int i;
- unsigned long flags;
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ int i;
+ unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- if (index-- > 0)
- continue;
+ if (index-- > 0)
+ continue;
- *nidp = peer->ibp_nid;
+ *nidp = peer->ibp_nid;
*count = atomic_read(&peer->ibp_refcount);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return -ENOENT;
+ return -ENOENT;
}
void
kiblnd_del_peer_locked (kib_peer_t *peer)
{
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
- kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ kib_conn_t *conn;
- if (cfs_list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
- } else {
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ if (list_empty(&peer->ibp_conns)) {
+ kiblnd_unlink_peer_locked(peer);
+ } else {
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- kiblnd_close_conn_locked(conn, 0);
- }
- /* NB closing peer's last conn unlinked it. */
- }
- /* NB peer now unlinked; might even be freed if the peer table had the
- * last ref on it. */
+ kiblnd_close_conn_locked(conn, 0);
+ }
+ /* NB closing peer's last conn unlinked it. */
+ }
+ /* NB peer now unlinked; might even be freed if the peer table had the
+ * last ref on it. */
}
int
kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
{
- CFS_LIST_HEAD (zombies);
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
- kib_peer_t *peer;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int rc = -ENOENT;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ kib_peer_t *peer;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int rc = -ENOENT;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
hi = kiblnd_data.kib_peer_hash_size - 1;
}
- for (i = lo; i <= hi; i++) {
- cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
- continue;
+ if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
+ continue;
- if (!cfs_list_empty(&peer->ibp_tx_queue)) {
- LASSERT (cfs_list_empty(&peer->ibp_conns));
+ if (!list_empty(&peer->ibp_tx_queue)) {
+ LASSERT(list_empty(&peer->ibp_conns));
- cfs_list_splice_init(&peer->ibp_tx_queue,
- &zombies);
- }
+ list_splice_init(&peer->ibp_tx_queue,
+ &zombies);
+ }
- kiblnd_del_peer_locked(peer);
- rc = 0; /* matched something */
- }
- }
+ kiblnd_del_peer_locked(peer);
+ rc = 0; /* matched something */
+ }
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(ni, &zombies, -EIO);
+ kiblnd_txlist_done(ni, &zombies, -EIO);
- return rc;
+ return rc;
}
kib_conn_t *
-kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
+kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- kib_peer_t *peer;
- cfs_list_t *ptmp;
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- int i;
- unsigned long flags;
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ int i;
+ unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
+ list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- cfs_list_for_each (ctmp, &peer->ibp_conns) {
- if (index-- > 0)
- continue;
+ list_for_each(ctmp, &peer->ibp_conns) {
+ if (index-- > 0)
+ continue;
- conn = cfs_list_entry(ctmp, kib_conn_t,
- ibc_list);
- kiblnd_conn_addref(conn);
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ kiblnd_conn_addref(conn);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
return conn;
void
kiblnd_debug_conn (kib_conn_t *conn)
{
- cfs_list_t *tmp;
- int i;
+ struct list_head *tmp;
+ int i;
spin_lock(&conn->ibc_lock);
- CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
+ CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n",
atomic_read(&conn->ibc_refcount), conn,
- conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
- conn->ibc_state, conn->ibc_noops_posted,
- conn->ibc_nsends_posted, conn->ibc_credits,
- conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
- CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
+ conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d "
+ " r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted,
+ conn->ibc_nsends_posted, conn->ibc_credits,
+ conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
+ CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
- CDEBUG(D_CONSOLE, " early_rxs:\n");
- cfs_list_for_each(tmp, &conn->ibc_early_rxs)
- kiblnd_debug_rx(cfs_list_entry(tmp, kib_rx_t, rx_list));
+ CDEBUG(D_CONSOLE, " early_rxs:\n");
+ list_for_each(tmp, &conn->ibc_early_rxs)
+ kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
- CDEBUG(D_CONSOLE, " tx_noops:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_noops)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_noops:\n");
+ list_for_each(tmp, &conn->ibc_tx_noops)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue_nocred)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " tx_queue:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_queue:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " active_txs:\n");
- cfs_list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " active_txs:\n");
+ list_for_each(tmp, &conn->ibc_active_txs)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " rxs:\n");
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
- kiblnd_debug_rx(&conn->ibc_rxs[i]);
+ CDEBUG(D_CONSOLE, " rxs:\n");
+ for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
+ kiblnd_debug_rx(&conn->ibc_rxs[i]);
spin_unlock(&conn->ibc_lock);
}
kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
int state, int version)
{
- /* CAVEAT EMPTOR:
- * If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
- * she must dispose of 'cmid'. (Actually I'd block forever if I tried
- * to destroy 'cmid' here since I'm called from the CM which still has
- * its ref on 'cmid'). */
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ /* CAVEAT EMPTOR:
+ * If the new conn is created successfully it takes over the caller's
+ * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
+ * is destroyed. On failure, the caller's ref on 'peer' remains and
+ * she must dispose of 'cmid'. (Actually I'd block forever if I tried
+ * to destroy 'cmid' here since I'm called from the CM which still has
+ * its ref on 'cmid'). */
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_net_t *net = peer->ibp_ni->ni_data;
kib_dev_t *dev;
- struct ib_qp_init_attr *init_qp_attr;
+ struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
kib_conn_t *conn;
struct ib_cq *cq;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
- if (conn == NULL) {
- CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
- goto failed_1;
- }
-
- conn->ibc_state = IBLND_CONN_INIT;
- conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
- cmid->context = conn; /* for future CM callbacks */
- conn->ibc_cmid = cmid;
-
- CFS_INIT_LIST_HEAD(&conn->ibc_early_rxs);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_noops);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
- CFS_INIT_LIST_HEAD(&conn->ibc_active_txs);
+ if (conn == NULL) {
+ CERROR("Can't allocate connection for %s\n",
+ libcfs_nid2str(peer->ibp_nid));
+ goto failed_1;
+ }
+
+ conn->ibc_state = IBLND_CONN_INIT;
+ conn->ibc_version = version;
+ conn->ibc_peer = peer; /* I take the caller's ref */
+ cmid->context = conn; /* for future CM callbacks */
+ conn->ibc_cmid = cmid;
+
+ INIT_LIST_HEAD(&conn->ibc_early_rxs);
+ INIT_LIST_HEAD(&conn->ibc_tx_noops);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
+ INIT_LIST_HEAD(&conn->ibc_active_txs);
spin_lock_init(&conn->ibc_lock);
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
/* wakeup failover thread and teardown connection */
if (kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
+ list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
wake_up(&kiblnd_data.kib_failover_waitq);
}
LASSERT (!in_interrupt());
LASSERT (atomic_read(&conn->ibc_refcount) == 0);
- LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
- LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT (cfs_list_empty(&conn->ibc_active_txs));
+ LASSERT(list_empty(&conn->ibc_early_rxs));
+ LASSERT(list_empty(&conn->ibc_tx_noops));
+ LASSERT(list_empty(&conn->ibc_tx_queue));
+ LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT(list_empty(&conn->ibc_active_txs));
LASSERT (conn->ibc_noops_posted == 0);
LASSERT (conn->ibc_nsends_posted == 0);
}
int
-kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
+kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
{
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
- int count = 0;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- CDEBUG(D_NET, "Closing conn -> %s, "
- "version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, why);
+ CDEBUG(D_NET, "Closing conn -> %s, "
+ "version: %x, reason: %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_version, why);
- kiblnd_close_conn_locked(conn, why);
- count++;
- }
+ kiblnd_close_conn_locked(conn, why);
+ count++;
+ }
- return count;
+ return count;
}
int
-kiblnd_close_stale_conns_locked (kib_peer_t *peer,
- int version, __u64 incarnation)
+kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+ int version, __u64 incarnation)
{
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
- int count = 0;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- if (conn->ibc_version == version &&
- conn->ibc_incarnation == incarnation)
- continue;
+ if (conn->ibc_version == version &&
+ conn->ibc_incarnation == incarnation)
+ continue;
- CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:"LPX64"(%x, "LPX64")\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, conn->ibc_incarnation,
- version, incarnation);
+ CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
+ "incarnation:"LPX64"(%x, "LPX64")\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_version, conn->ibc_incarnation,
+ version, incarnation);
- kiblnd_close_conn_locked(conn, -ESTALE);
- count++;
- }
+ kiblnd_close_conn_locked(conn, -ESTALE);
+ count++;
+ }
- return count;
+ return count;
}
int
-kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
+kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int count = 0;
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int count = 0;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (nid != LNET_NID_ANY)
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- else {
- lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
- }
+ if (nid != LNET_NID_ANY)
+ lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ else {
+ lo = 0;
+ hi = kiblnd_data.kib_peer_hash_size - 1;
+ }
- for (i = lo; i <= hi; i++) {
- cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
- continue;
+ if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
+ continue;
- count += kiblnd_close_peer_conns_locked(peer, 0);
- }
- }
+ count += kiblnd_close_peer_conns_locked(peer, 0);
+ }
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* wildcards always succeed */
- if (nid == LNET_NID_ANY)
- return 0;
+ /* wildcards always succeed */
+ if (nid == LNET_NID_ANY)
+ return 0;
- return (count == 0) ? -ENOENT : 0;
+ return (count == 0) ? -ENOENT : 0;
}
int
}
void
-kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
read_lock_irqsave(glock, flags);
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
- LASSERT (peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns)); /* active conn */
- last_alive = peer->ibp_last_alive;
- }
+ peer = kiblnd_find_peer_locked(nid);
+ if (peer != NULL) {
+ LASSERT(peer->ibp_connecting > 0 || /* creating conns */
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns)); /* active conn */
+ last_alive = peer->ibp_last_alive;
+ }
read_unlock_irqrestore(glock, flags);
- if (last_alive != 0)
- *when = last_alive;
+ if (last_alive != 0)
+ *when = last_alive;
- /* peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx */
- if (peer == NULL)
- kiblnd_launch_tx(ni, NULL, nid);
+ /* peer is not persistent in hash, trigger peer creation
+ * and connection establishment with a NULL tx */
+ if (peer == NULL)
+ kiblnd_launch_tx(ni, NULL, nid);
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1);
- return;
+ CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
+ libcfs_nid2str(nid), peer,
+ last_alive ? cfs_duration_sec(now - last_alive) : -1);
+ return;
}
void
tpo->tpo_hdev = kiblnd_current_hdev(dev);
- for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
- page = txpgs->ibp_pages[ipage];
- tx = &tpo->tpo_tx_descs[i];
+ for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
+ page = txpgs->ibp_pages[ipage];
+ tx = &tpo->tpo_tx_descs[i];
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
- page_offset);
+ tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+ page_offset);
- tx->tx_msgaddr = kiblnd_dma_map_single(
- tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
- IBLND_MSG_SIZE, DMA_TO_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
- tx->tx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
+ tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
+ tx->tx_msg,
+ IBLND_MSG_SIZE,
+ DMA_TO_DEVICE);
+ LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
+ tx->tx_msgaddr));
+ KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
- cfs_list_add(&tx->tx_list, &pool->po_free_list);
+ list_add(&tx->tx_list, &pool->po_free_list);
- page_offset += IBLND_MSG_SIZE;
- LASSERT (page_offset <= PAGE_SIZE);
+ page_offset += IBLND_MSG_SIZE;
+ LASSERT(page_offset <= PAGE_SIZE);
- if (page_offset == PAGE_SIZE) {
- page_offset = 0;
- ipage++;
- LASSERT (ipage <= txpgs->ibp_npages);
- }
- }
+ if (page_offset == PAGE_SIZE) {
+ page_offset = 0;
+ ipage++;
+ LASSERT(ipage <= txpgs->ibp_npages);
+ }
+ }
}
struct ib_mr *
}
void
-kiblnd_destroy_fmr_pool_list(cfs_list_t *head)
+kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *pool;
+ kib_fmr_pool_t *pool;
- while (!cfs_list_empty(head)) {
- pool = cfs_list_entry(head->next, kib_fmr_pool_t, fpo_list);
- cfs_list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
- }
+ while (!list_empty(head)) {
+ pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
+ list_del(&pool->fpo_list);
+ kiblnd_destroy_fmr_pool(pool);
+ }
}
static int kiblnd_fmr_pool_size(int ncpts)
}
static void
-kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, cfs_list_t *zombies)
+kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies)
{
if (fps->fps_net == NULL) /* initialized? */
return;
spin_lock(&fps->fps_lock);
- while (!cfs_list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = cfs_list_entry(fps->fps_pool_list.next,
+ while (!list_empty(&fps->fps_pool_list)) {
+ kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
kib_fmr_pool_t, fpo_list);
- fpo->fpo_failed = 1;
- cfs_list_del(&fpo->fpo_list);
- if (fpo->fpo_map_count == 0)
- cfs_list_add(&fpo->fpo_list, zombies);
- else
- cfs_list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
- }
+ fpo->fpo_failed = 1;
+ list_del(&fpo->fpo_list);
+ if (fpo->fpo_map_count == 0)
+ list_add(&fpo->fpo_list, zombies);
+ else
+ list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+ }
spin_unlock(&fps->fps_lock);
}
kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
int pool_size, int flush_trigger)
{
- kib_fmr_pool_t *fpo;
- int rc;
+ kib_fmr_pool_t *fpo;
+ int rc;
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
+ memset(fps, 0, sizeof(kib_fmr_poolset_t));
- fps->fps_net = net;
+ fps->fps_net = net;
fps->fps_cpt = cpt;
fps->fps_pool_size = pool_size;
fps->fps_flush_trigger = flush_trigger;
spin_lock_init(&fps->fps_lock);
- CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
- CFS_INIT_LIST_HEAD(&fps->fps_failed_pool_list);
+ INIT_LIST_HEAD(&fps->fps_pool_list);
+ INIT_LIST_HEAD(&fps->fps_failed_pool_list);
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- if (rc == 0)
- cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ rc = kiblnd_create_fmr_pool(fps, &fpo);
+ if (rc == 0)
+ list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
- return rc;
+ return rc;
}
static int
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
- CFS_LIST_HEAD (zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
- cfs_time_t now = cfs_time_current();
- kib_fmr_pool_t *tmp;
- int rc;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ kib_fmr_pool_t *fpo = fmr->fmr_pool;
+ kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ cfs_time_t now = cfs_time_current();
+ kib_fmr_pool_t *tmp;
+ int rc;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT (rc == 0);
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT(rc == 0);
- if (status != 0) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT (rc == 0);
- }
+ if (status != 0) {
+ rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
+ LASSERT(rc == 0);
+ }
- fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = NULL;
+ fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
- fpo->fpo_map_count --; /* decref the pool */
+ fpo->fpo_map_count--; /* decref the pool */
- cfs_list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
- /* the first pool is persistent */
- if (fps->fps_pool_list.next == &fpo->fpo_list)
- continue;
+ list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
+ /* the first pool is persistent */
+ if (fps->fps_pool_list.next == &fpo->fpo_list)
+ continue;
- if (kiblnd_fmr_pool_is_idle(fpo, now)) {
- cfs_list_move(&fpo->fpo_list, &zombies);
- fps->fps_version ++;
- }
- }
+ if (kiblnd_fmr_pool_is_idle(fpo, now)) {
+ list_move(&fpo->fpo_list, &zombies);
+ fps->fps_version++;
+ }
+ }
spin_unlock(&fps->fps_lock);
- if (!cfs_list_empty(&zombies))
- kiblnd_destroy_fmr_pool_list(&zombies);
+ if (!list_empty(&zombies))
+ kiblnd_destroy_fmr_pool_list(&zombies);
}
int
__u64 version;
int rc;
- again:
+again:
spin_lock(&fps->fps_lock);
version = fps->fps_version;
- cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+ list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;
spin_unlock(&fps->fps_lock);
fps->fps_increasing = 0;
if (rc == 0) {
fps->fps_version++;
- cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
}
static void
kiblnd_fini_pool(kib_pool_t *pool)
{
- LASSERT (cfs_list_empty(&pool->po_free_list));
- LASSERT (pool->po_allocated == 0);
+ LASSERT(list_empty(&pool->po_free_list));
+ LASSERT(pool->po_allocated == 0);
- CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
+ CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
static void
kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
{
- CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
+ CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
- memset(pool, 0, sizeof(kib_pool_t));
- CFS_INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- pool->po_owner = ps;
- pool->po_size = size;
+ memset(pool, 0, sizeof(kib_pool_t));
+ INIT_LIST_HEAD(&pool->po_free_list);
+ pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_owner = ps;
+ pool->po_size = size;
}
void
-kiblnd_destroy_pool_list(cfs_list_t *head)
+kiblnd_destroy_pool_list(struct list_head *head)
{
- kib_pool_t *pool;
+ kib_pool_t *pool;
- while (!cfs_list_empty(head)) {
- pool = cfs_list_entry(head->next, kib_pool_t, po_list);
- cfs_list_del(&pool->po_list);
+ while (!list_empty(head)) {
+ pool = list_entry(head->next, kib_pool_t, po_list);
+ list_del(&pool->po_list);
- LASSERT (pool->po_owner != NULL);
- pool->po_owner->ps_pool_destroy(pool);
- }
+ LASSERT(pool->po_owner != NULL);
+ pool->po_owner->ps_pool_destroy(pool);
+ }
}
static void
-kiblnd_fail_poolset(kib_poolset_t *ps, cfs_list_t *zombies)
+kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
{
if (ps->ps_net == NULL) /* initialized? */
return;
spin_lock(&ps->ps_lock);
- while (!cfs_list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = cfs_list_entry(ps->ps_pool_list.next,
+ while (!list_empty(&ps->ps_pool_list)) {
+ kib_pool_t *po = list_entry(ps->ps_pool_list.next,
kib_pool_t, po_list);
- po->po_failed = 1;
- cfs_list_del(&po->po_list);
- if (po->po_allocated == 0)
- cfs_list_add(&po->po_list, zombies);
- else
- cfs_list_add(&po->po_list, &ps->ps_failed_pool_list);
- }
+ po->po_failed = 1;
+ list_del(&po->po_list);
+ if (po->po_allocated == 0)
+ list_add(&po->po_list, zombies);
+ else
+ list_add(&po->po_list, &ps->ps_failed_pool_list);
+ }
spin_unlock(&ps->ps_lock);
}
>= sizeof(ps->ps_name))
return -E2BIG;
spin_lock_init(&ps->ps_lock);
- CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
- CFS_INIT_LIST_HEAD(&ps->ps_failed_pool_list);
+ INIT_LIST_HEAD(&ps->ps_pool_list);
+ INIT_LIST_HEAD(&ps->ps_failed_pool_list);
- rc = ps->ps_pool_create(ps, size, &pool);
- if (rc == 0)
- cfs_list_add(&pool->po_list, &ps->ps_pool_list);
- else
- CERROR("Failed to create the first pool for %s\n", ps->ps_name);
+ rc = ps->ps_pool_create(ps, size, &pool);
+ if (rc == 0)
+ list_add(&pool->po_list, &ps->ps_pool_list);
+ else
+ CERROR("Failed to create the first pool for %s\n", ps->ps_name);
- return rc;
+ return rc;
}
static int
}
void
-kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
+kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
{
- CFS_LIST_HEAD (zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
- cfs_time_t now = cfs_time_current();
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ kib_poolset_t *ps = pool->po_owner;
+ kib_pool_t *tmp;
+ cfs_time_t now = cfs_time_current();
spin_lock(&ps->ps_lock);
- if (ps->ps_node_fini != NULL)
- ps->ps_node_fini(pool, node);
+ if (ps->ps_node_fini != NULL)
+ ps->ps_node_fini(pool, node);
- LASSERT (pool->po_allocated > 0);
- cfs_list_add(node, &pool->po_free_list);
- pool->po_allocated --;
+ LASSERT(pool->po_allocated > 0);
+ list_add(node, &pool->po_free_list);
+ pool->po_allocated--;
- cfs_list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
- /* the first pool is persistent */
- if (ps->ps_pool_list.next == &pool->po_list)
- continue;
+ list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
+ /* the first pool is persistent */
+ if (ps->ps_pool_list.next == &pool->po_list)
+ continue;
- if (kiblnd_pool_is_idle(pool, now))
- cfs_list_move(&pool->po_list, &zombies);
- }
+ if (kiblnd_pool_is_idle(pool, now))
+ list_move(&pool->po_list, &zombies);
+ }
spin_unlock(&ps->ps_lock);
- if (!cfs_list_empty(&zombies))
+ if (!list_empty(&zombies))
kiblnd_destroy_pool_list(&zombies);
}
-cfs_list_t *
+struct list_head *
kiblnd_pool_alloc_node(kib_poolset_t *ps)
{
- cfs_list_t *node;
- kib_pool_t *pool;
- int rc;
+ struct list_head *node;
+ kib_pool_t *pool;
+ int rc;
- again:
+again:
spin_lock(&ps->ps_lock);
- cfs_list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
- if (cfs_list_empty(&pool->po_free_list))
- continue;
-
- pool->po_allocated ++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- node = pool->po_free_list.next;
- cfs_list_del(node);
-
- if (ps->ps_node_init != NULL) {
- /* still hold the lock */
- ps->ps_node_init(pool, node);
- }
+ list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
+ if (list_empty(&pool->po_free_list))
+ continue;
+
+ pool->po_allocated++;
+ pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ node = pool->po_free_list.next;
+ list_del(node);
+
+ if (ps->ps_node_init != NULL) {
+ /* still hold the lock */
+ ps->ps_node_init(pool, node);
+ }
spin_unlock(&ps->ps_lock);
return node;
}
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
spin_lock(&ps->ps_lock);
- ps->ps_increasing = 0;
- if (rc == 0) {
- cfs_list_add_tail(&pool->po_list, &ps->ps_pool_list);
- } else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
- CERROR("Can't allocate new %s pool because out of memory\n",
- ps->ps_name);
- }
+ ps->ps_increasing = 0;
+ if (rc == 0) {
+ list_add_tail(&pool->po_list, &ps->ps_pool_list);
+ } else {
+ ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ CERROR("Can't allocate new %s pool because out of memory\n",
+ ps->ps_name);
+ }
spin_unlock(&ps->ps_lock);
goto again;
kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
{
- kib_phys_mr_t *pmr;
- cfs_list_t *node;
- int rc;
- int i;
+ kib_phys_mr_t *pmr;
+ struct list_head *node;
+ int rc;
+ int i;
- node = kiblnd_pool_alloc_node(&pps->pps_poolset);
- if (node == NULL) {
- CERROR("Failed to allocate PMR descriptor\n");
- return -ENOMEM;
- }
+ node = kiblnd_pool_alloc_node(&pps->pps_poolset);
+ if (node == NULL) {
+ CERROR("Failed to allocate PMR descriptor\n");
+ return -ENOMEM;
+ }
- pmr = container_of(node, kib_phys_mr_t, pmr_list);
- if (pmr->pmr_pool->ppo_hdev != hdev) {
- kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
- return -EAGAIN;
- }
+ pmr = container_of(node, kib_phys_mr_t, pmr_list);
+ if (pmr->pmr_pool->ppo_hdev != hdev) {
+ kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
+ return -EAGAIN;
+ }
for (i = 0; i < rd->rd_nfrags; i ++) {
pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
return 0;
}
- rc = PTR_ERR(pmr->pmr_mr);
- CERROR("Failed ib_reg_phys_mr: %d\n", rc);
+ rc = PTR_ERR(pmr->pmr_mr);
+ CERROR("Failed ib_reg_phys_mr: %d\n", rc);
- pmr->pmr_mr = NULL;
- kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
+ pmr->pmr_mr = NULL;
+ kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
- return rc;
+ return rc;
}
static void
LASSERT (pool->po_allocated == 0);
- while (!cfs_list_empty(&pool->po_free_list)) {
- pmr = cfs_list_entry(pool->po_free_list.next,
+ while (!list_empty(&pool->po_free_list)) {
+ pmr = list_entry(pool->po_free_list.next,
kib_phys_mr_t, pmr_list);
LASSERT (pmr->pmr_mr == NULL);
- cfs_list_del(&pmr->pmr_list);
+ list_del(&pmr->pmr_list);
if (pmr->pmr_ipb != NULL) {
LIBCFS_FREE(pmr->pmr_ipb,
if (pmr->pmr_ipb == NULL)
break;
- cfs_list_add(&pmr->pmr_list, &pool->po_free_list);
+ list_add(&pmr->pmr_list, &pool->po_free_list);
}
if (i < size) {
for (i = 0; i < pool->po_size; i++) {
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
if (tx->tx_pages != NULL)
LIBCFS_FREE(tx->tx_pages,
LNET_MAX_IOV *
}
static void
-kiblnd_tx_init(kib_pool_t *pool, cfs_list_t *node)
+kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = cfs_list_entry(node, kib_tx_t, tx_list);
+ kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
+ tps_poolset);
+ kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
- tx->tx_cookie = tps->tps_next_tx_cookie ++;
+ tx->tx_cookie = tps->tps_next_tx_cookie++;
}
void
int
kiblnd_dev_failover(kib_dev_t *dev)
{
- CFS_LIST_HEAD (zombie_tpo);
- CFS_LIST_HEAD (zombie_ppo);
- CFS_LIST_HEAD (zombie_fpo);
+ struct list_head zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
+ struct list_head zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
+ struct list_head zombie_fpo = LIST_HEAD_INIT(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
kib_hca_dev_t *hdev = NULL;
kib_hca_dev_t *old;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- old = dev->ibd_hdev;
- dev->ibd_hdev = hdev; /* take over the refcount */
- hdev = old;
+ old = dev->ibd_hdev;
+ dev->ibd_hdev = hdev; /* take over the refcount */
+ hdev = old;
- cfs_list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
+ list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
cfs_cpt_for_each(i, lnet_cpt_table()) {
kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
&zombie_tpo);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
out:
- if (!cfs_list_empty(&zombie_tpo))
- kiblnd_destroy_pool_list(&zombie_tpo);
- if (!cfs_list_empty(&zombie_ppo))
- kiblnd_destroy_pool_list(&zombie_ppo);
- if (!cfs_list_empty(&zombie_fpo))
- kiblnd_destroy_fmr_pool_list(&zombie_fpo);
- if (hdev != NULL)
- kiblnd_hdev_decref(hdev);
+ if (!list_empty(&zombie_tpo))
+ kiblnd_destroy_pool_list(&zombie_tpo);
+ if (!list_empty(&zombie_ppo))
+ kiblnd_destroy_pool_list(&zombie_ppo);
+ if (!list_empty(&zombie_fpo))
+ kiblnd_destroy_fmr_pool_list(&zombie_fpo);
+ if (hdev != NULL)
+ kiblnd_hdev_decref(hdev);
- if (rc != 0)
- dev->ibd_failed_failover++;
- else
- dev->ibd_failed_failover = 0;
+ if (rc != 0)
+ dev->ibd_failed_failover++;
+ else
+ dev->ibd_failed_failover = 0;
- return rc;
+ return rc;
}
void
kiblnd_destroy_dev (kib_dev_t *dev)
{
LASSERT (dev->ibd_nnets == 0);
- LASSERT (cfs_list_empty(&dev->ibd_nets));
+ LASSERT(list_empty(&dev->ibd_nets));
- cfs_list_del(&dev->ibd_fail_list);
- cfs_list_del(&dev->ibd_list);
+ list_del(&dev->ibd_fail_list);
+ list_del(&dev->ibd_list);
if (dev->ibd_hdev != NULL)
kiblnd_hdev_decref(dev->ibd_hdev);
dev_put(netdev);
}
- CFS_INIT_LIST_HEAD(&dev->ibd_nets);
- CFS_INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- CFS_INIT_LIST_HEAD(&dev->ibd_fail_list);
+ INIT_LIST_HEAD(&dev->ibd_nets);
+ INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
+ INIT_LIST_HEAD(&dev->ibd_fail_list);
dev->ibd_ifip = ip;
strcpy(&dev->ibd_ifname[0], ifname);
return NULL;
}
- cfs_list_add_tail(&dev->ibd_list,
+ list_add_tail(&dev->ibd_list,
&kiblnd_data.kib_devs);
return dev;
}
struct kib_sched_info *sched;
int i;
- LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
+ LASSERT(list_empty(&kiblnd_data.kib_devs));
CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
atomic_read(&libcfs_kmemory));
case IBLND_INIT_DATA:
LASSERT (kiblnd_data.kib_peers != NULL);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- LASSERT (cfs_list_empty(&kiblnd_data.kib_peers[i]));
+ LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
}
- LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
- LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
+ LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
/* flag threads to terminate; wake and wait for them to die */
kiblnd_data.kib_shutdown = 1;
if (kiblnd_data.kib_peers != NULL) {
LIBCFS_FREE(kiblnd_data.kib_peers,
- sizeof(cfs_list_t) *
+ sizeof(struct list_head) *
kiblnd_data.kib_peer_hash_size);
}
write_lock_irqsave(g_lock, flags);
LASSERT(net->ibn_dev->ibd_nnets > 0);
net->ibn_dev->ibd_nnets--;
- cfs_list_del(&net->ibn_list);
+ list_del(&net->ibn_list);
write_unlock_irqrestore(g_lock, flags);
/* fall through */
LIBCFS_FREE(net, sizeof(*net));
out:
- if (cfs_list_empty(&kiblnd_data.kib_devs))
+ if (list_empty(&kiblnd_data.kib_devs))
kiblnd_base_shutdown();
return;
}
int rc;
int i;
- LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);
+ LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
try_module_get(THIS_MODULE);
memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
rwlock_init(&kiblnd_data.kib_global_lock);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
+ INIT_LIST_HEAD(&kiblnd_data.kib_devs);
+ INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
- kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- LIBCFS_ALLOC(kiblnd_data.kib_peers,
- sizeof(cfs_list_t) *
- kiblnd_data.kib_peer_hash_size);
- if (kiblnd_data.kib_peers == NULL) {
- goto failed;
- }
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
+ kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
+ LIBCFS_ALLOC(kiblnd_data.kib_peers,
+ sizeof(struct list_head) *
+ kiblnd_data.kib_peer_hash_size);
+ if (kiblnd_data.kib_peers == NULL)
+ goto failed;
+
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
+ INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
spin_lock_init(&kiblnd_data.kib_connd_lock);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+ INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
int nthrs;
spin_lock_init(&sched->ibs_lock);
- CFS_INIT_LIST_HEAD(&sched->ibs_conns);
+ INIT_LIST_HEAD(&sched->ibs_conns);
init_waitqueue_head(&sched->ibs_waitq);
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
char *colon2;
colon = strchr(ifname, ':');
- cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
return dev;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ibdev->ibd_nnets++;
- cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+ list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
net->ibn_init = IBLND_INIT_ALL;
typedef struct
{
- cfs_list_t ibd_list; /* chain on kib_devs */
- cfs_list_t ibd_fail_list; /* chain on kib_failed_devs */
- __u32 ibd_ifip; /* IPoIB interface IP */
- /** IPoIB interface name */
- char ibd_ifname[KIB_IFNAME_SIZE];
- int ibd_nnets; /* # nets extant */
-
- cfs_time_t ibd_next_failover;
- int ibd_failed_failover; /* # failover failures */
- unsigned int ibd_failover; /* failover in progress */
- unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
- cfs_list_t ibd_nets;
- struct kib_hca_dev *ibd_hdev;
+ struct list_head ibd_list; /* chain on kib_devs */
+ struct list_head ibd_fail_list; /* chain on kib_failed_devs */
+ __u32 ibd_ifip; /* IPoIB interface IP */
+ /** IPoIB interface name */
+ char ibd_ifname[KIB_IFNAME_SIZE];
+ int ibd_nnets; /* # nets extant */
+
+ cfs_time_t ibd_next_failover;
+ /* # failover failures */
+ int ibd_failed_failover;
+ /* failover in progress */
+ unsigned int ibd_failover;
+ /* IPoIB interface is a bonding master */
+ unsigned int ibd_can_failover;
+ struct list_head ibd_nets;
+ struct kib_hca_dev *ibd_hdev;
} kib_dev_t;
typedef struct kib_hca_dev
struct kib_pmr_pool;
typedef struct {
- cfs_list_t pmr_list; /* chain node */
- struct ib_phys_buf *pmr_ipb; /* physical buffer */
- struct ib_mr *pmr_mr; /* IB MR */
- struct kib_pmr_pool *pmr_pool; /* owner of this MR */
- __u64 pmr_iova; /* Virtual I/O address */
- int pmr_refcount; /* reference count */
+ struct list_head pmr_list; /* chain node */
+ struct ib_phys_buf *pmr_ipb; /* physical buffer */
+ struct ib_mr *pmr_mr; /* IB MR */
+ struct kib_pmr_pool *pmr_pool; /* owner of this MR */
+ __u64 pmr_iova; /* Virtual I/O address */
+ int pmr_refcount; /* reference count */
} kib_phys_mr_t;
struct kib_pool;
typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
-typedef void (*kib_ps_node_init_t)(struct kib_pool *po, cfs_list_t *node);
-typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, cfs_list_t *node);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
struct kib_net;
typedef struct kib_poolset
{
- spinlock_t ps_lock; /* serialize */
- struct kib_net *ps_net; /* network it belongs to */
- char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
- cfs_list_t ps_pool_list; /* list of pools */
- cfs_list_t ps_failed_pool_list; /* failed pool list */
- cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
- int ps_increasing; /* is allocating new pool */
- int ps_pool_size; /* new pool size */
- int ps_cpt; /* CPT id */
-
- kib_ps_pool_create_t ps_pool_create; /* create a new pool */
- kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
- kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
- kib_ps_node_fini_t ps_node_fini; /* finalize node */
+ /* serialize */
+ spinlock_t ps_lock;
+ /* network it belongs to */
+ struct kib_net *ps_net;
+ /* pool set name */
+ char ps_name[IBLND_POOL_NAME_LEN];
+ /* list of pools */
+ struct list_head ps_pool_list;
+ /* failed pool list */
+ struct list_head ps_failed_pool_list;
+ /* time stamp for retry if failed to allocate */
+ cfs_time_t ps_next_retry;
+ /* is allocating new pool */
+ int ps_increasing;
+ /* new pool size */
+ int ps_pool_size;
+ /* CPT id */
+ int ps_cpt;
+
+ /* create a new pool */
+ kib_ps_pool_create_t ps_pool_create;
+ /* destroy a pool */
+ kib_ps_pool_destroy_t ps_pool_destroy;
+	/* initialize a newly allocated node */
+ kib_ps_node_init_t ps_node_init;
+ /* finalize node */
+ kib_ps_node_fini_t ps_node_fini;
} kib_poolset_t;
typedef struct kib_pool
{
- cfs_list_t po_list; /* chain on pool list */
- cfs_list_t po_free_list; /* pre-allocated node */
- kib_poolset_t *po_owner; /* pool_set of this pool */
- cfs_time_t po_deadline; /* deadline of this pool */
- int po_allocated; /* # of elements in use */
- int po_failed; /* pool is created on failed HCA */
- int po_size; /* # of pre-allocated elements */
+ /* chain on pool list */
+ struct list_head po_list;
+ /* pre-allocated node */
+ struct list_head po_free_list;
+ /* pool_set of this pool */
+ kib_poolset_t *po_owner;
+ /* deadline of this pool */
+ cfs_time_t po_deadline;
+ /* # of elements in use */
+ int po_allocated;
+ /* pool is created on failed HCA */
+ int po_failed;
+ /* # of pre-allocated elements */
+ int po_size;
} kib_pool_t;
typedef struct {
typedef struct
{
spinlock_t fps_lock; /* serialize */
- struct kib_net *fps_net; /* IB network */
- cfs_list_t fps_pool_list; /* FMR pool list */
- cfs_list_t fps_failed_pool_list; /* FMR pool list */
- __u64 fps_version; /* validity stamp */
+ struct kib_net *fps_net; /* IB network */
+ struct list_head fps_pool_list; /* FMR pool list */
+	struct list_head	fps_failed_pool_list;	/* failed FMR pool list */
+ __u64 fps_version; /* validity stamp */
int fps_cpt; /* CPT id */
int fps_pool_size;
int fps_flush_trigger;
typedef struct
{
- cfs_list_t fpo_list; /* chain on pool list */
- struct kib_hca_dev *fpo_hdev; /* device for this pool */
- kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
- cfs_time_t fpo_deadline; /* deadline of this pool */
- int fpo_failed; /* fmr pool is failed */
- int fpo_map_count; /* # of mapped FMR */
+ struct list_head fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
+ kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ cfs_time_t fpo_deadline; /* deadline of this pool */
+ int fpo_failed; /* fmr pool is failed */
+ int fpo_map_count; /* # of mapped FMR */
} kib_fmr_pool_t;
typedef struct {
typedef struct kib_net
{
- cfs_list_t ibn_list; /* chain on kib_dev_t::ibd_nets */
- __u64 ibn_incarnation; /* my epoch */
- int ibn_init; /* initialisation state */
- int ibn_shutdown; /* shutting down? */
+ /* chain on kib_dev_t::ibd_nets */
+ struct list_head ibn_list;
+	__u64			ibn_incarnation; /* my epoch */
+ int ibn_init; /* initialisation state */
+ int ibn_shutdown; /* shutting down? */
atomic_t ibn_npeers; /* # peers extant */
atomic_t ibn_nconns; /* # connections extant */
/* serialise */
spinlock_t ibs_lock;
/* schedulers sleep here */
- wait_queue_head_t ibs_waitq;
+ wait_queue_head_t ibs_waitq;
/* conns to check for rx completions */
- cfs_list_t ibs_conns;
+ struct list_head ibs_conns;
/* number of scheduler threads */
int ibs_nthreads;
/* max allowed scheduler threads */
{
int kib_init; /* initialisation state */
int kib_shutdown; /* shut down? */
- cfs_list_t kib_devs; /* IB devices extant */
+ struct list_head kib_devs; /* IB devices extant */
/* list head of failed devices */
- cfs_list_t kib_failed_devs;
+ struct list_head kib_failed_devs;
/* schedulers sleep here */
- wait_queue_head_t kib_failover_waitq;
+ wait_queue_head_t kib_failover_waitq;
atomic_t kib_nthreads; /* # live threads */
/* stabilize net/dev/peer/conn ops */
rwlock_t kib_global_lock;
/* hash table of all my known peers */
- cfs_list_t *kib_peers;
+ struct list_head *kib_peers;
/* size of kib_peers */
int kib_peer_hash_size;
/* the connd task (serialisation assertions) */
void *kib_connd;
/* connections to setup/teardown */
- cfs_list_t kib_connd_conns;
+ struct list_head kib_connd_conns;
/* connections with zero refcount */
- cfs_list_t kib_connd_zombies;
+ struct list_head kib_connd_zombies;
/* connection daemon sleeps here */
- wait_queue_head_t kib_connd_waitq;
+ wait_queue_head_t kib_connd_waitq;
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
/* percpt data for schedulers */
typedef struct kib_rx /* receive message */
{
- cfs_list_t rx_list; /* queue for attention */
- struct kib_conn *rx_conn; /* owning conn */
- int rx_nob; /* # bytes received (-1 while posted) */
- enum ib_wc_status rx_status; /* completion status */
- kib_msg_t *rx_msg; /* message buffer (host vaddr) */
- __u64 rx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
- struct ib_recv_wr rx_wrq; /* receive work item... */
- struct ib_sge rx_sge; /* ...and its memory */
+ /* queue for attention */
+ struct list_head rx_list;
+ /* owning conn */
+ struct kib_conn *rx_conn;
+ /* # bytes received (-1 while posted) */
+ int rx_nob;
+ /* completion status */
+ enum ib_wc_status rx_status;
+ /* message buffer (host vaddr) */
+ kib_msg_t *rx_msg;
+ /* message buffer (I/O addr) */
+ __u64 rx_msgaddr;
+ /* for dma_unmap_single() */
+ DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);
+ /* receive work item... */
+ struct ib_recv_wr rx_wrq;
+ /* ...and its memory */
+ struct ib_sge rx_sge;
} kib_rx_t;
#define IBLND_POSTRX_DONT_POST 0 /* don't post */
typedef struct kib_tx /* transmit message */
{
- cfs_list_t tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
- struct kib_conn *tx_conn; /* owning conn */
- short tx_sending; /* # tx callbacks outstanding */
- short tx_queued; /* queued for sending */
- short tx_waiting; /* waiting for peer */
- int tx_status; /* LNET completion status */
- unsigned long tx_deadline; /* completion deadline */
- __u64 tx_cookie; /* completion cookie */
- lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
- __u64 tx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
- int tx_nwrq; /* # send work items */
- struct ib_send_wr *tx_wrq; /* send work items... */
- struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
- int tx_nfrags; /* # entries in... */
- struct scatterlist *tx_frags; /* dma_map_sg descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
- union {
- kib_phys_mr_t *pmr; /* MR for physical buffer */
- kib_fmr_t fmr; /* FMR */
- } tx_u;
- int tx_dmadir; /* dma direction */
+ /* queue on idle_txs ibc_tx_queue etc. */
+ struct list_head tx_list;
+ /* pool I'm from */
+ kib_tx_pool_t *tx_pool;
+ /* owning conn */
+ struct kib_conn *tx_conn;
+ /* # tx callbacks outstanding */
+ short tx_sending;
+ /* queued for sending */
+ short tx_queued;
+ /* waiting for peer */
+ short tx_waiting;
+ /* LNET completion status */
+ int tx_status;
+ /* completion deadline */
+ unsigned long tx_deadline;
+ /* completion cookie */
+ __u64 tx_cookie;
+ /* lnet msgs to finalize on completion */
+ lnet_msg_t *tx_lntmsg[2];
+ /* message buffer (host vaddr) */
+ kib_msg_t *tx_msg;
+ /* message buffer (I/O addr) */
+ __u64 tx_msgaddr;
+ /* for dma_unmap_single() */
+ DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);
+ /* # send work items */
+ int tx_nwrq;
+ /* send work items... */
+ struct ib_send_wr *tx_wrq;
+ /* ...and their memory */
+ struct ib_sge *tx_sge;
+ /* rdma descriptor */
+ kib_rdma_desc_t *tx_rd;
+ /* # entries in... */
+ int tx_nfrags;
+ /* dma_map_sg descriptor */
+ struct scatterlist *tx_frags;
+ /* rdma phys page addrs */
+ __u64 *tx_pages;
+ union {
+ /* MR for physical buffer */
+ kib_phys_mr_t *pmr;
+ /* FMR */
+ kib_fmr_t fmr;
+ } tx_u;
+ /* dma direction */
+ int tx_dmadir;
} kib_tx_t;
typedef struct kib_connvars
typedef struct kib_conn
{
- struct kib_sched_info *ibc_sched; /* scheduler information */
- struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
- cfs_list_t ibc_list; /* stash on peer's conn list */
- cfs_list_t ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- __u64 ibc_incarnation; /* which instance of the peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits;/* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- unsigned int ibc_nrx:16; /* receive buffers owned */
- unsigned int ibc_scheduled:1; /* scheduled for attention */
- unsigned int ibc_ready:1; /* CQ callback fired */
- /* time of last send */
- unsigned long ibc_last_send;
- /** link chain for kiblnd_check_conns only */
- cfs_list_t ibc_connd_list;
- /** rxs completed before ESTABLISHED */
- cfs_list_t ibc_early_rxs;
- /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
- cfs_list_t ibc_tx_noops;
- cfs_list_t ibc_tx_queue; /* sends that need a credit */
- cfs_list_t ibc_tx_queue_nocred;/* sends that don't need a credit */
- cfs_list_t ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
- cfs_list_t ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
+ /* scheduler information */
+ struct kib_sched_info *ibc_sched;
+ /* owning peer */
+ struct kib_peer *ibc_peer;
+ /* HCA bound on */
+ kib_hca_dev_t *ibc_hdev;
+ /* stash on peer's conn list */
+ struct list_head ibc_list;
+ /* schedule for attention */
+ struct list_head ibc_sched_list;
+ /* version of connection */
+ __u16 ibc_version;
+ /* which instance of the peer */
+ __u64 ibc_incarnation;
+ /* # users */
+ atomic_t ibc_refcount;
+ /* what's happening */
+ int ibc_state;
+ /* # uncompleted sends */
+ int ibc_nsends_posted;
+ /* # uncompleted NOOPs */
+ int ibc_noops_posted;
+ /* # credits I have */
+ int ibc_credits;
+ /* # credits to return */
+ int ibc_outstanding_credits;
+ /* # ACK/DONE msg credits */
+ int ibc_reserved_credits;
+ /* set on comms error */
+ int ibc_comms_error;
+ /* receive buffers owned */
+ unsigned int ibc_nrx:16;
+ /* scheduled for attention */
+ unsigned int ibc_scheduled:1;
+ /* CQ callback fired */
+ unsigned int ibc_ready:1;
+ /* time of last send */
+ unsigned long ibc_last_send;
+ /** link chain for kiblnd_check_conns only */
+ struct list_head ibc_connd_list;
+ /** rxs completed before ESTABLISHED */
+ struct list_head ibc_early_rxs;
+ /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
+ struct list_head ibc_tx_noops;
+ /* sends that need a credit */
+ struct list_head ibc_tx_queue;
+ /* sends that don't need a credit */
+ struct list_head ibc_tx_queue_nocred;
+ /* sends that need to reserve an ACK/DONE msg */
+ struct list_head ibc_tx_queue_rsrvd;
+ /* active tx awaiting completion */
+ struct list_head ibc_active_txs;
+ /* serialise */
+ spinlock_t ibc_lock;
+ /* the rx descs */
+ kib_rx_t *ibc_rxs;
+ /* premapped rx msg pages */
+ kib_pages_t *ibc_rx_pages;
+
+ /* CM id */
+ struct rdma_cm_id *ibc_cmid;
+ /* completion queue */
+ struct ib_cq *ibc_cq;
+
+ /* in-progress connection state */
+ kib_connvars_t *ibc_connvars;
} kib_conn_t;
#define IBLND_CONN_INIT 0 /* being initialised */
typedef struct kib_peer
{
- cfs_list_t ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
- cfs_list_t ibp_conns; /* all active connections */
- cfs_list_t ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
- __u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts */
- int ibp_accepting; /* current passive connection attempts */
- int ibp_error; /* errno on closing this peer */
- cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
+ /* stash on global peer list */
+ struct list_head ibp_list;
+ /* who's on the other end(s) */
+ lnet_nid_t ibp_nid;
+ /* LNet interface */
+ lnet_ni_t *ibp_ni;
+ /* # users */
+ atomic_t ibp_refcount;
+ /* all active connections */
+ struct list_head ibp_conns;
+ /* msgs waiting for a conn */
+ struct list_head ibp_tx_queue;
+ /* version of peer */
+ __u16 ibp_version;
+ /* incarnation of peer */
+ __u64 ibp_incarnation;
+ /* current active connection attempts */
+ int ibp_connecting;
+ /* current passive connection attempts */
+ int ibp_accepting;
+ /* errno on closing this peer */
+ int ibp_error;
+ /* when (in jiffies) I was last alive */
+ cfs_time_t ibp_last_alive;
} kib_peer_t;
extern kib_data_t kiblnd_data;
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
- LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ LASSERT(atomic_read(&hdev->ibh_ref) > 0);
atomic_inc(&hdev->ibh_ref);
}
static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
- LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ LASSERT(atomic_read(&hdev->ibh_ref) > 0);
if (atomic_dec_and_test(&hdev->ibh_ref))
kiblnd_hdev_destroy(hdev);
}
static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
- if (!cfs_list_empty(&dev->ibd_fail_list)) /* already scheduled */
+ if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- cfs_list_add_tail(&(conn)->ibc_list, \
+ list_add_tail(&(conn)->ibc_list, \
&kiblnd_data.kib_connd_zombies); \
wake_up(&kiblnd_data.kib_connd_waitq); \
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
kiblnd_destroy_peer(peer); \
} while (0)
-static inline cfs_list_t *
+static inline struct list_head *
kiblnd_nid2peerlist (lnet_nid_t nid)
{
- unsigned int hash =
- ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+ unsigned int hash =
+ ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
- return (&kiblnd_data.kib_peers [hash]);
+ return &kiblnd_data.kib_peers[hash];
}
static inline int
kiblnd_peer_active (kib_peer_t *peer)
{
- /* Am I in the peer hash table? */
- return (!cfs_list_empty(&peer->ibp_list));
+ /* Am I in the peer hash table? */
+ return !list_empty(&peer->ibp_list);
}
static inline kib_conn_t *
kiblnd_get_conn_locked (kib_peer_t *peer)
{
- LASSERT (!cfs_list_empty(&peer->ibp_conns));
+ LASSERT(!list_empty(&peer->ibp_conns));
/* just return the first connection */
- return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+ return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}
static inline int
return 0; /* No need to send NOOP */
if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
- if (!cfs_list_empty(&conn->ibc_tx_queue_nocred))
+ if (!list_empty(&conn->ibc_tx_queue_nocred))
return 0; /* NOOP can be piggybacked */
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (cfs_list_empty(&conn->ibc_tx_queue) ||
+ return (list_empty(&conn->ibc_tx_queue) ||
conn->ibc_credits == 0);
}
- if (!cfs_list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
- !cfs_list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
+ if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
+ !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
conn->ibc_credits == 0) /* no credit */
return 0;
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
- return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
+ return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
static inline void
}
static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q)
+kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
- if (q == &conn->ibc_tx_queue)
- return "tx_queue";
+ if (q == &conn->ibc_tx_queue)
+ return "tx_queue";
- if (q == &conn->ibc_tx_queue_rsrvd)
- return "tx_queue_rsrvd";
+ if (q == &conn->ibc_tx_queue_rsrvd)
+ return "tx_queue_rsrvd";
- if (q == &conn->ibc_tx_queue_nocred)
- return "tx_queue_nocred";
+ if (q == &conn->ibc_tx_queue_nocred)
+ return "tx_queue_nocred";
- if (q == &conn->ibc_active_txs)
- return "active_txs";
+ if (q == &conn->ibc_active_txs)
+ return "active_txs";
- LBUG();
- return NULL;
+ LBUG();
+ return NULL;
}
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
kib_rdma_desc_t *rd, int nfrags);
void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
-void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node);
-cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
+struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
- int status);
+void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status);
void kiblnd_check_sends (kib_conn_t *conn);
void kiblnd_qp_event(struct ib_event *event, void *arg);
}
void
-kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status)
+kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
{
- kib_tx_t *tx;
+ kib_tx_t *tx;
- while (!cfs_list_empty (txlist)) {
- tx = cfs_list_entry (txlist->next, kib_tx_t, tx_list);
+ while (!list_empty(txlist)) {
+ tx = list_entry(txlist->next, kib_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
- /* complete now */
- tx->tx_waiting = 0;
- tx->tx_status = status;
- kiblnd_tx_done(ni, tx);
- }
+ list_del(&tx->tx_list);
+ /* complete now */
+ tx->tx_waiting = 0;
+ tx->tx_status = status;
+ kiblnd_tx_done(ni, tx);
+ }
}
kib_tx_t *
kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
{
kib_net_t *net = (kib_net_t *)ni->ni_data;
- cfs_list_t *node;
+ struct list_head *node;
kib_tx_t *tx;
kib_tx_poolset_t *tps;
kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
- cfs_list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = cfs_list_entry(tmp, kib_tx_t, tx_list);
+ list_for_each(tmp, &conn->ibc_active_txs) {
+ kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
- LASSERT (!tx->tx_queued);
- LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
+ LASSERT(!tx->tx_queued);
+ LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
- if (tx->tx_cookie != cookie)
- continue;
+ if (tx->tx_cookie != cookie)
+ continue;
- if (tx->tx_waiting &&
- tx->tx_msg->ibm_type == txtype)
- return tx;
+ if (tx->tx_waiting &&
+ tx->tx_msg->ibm_type == txtype)
+ return tx;
- CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
- tx->tx_waiting ? "" : "NOT ",
- tx->tx_msg->ibm_type, txtype);
- }
- return NULL;
+ CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
+ tx->tx_waiting ? "" : "NOT ",
+ tx->tx_msg->ibm_type, txtype);
+ }
+ return NULL;
}
void
idle = !tx->tx_queued && (tx->tx_sending == 0);
if (idle)
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
msg->ibm_u.putack.ibpam_src_cookie);
if (tx != NULL)
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
if (tx == NULL) {
write_lock_irqsave(g_lock, flags);
/* must check holding global lock to eliminate race */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+ list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
write_unlock_irqrestore(g_lock, flags);
return;
}
}
/* NB don't drop ibc_lock before bumping tx_sending */
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
tx->tx_queued = 0;
if (msg->ibm_type == IBLND_MSG_NOOP &&
* tx_sending is non-zero if we've not done the tx_complete()
* from the first send; hence the ++ rather than = below. */
tx->tx_sending++;
- cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
+ list_add(&tx->tx_list, &conn->ibc_active_txs);
/* I'm still holding ibc_lock! */
if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
done = (tx->tx_sending == 0);
if (done)
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !cfs_list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = cfs_list_entry(conn->ibc_tx_queue_rsrvd.next,
+ !list_empty(&conn->ibc_tx_queue_rsrvd)) {
+ tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
kib_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
- cfs_list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
for (;;) {
int credit;
- if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) {
+ if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
+ tx = list_entry(conn->ibc_tx_queue_nocred.next,
kib_tx_t, tx_list);
- } else if (!cfs_list_empty(&conn->ibc_tx_noops)) {
+ } else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = cfs_list_entry(conn->ibc_tx_noops.next,
+ tx = list_entry(conn->ibc_tx_noops.next,
kib_tx_t, tx_list);
- } else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
+ } else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = cfs_list_entry(conn->ibc_tx_queue.next,
+ tx = list_entry(conn->ibc_tx_queue.next,
kib_tx_t, tx_list);
} else
break;
!tx->tx_waiting && /* Not waiting for peer */
!tx->tx_queued; /* Not re-queued (PUT_DONE) */
if (idle)
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
kiblnd_conn_addref(conn); /* 1 ref for me.... */
void
kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
- cfs_list_t *q;
+ struct list_head *q;
- LASSERT (tx->tx_nwrq > 0); /* work items set up */
- LASSERT (!tx->tx_queued); /* not queued for sending already */
- LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(tx->tx_nwrq > 0); /* work items set up */
+ LASSERT(!tx->tx_queued); /* not queued for sending already */
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
tx->tx_queued = 1;
tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
break;
}
- cfs_list_add_tail(&tx->tx_list, q);
+ list_add_tail(&tx->tx_list, q);
}
void
read_lock_irqsave(g_lock, flags);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) {
+ if (peer != NULL && !list_empty(&peer->ibp_conns)) {
/* Found a peer with an established connection */
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL) {
- if (cfs_list_empty(&peer->ibp_conns)) {
+ if (list_empty(&peer->ibp_conns)) {
/* found a peer, but it's still connecting... */
LASSERT (peer->ibp_connecting != 0 ||
peer->ibp_accepting != 0);
if (tx != NULL)
- cfs_list_add_tail(&tx->tx_list,
+ list_add_tail(&tx->tx_list,
&peer->ibp_tx_queue);
write_unlock_irqrestore(g_lock, flags);
} else {
peer2 = kiblnd_find_peer_locked(nid);
if (peer2 != NULL) {
- if (cfs_list_empty(&peer2->ibp_conns)) {
+ if (list_empty(&peer2->ibp_conns)) {
/* found a peer, but it's still connecting... */
LASSERT (peer2->ibp_connecting != 0 ||
peer2->ibp_accepting != 0);
if (tx != NULL)
- cfs_list_add_tail(&tx->tx_list,
+ list_add_tail(&tx->tx_list,
&peer2->ibp_tx_queue);
write_unlock_irqrestore(g_lock, flags);
} else {
LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
if (tx != NULL)
- cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
+ list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
kiblnd_peer_addref(peer);
- cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+ list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
write_unlock_irqrestore(g_lock, flags);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (cfs_list_empty(&peer->ibp_conns) &&
+ if (list_empty(&peer->ibp_conns) &&
peer->ibp_accepting == 0 &&
peer->ibp_connecting == 0 &&
peer->ibp_error != 0) {
return; /* already being handled */
if (error == 0 &&
- cfs_list_empty(&conn->ibc_tx_noops) &&
- cfs_list_empty(&conn->ibc_tx_queue) &&
- cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
- cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
- cfs_list_empty(&conn->ibc_active_txs)) {
+ list_empty(&conn->ibc_tx_noops) &&
+ list_empty(&conn->ibc_tx_queue) &&
+ list_empty(&conn->ibc_tx_queue_rsrvd) &&
+ list_empty(&conn->ibc_tx_queue_nocred) &&
+ list_empty(&conn->ibc_active_txs)) {
CDEBUG(D_NET, "closing conn to %s\n",
libcfs_nid2str(peer->ibp_nid));
} else {
CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
libcfs_nid2str(peer->ibp_nid), error,
- cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- cfs_list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ?
+ "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ?
+ "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
- cfs_list_del(&conn->ibc_list);
+ list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
- if (cfs_list_empty (&peer->ibp_conns) && /* no more conns */
+ if (list_empty(&peer->ibp_conns) && /* no more conns */
kiblnd_peer_active(peer)) { /* still in peer table */
kiblnd_unlink_peer_locked(peer);
if (error != 0 &&
kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
+ list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
wake_up(&kiblnd_data.kib_failover_waitq);
}
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+ list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
wake_up(&kiblnd_data.kib_connd_waitq);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!cfs_list_empty(&conn->ibc_early_rxs)) {
- rx = cfs_list_entry(conn->ibc_early_rxs.next,
+ while (!list_empty(&conn->ibc_early_rxs)) {
+ rx = list_entry(conn->ibc_early_rxs.next,
kib_rx_t, rx_list);
- cfs_list_del(&rx->rx_list);
+ list_del(&rx->rx_list);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kiblnd_handle_rx(rx);
}
void
-kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
+kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
{
- CFS_LIST_HEAD (zombies);
- cfs_list_t *tmp;
- cfs_list_t *nxt;
- kib_tx_t *tx;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head *tmp;
+ struct list_head *nxt;
+ kib_tx_t *tx;
spin_lock(&conn->ibc_lock);
- cfs_list_for_each_safe (tmp, nxt, txs) {
- tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
+ list_for_each_safe(tmp, nxt, txs) {
+ tx = list_entry(tmp, kib_tx_t, tx_list);
- if (txs == &conn->ibc_active_txs) {
- LASSERT (!tx->tx_queued);
- LASSERT (tx->tx_waiting ||
- tx->tx_sending != 0);
- } else {
- LASSERT (tx->tx_queued);
- }
+ if (txs == &conn->ibc_active_txs) {
+ LASSERT(!tx->tx_queued);
+ LASSERT(tx->tx_waiting ||
+ tx->tx_sending != 0);
+ } else {
+ LASSERT(tx->tx_queued);
+ }
- tx->tx_status = -ECONNABORTED;
- tx->tx_waiting = 0;
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
- if (tx->tx_sending == 0) {
- tx->tx_queued = 0;
- cfs_list_del (&tx->tx_list);
- cfs_list_add (&tx->tx_list, &zombies);
- }
- }
+ if (tx->tx_sending == 0) {
+ tx->tx_queued = 0;
+ list_del(&tx->tx_list);
+ list_add(&tx->tx_list, &zombies);
+ }
+ }
spin_unlock(&conn->ibc_lock);
void
kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
{
- CFS_LIST_HEAD (zombies);
- unsigned long flags;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ unsigned long flags;
LASSERT (error != 0);
LASSERT (!in_interrupt());
}
if (peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0) {
- /* another connection attempt under way... */
+ peer->ibp_accepting != 0) {
+ /* another connection attempt under way... */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return;
- }
+ flags);
+ return;
+ }
- if (cfs_list_empty(&peer->ibp_conns)) {
- /* Take peer's blocked transmits to complete with error */
- cfs_list_add(&zombies, &peer->ibp_tx_queue);
- cfs_list_del_init(&peer->ibp_tx_queue);
+ if (list_empty(&peer->ibp_conns)) {
+ /* Take peer's blocked transmits to complete with error */
+ list_add(&zombies, &peer->ibp_tx_queue);
+ list_del_init(&peer->ibp_tx_queue);
- if (kiblnd_peer_active(peer))
- kiblnd_unlink_peer_locked(peer);
+ if (kiblnd_peer_active(peer))
+ kiblnd_unlink_peer_locked(peer);
- peer->ibp_error = error;
- } else {
- /* Can't have blocked transmits if there are connections */
- LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
- }
+ peer->ibp_error = error;
+ } else {
+ /* Can't have blocked transmits if there are connections */
+ LASSERT(list_empty(&peer->ibp_tx_queue));
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_peer_notify(peer);
+ kiblnd_peer_notify(peer);
- if (cfs_list_empty (&zombies))
- return;
+ if (list_empty(&zombies))
+ return;
- CNETERR("Deleting messages for %s: connection failed\n",
- libcfs_nid2str(peer->ibp_nid));
+ CNETERR("Deleting messages for %s: connection failed\n",
+ libcfs_nid2str(peer->ibp_nid));
- kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
+ kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}
void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
- kib_peer_t *peer = conn->ibc_peer;
- kib_tx_t *tx;
- cfs_list_t txs;
- unsigned long flags;
- int active;
+ kib_peer_t *peer = conn->ibc_peer;
+ kib_tx_t *tx;
+ struct list_head txs;
+ unsigned long flags;
+ int active;
active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer);
- /* Add conn to peer's list and nuke any dangling conns from a different
- * peer instance... */
- kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
- cfs_list_add(&conn->ibc_list, &peer->ibp_conns);
- if (active)
- peer->ibp_connecting--;
- else
- peer->ibp_accepting--;
+ /* Add conn to peer's list and nuke any dangling conns from a different
+ * peer instance... */
+ kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
+ list_add(&conn->ibc_list, &peer->ibp_conns);
+ if (active)
+ peer->ibp_connecting--;
+ else
+ peer->ibp_accepting--;
if (peer->ibp_version == 0) {
peer->ibp_version = conn->ibc_version;
peer->ibp_incarnation = conn->ibc_incarnation;
}
- /* grab pending txs while I have the lock */
- cfs_list_add(&txs, &peer->ibp_tx_queue);
- cfs_list_del_init(&peer->ibp_tx_queue);
+ /* grab pending txs while I have the lock */
+ list_add(&txs, &peer->ibp_tx_queue);
+ list_del_init(&peer->ibp_tx_queue);
if (!kiblnd_peer_active(peer) || /* peer has been deleted */
conn->ibc_comms_error != 0) { /* error has happened already */
/* Schedule blocked txs */
spin_lock(&conn->ibc_lock);
- while (!cfs_list_empty(&txs)) {
- tx = cfs_list_entry(txs.next, kib_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
+ while (!list_empty(&txs)) {
+ tx = list_entry(txs.next, kib_tx_t, tx_list);
+ list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
}
LASSERT (net->ibn_shutdown == 0);
kiblnd_peer_addref(peer);
- cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+ list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
write_unlock_irqrestore(g_lock, flags);
}
* NB: reconnect is still needed even when ibp_tx_queue is
* empty if ibp_version != version because reconnect may be
* initiated by kiblnd_query() */
- if ((!cfs_list_empty(&peer->ibp_tx_queue) ||
+ if ((!list_empty(&peer->ibp_tx_queue) ||
peer->ibp_version != version) &&
peer->ibp_connecting == 1 &&
peer->ibp_accepting == 0) {
}
static int
-kiblnd_check_txs_locked(kib_conn_t *conn, cfs_list_t *txs)
+kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
{
- kib_tx_t *tx;
- cfs_list_t *ttmp;
+ kib_tx_t *tx;
+ struct list_head *ttmp;
- cfs_list_for_each (ttmp, txs) {
- tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
+ list_for_each(ttmp, txs) {
+ tx = list_entry(ttmp, kib_tx_t, tx_list);
- if (txs != &conn->ibc_active_txs) {
- LASSERT (tx->tx_queued);
- } else {
- LASSERT (!tx->tx_queued);
- LASSERT (tx->tx_waiting || tx->tx_sending != 0);
- }
+ if (txs != &conn->ibc_active_txs) {
+ LASSERT(tx->tx_queued);
+ } else {
+ LASSERT(!tx->tx_queued);
+ LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+ }
- if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
- CERROR("Timed out tx: %s, %lu seconds\n",
- kiblnd_queue2str(conn, txs),
- cfs_duration_sec(jiffies - tx->tx_deadline));
- return 1;
- }
- }
+ if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
+ CERROR("Timed out tx: %s, %lu seconds\n",
+ kiblnd_queue2str(conn, txs),
+ cfs_duration_sec(jiffies - tx->tx_deadline));
+ return 1;
+ }
+ }
- return 0;
+ return 0;
}
static int
void
kiblnd_check_conns (int idx)
{
- CFS_LIST_HEAD (closes);
- CFS_LIST_HEAD (checksends);
- cfs_list_t *peers = &kiblnd_data.kib_peers[idx];
- cfs_list_t *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- unsigned long flags;
+ struct list_head closes = LIST_HEAD_INIT(closes);
+ struct list_head checksends = LIST_HEAD_INIT(checksends);
+ struct list_head *peers = &kiblnd_data.kib_peers[idx];
+ struct list_head *ptmp;
+ kib_peer_t *peer;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ unsigned long flags;
/* NB. We expect to have a look at all the peers and not find any
* RDMAs to time out, so we just use a shared lock while we
* take a look... */
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- cfs_list_for_each (ptmp, peers) {
- peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
+ list_for_each(ptmp, peers) {
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
- cfs_list_for_each (ctmp, &peer->ibp_conns) {
- int timedout;
- int sendnoop;
+ list_for_each(ctmp, &peer->ibp_conns) {
+ int timedout;
+ int sendnoop;
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
+ LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
spin_lock(&conn->ibc_lock);
timedout = kiblnd_conn_timed_out_locked(conn);
if (!sendnoop && !timedout) {
spin_unlock(&conn->ibc_lock);
- continue;
- }
+ continue;
+ }
- if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): "
- "c: %u, oc: %u, rc: %u\n",
- libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive),
- conn->ibc_credits,
- conn->ibc_outstanding_credits,
- conn->ibc_reserved_credits);
- cfs_list_add(&conn->ibc_connd_list, &closes);
- } else {
- cfs_list_add(&conn->ibc_connd_list,
- &checksends);
- }
- /* +ref for 'closes' or 'checksends' */
- kiblnd_conn_addref(conn);
+ if (timedout) {
+ CERROR("Timed out RDMA with %s (%lu): "
+ "c: %u, oc: %u, rc: %u\n",
+ libcfs_nid2str(peer->ibp_nid),
+ cfs_duration_sec(cfs_time_current() -
+ peer->ibp_last_alive),
+ conn->ibc_credits,
+ conn->ibc_outstanding_credits,
+ conn->ibc_reserved_credits);
+ list_add(&conn->ibc_connd_list, &closes);
+ } else {
+ list_add(&conn->ibc_connd_list, &checksends);
+ }
+ /* +ref for 'closes' or 'checksends' */
+ kiblnd_conn_addref(conn);
spin_unlock(&conn->ibc_lock);
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* Handle timeout by closing the whole
- * connection. We can only be sure RDMA activity
- * has ceased once the QP has been modified. */
- while (!cfs_list_empty(&closes)) {
- conn = cfs_list_entry(closes.next,
- kib_conn_t, ibc_connd_list);
- cfs_list_del(&conn->ibc_connd_list);
- kiblnd_close_conn(conn, -ETIMEDOUT);
- kiblnd_conn_decref(conn);
- }
+ /* Handle timeout by closing the whole
+ * connection. We can only be sure RDMA activity
+ * has ceased once the QP has been modified. */
+ while (!list_empty(&closes)) {
+ conn = list_entry(closes.next,
+ kib_conn_t, ibc_connd_list);
+ list_del(&conn->ibc_connd_list);
+ kiblnd_close_conn(conn, -ETIMEDOUT);
+ kiblnd_conn_decref(conn);
+ }
- /* In case we have enough credits to return via a
- * NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- while (!cfs_list_empty(&checksends)) {
- conn = cfs_list_entry(checksends.next,
- kib_conn_t, ibc_connd_list);
- cfs_list_del(&conn->ibc_connd_list);
- kiblnd_check_sends(conn);
- kiblnd_conn_decref(conn);
- }
+ /* In case we have enough credits to return via a
+ * NOOP, but there were no non-blocking tx descs
+ * free to do it last time... */
+ while (!list_empty(&checksends)) {
+ conn = list_entry(checksends.next,
+ kib_conn_t, ibc_connd_list);
+ list_del(&conn->ibc_connd_list);
+ kiblnd_check_sends(conn);
+ kiblnd_conn_decref(conn);
+ }
}
void
dropped_lock = 0;
- if (!cfs_list_empty (&kiblnd_data.kib_connd_zombies)) {
- conn = cfs_list_entry(kiblnd_data. \
+ if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ conn = list_entry(kiblnd_data. \
kib_connd_zombies.next,
kib_conn_t, ibc_list);
- cfs_list_del(&conn->ibc_list);
+ list_del(&conn->ibc_list);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
flags);
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
- if (!cfs_list_empty(&kiblnd_data.kib_connd_conns)) {
- conn = cfs_list_entry(kiblnd_data.kib_connd_conns.next,
+ if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_entry(kiblnd_data.kib_connd_conns.next,
kib_conn_t, ibc_list);
- cfs_list_del(&conn->ibc_list);
+ list_del(&conn->ibc_list);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
flags);
conn->ibc_nsends_posted > 0)) {
kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
conn->ibc_scheduled = 1;
- cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
+ list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))
wake_up(&sched->ibs_waitq);
did_something = 0;
- if (!cfs_list_empty(&sched->ibs_conns)) {
- conn = cfs_list_entry(sched->ibs_conns.next,
+ if (!list_empty(&sched->ibs_conns)) {
+ conn = list_entry(sched->ibs_conns.next,
kib_conn_t, ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
- cfs_list_del(&conn->ibc_sched_list);
+ list_del(&conn->ibc_sched_list);
conn->ibc_ready = 0;
spin_unlock_irqrestore(&sched->ibs_lock, flags);
* this one... */
/* +1 ref for sched_conns */
kiblnd_conn_addref(conn);
- cfs_list_add_tail(&conn->ibc_sched_list,
+ list_add_tail(&conn->ibc_sched_list,
&sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))
wake_up(&sched->ibs_waitq);
int do_failover = 0;
int long_sleep;
- cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+ list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
ibd_fail_list) {
if (cfs_time_before(cfs_time_current(),
dev->ibd_next_failover))
}
if (do_failover) {
- cfs_list_del_init(&dev->ibd_fail_list);
+ list_del_init(&dev->ibd_fail_list);
dev->ibd_failover = 1;
write_unlock_irqrestore(glock, flags);
dev->ibd_next_failover =
cfs_time_shift(min(dev->ibd_failed_failover, 10));
if (kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
+ list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
}
}
/* long sleep if no more pending failover */
- long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+ long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
* we need checking like this because if there is not active
* connection on the dev and no SEND from local, we may listen
* on wrong HCA for ever while there is a bonding failover */
- cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
if (kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
+ list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
}
}
peer->ksnp_last_alive = 0;
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
- CFS_INIT_LIST_HEAD (&peer->ksnp_conns);
- CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
- CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
- CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
+ INIT_LIST_HEAD(&peer->ksnp_conns);
+ INIT_LIST_HEAD(&peer->ksnp_routes);
+ INIT_LIST_HEAD(&peer->ksnp_tx_queue);
+ INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
spin_lock_init(&peer->ksnp_lock);
spin_lock_bh(&net->ksnn_lock);
CDEBUG (D_NET, "peer %s %p deleted\n",
libcfs_id2str(peer->ksnp_id), peer);
- LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
- LASSERT (peer->ksnp_accepting == 0);
- LASSERT (cfs_list_empty (&peer->ksnp_conns));
- LASSERT (cfs_list_empty (&peer->ksnp_routes));
- LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
- LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
+ LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
+ LASSERT(peer->ksnp_accepting == 0);
+ LASSERT(list_empty(&peer->ksnp_conns));
+ LASSERT(list_empty(&peer->ksnp_routes));
+ LASSERT(list_empty(&peer->ksnp_tx_queue));
+ LASSERT(list_empty(&peer->ksnp_zc_req_list));
- LIBCFS_FREE (peer, sizeof (*peer));
+ LIBCFS_FREE(peer, sizeof(*peer));
/* NB a peer's connections and routes keep a reference on their peer
* until they are destroyed, so we can be assured that _all_ state to
ksock_peer_t *
ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
{
- cfs_list_t *peer_list = ksocknal_nid2peerlist(id.nid);
- cfs_list_t *tmp;
- ksock_peer_t *peer;
+ struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
+ struct list_head *tmp;
+ ksock_peer_t *peer;
- cfs_list_for_each (tmp, peer_list) {
+ list_for_each(tmp, peer_list) {
- peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, ksock_peer_t, ksnp_list);
- LASSERT (!peer->ksnp_closing);
+ LASSERT(!peer->ksnp_closing);
- if (peer->ksnp_ni != ni)
- continue;
+ if (peer->ksnp_ni != ni)
+ continue;
- if (peer->ksnp_id.nid != id.nid ||
- peer->ksnp_id.pid != id.pid)
- continue;
+ if (peer->ksnp_id.nid != id.nid ||
+ peer->ksnp_id.pid != id.pid)
+ continue;
CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
peer, libcfs_id2str(id),
atomic_read(&peer->ksnp_refcount));
- return (peer);
+ return peer;
}
- return (NULL);
+ return NULL;
}
ksock_peer_t *
iface->ksni_npeers--;
}
- LASSERT (cfs_list_empty(&peer->ksnp_conns));
- LASSERT (cfs_list_empty(&peer->ksnp_routes));
- LASSERT (!peer->ksnp_closing);
- peer->ksnp_closing = 1;
- cfs_list_del (&peer->ksnp_list);
- /* lose peerlist's ref */
- ksocknal_peer_decref(peer);
+ LASSERT(list_empty(&peer->ksnp_conns));
+ LASSERT(list_empty(&peer->ksnp_routes));
+ LASSERT(!peer->ksnp_closing);
+ peer->ksnp_closing = 1;
+ list_del(&peer->ksnp_list);
+ /* lose peerlist's ref */
+ ksocknal_peer_decref(peer);
}
int
lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
- ksock_peer_t *peer;
- cfs_list_t *ptmp;
- ksock_route_t *route;
- cfs_list_t *rtmp;
- int i;
+ ksock_peer_t *peer;
+ struct list_head *ptmp;
+ ksock_route_t *route;
+ struct list_head *rtmp;
+ int i;
int j;
- int rc = -ENOENT;
+ int rc = -ENOENT;
read_lock(&ksocknal_data.ksnd_global_lock);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
- cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
+ if (peer->ksnp_ni != ni)
+ continue;
- if (peer->ksnp_ni != ni)
- continue;
-
- if (peer->ksnp_n_passive_ips == 0 &&
- cfs_list_empty(&peer->ksnp_routes)) {
- if (index-- > 0)
- continue;
+ if (peer->ksnp_n_passive_ips == 0 &&
+ list_empty(&peer->ksnp_routes)) {
+ if (index-- > 0)
+ continue;
*id = peer->ksnp_id;
*myip = 0;
goto out;
}
- for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
- if (index-- > 0)
- continue;
+ for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
+ if (index-- > 0)
+ continue;
*id = peer->ksnp_id;
*myip = peer->ksnp_passive_ips[j];
goto out;
}
- cfs_list_for_each (rtmp, &peer->ksnp_routes) {
- if (index-- > 0)
- continue;
-
- route = cfs_list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ if (index-- > 0)
+ continue;
- *id = peer->ksnp_id;
- *myip = route->ksnr_myipaddr;
- *peer_ip = route->ksnr_ipaddr;
- *port = route->ksnr_port;
- *conn_count = route->ksnr_conn_count;
- *share_count = route->ksnr_share_count;
- rc = 0;
- goto out;
- }
- }
- }
- out:
+ route = list_entry(rtmp, ksock_route_t,
+ ksnr_list);
+
+ *id = peer->ksnp_id;
+ *myip = route->ksnr_myipaddr;
+ *peer_ip = route->ksnr_ipaddr;
+ *port = route->ksnr_port;
+ *conn_count = route->ksnr_conn_count;
+ *share_count = route->ksnr_share_count;
+ rc = 0;
+ goto out;
+ }
+ }
+ }
+out:
read_unlock(&ksocknal_data.ksnd_global_lock);
- return (rc);
+ return rc;
}
void
void
ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
{
- cfs_list_t *tmp;
- ksock_conn_t *conn;
- ksock_route_t *route2;
-
- LASSERT (!peer->ksnp_closing);
- LASSERT (route->ksnr_peer == NULL);
- LASSERT (!route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
- LASSERT (route->ksnr_connected == 0);
-
- /* LASSERT(unique) */
- cfs_list_for_each(tmp, &peer->ksnp_routes) {
- route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
-
- if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
- CERROR ("Duplicate route %s %u.%u.%u.%u\n",
- libcfs_id2str(peer->ksnp_id),
- HIPQUAD(route->ksnr_ipaddr));
- LBUG();
- }
- }
+ struct list_head *tmp;
+ ksock_conn_t *conn;
+ ksock_route_t *route2;
+
+ LASSERT(!peer->ksnp_closing);
+ LASSERT(route->ksnr_peer == NULL);
+ LASSERT(!route->ksnr_scheduled);
+ LASSERT(!route->ksnr_connecting);
+ LASSERT(route->ksnr_connected == 0);
+
+ /* LASSERT(unique) */
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+
+ if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
+ CERROR("Duplicate route %s %u.%u.%u.%u\n",
+ libcfs_id2str(peer->ksnp_id),
+ HIPQUAD(route->ksnr_ipaddr));
+ LBUG();
+ }
+ }
- route->ksnr_peer = peer;
- ksocknal_peer_addref(peer);
- /* peer's routelist takes over my ref on 'route' */
- cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+ route->ksnr_peer = peer;
+ ksocknal_peer_addref(peer);
+ /* peer's routelist takes over my ref on 'route' */
+ list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
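+	/* associate any existing conns to this IP with the new route */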
- cfs_list_for_each(tmp, &peer->ksnp_conns) {
- conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
- if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
- continue;
+ if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
+ continue;
- ksocknal_associate_route_conn_locked(route, conn);
- /* keep going (typed routes) */
- }
+ ksocknal_associate_route_conn_locked(route, conn);
+ /* keep going (typed routes) */
+ }
}
void
ksocknal_del_route_locked (ksock_route_t *route)
{
- ksock_peer_t *peer = route->ksnr_peer;
- ksock_interface_t *iface;
- ksock_conn_t *conn;
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
+ ksock_peer_t *peer = route->ksnr_peer;
+ ksock_interface_t *iface;
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
- LASSERT (!route->ksnr_deleted);
+ LASSERT(!route->ksnr_deleted);
- /* Close associated conns */
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
- conn = cfs_list_entry(ctmp, ksock_conn_t, ksnc_list);
+ /* Close associated conns */
+ list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
- if (conn->ksnc_route != route)
- continue;
+ if (conn->ksnc_route != route)
+ continue;
- ksocknal_close_conn_locked (conn, 0);
- }
+ ksocknal_close_conn_locked(conn, 0);
+ }
- if (route->ksnr_myipaddr != 0) {
- iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myipaddr);
- if (iface != NULL)
- iface->ksni_nroutes--;
- }
+ if (route->ksnr_myipaddr != 0) {
+ iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
+ route->ksnr_myipaddr);
+ if (iface != NULL)
+ iface->ksni_nroutes--;
+ }
- route->ksnr_deleted = 1;
- cfs_list_del (&route->ksnr_list);
- ksocknal_route_decref(route); /* drop peer's ref */
+ route->ksnr_deleted = 1;
+ list_del(&route->ksnr_list);
+ ksocknal_route_decref(route); /* drop peer's ref */
- if (cfs_list_empty (&peer->ksnp_routes) &&
- cfs_list_empty (&peer->ksnp_conns)) {
- /* I've just removed the last route to a peer with no active
- * connections */
- ksocknal_unlink_peer_locked (peer);
- }
+ if (list_empty(&peer->ksnp_routes) &&
+ list_empty(&peer->ksnp_conns)) {
+ /* I've just removed the last route to a peer with no active
+ * connections */
+ ksocknal_unlink_peer_locked(peer);
+ }
}
int
-ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
+ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
- cfs_list_t *tmp;
- ksock_peer_t *peer;
- ksock_peer_t *peer2;
- ksock_route_t *route;
- ksock_route_t *route2;
- int rc;
+ struct list_head *tmp;
+ ksock_peer_t *peer;
+ ksock_peer_t *peer2;
+ ksock_route_t *route;
+ ksock_route_t *route2;
+ int rc;
if (id.nid == LNET_NID_ANY ||
id.pid == LNET_PID_ANY)
/* always called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
- peer2 = ksocknal_find_peer_locked (ni, id);
- if (peer2 != NULL) {
- ksocknal_peer_decref(peer);
- peer = peer2;
- } else {
- /* peer table takes my ref on peer */
- cfs_list_add_tail (&peer->ksnp_list,
- ksocknal_nid2peerlist (id.nid));
- }
+ peer2 = ksocknal_find_peer_locked(ni, id);
+ if (peer2 != NULL) {
+ ksocknal_peer_decref(peer);
+ peer = peer2;
+ } else {
+ /* peer table takes my ref on peer */
+ list_add_tail(&peer->ksnp_list,
+ ksocknal_nid2peerlist(id.nid));
+ }
- route2 = NULL;
- cfs_list_for_each (tmp, &peer->ksnp_routes) {
- route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = NULL;
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route2 = list_entry(tmp, ksock_route_t, ksnr_list);
- if (route2->ksnr_ipaddr == ipaddr)
- break;
+ if (route2->ksnr_ipaddr == ipaddr)
+ break;
- route2 = NULL;
- }
- if (route2 == NULL) {
- ksocknal_add_route_locked(peer, route);
- route->ksnr_share_count++;
- } else {
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
- }
+ route2 = NULL;
+ }
+ if (route2 == NULL) {
+ ksocknal_add_route_locked(peer, route);
+ route->ksnr_share_count++;
+ } else {
+ ksocknal_route_decref(route);
+ route2->ksnr_share_count++;
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return (0);
+ return 0;
}
void
ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
{
- ksock_conn_t *conn;
- ksock_route_t *route;
- cfs_list_t *tmp;
- cfs_list_t *nxt;
- int nshared;
+ ksock_conn_t *conn;
+ ksock_route_t *route;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ int nshared;
- LASSERT (!peer->ksnp_closing);
+ LASSERT(!peer->ksnp_closing);
- /* Extra ref prevents peer disappearing until I'm done with it */
- ksocknal_peer_addref(peer);
+ /* Extra ref prevents peer disappearing until I'm done with it */
+ ksocknal_peer_addref(peer);
- cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
- route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
- /* no match */
- if (!(ip == 0 || route->ksnr_ipaddr == ip))
- continue;
+ /* no match */
+ if (!(ip == 0 || route->ksnr_ipaddr == ip))
+ continue;
- route->ksnr_share_count = 0;
- /* This deletes associated conns too */
- ksocknal_del_route_locked (route);
- }
+ route->ksnr_share_count = 0;
+ /* This deletes associated conns too */
+ ksocknal_del_route_locked(route);
+ }
- nshared = 0;
- cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
- route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
- nshared += route->ksnr_share_count;
- }
+ nshared = 0;
+ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
+ nshared += route->ksnr_share_count;
+ }
- if (nshared == 0) {
- /* remove everything else if there are no explicit entries
- * left */
+ if (nshared == 0) {
+ /* remove everything else if there are no explicit entries
+ * left */
- cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
- route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
- /* we should only be removing auto-entries */
- LASSERT(route->ksnr_share_count == 0);
- ksocknal_del_route_locked (route);
- }
+ /* we should only be removing auto-entries */
+ LASSERT(route->ksnr_share_count == 0);
+ ksocknal_del_route_locked(route);
+ }
- cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
- conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
- ksocknal_close_conn_locked(conn, 0);
- }
- }
+ ksocknal_close_conn_locked(conn, 0);
+ }
+ }
- ksocknal_peer_decref(peer);
- /* NB peer unlinks itself when last conn/route is removed */
+ ksocknal_peer_decref(peer);
+ /* NB peer unlinks itself when last conn/route is removed */
}
int
ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
{
- CFS_LIST_HEAD (zombies);
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
- ksock_peer_t *peer;
- int lo;
- int hi;
- int i;
- int rc = -ENOENT;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ ksock_peer_t *peer;
+ int lo;
+ int hi;
+ int i;
+ int rc = -ENOENT;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
- lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
- }
+ if (id.nid != LNET_NID_ANY) {
+ hi = (int)(ksocknal_nid2peerlist(id.nid) -
+ ksocknal_data.ksnd_peers);
+ lo = hi;
+ } else {
+ lo = 0;
+ hi = ksocknal_data.ksnd_peer_hash_size - 1;
+ }
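+	/* scan one hash bucket for a specific nid, or the whole peer table */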
- for (i = lo; i <= hi; i++) {
- cfs_list_for_each_safe (ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe(ptmp, pnxt,
+ &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
- if (peer->ksnp_ni != ni)
- continue;
+ if (peer->ksnp_ni != ni)
+ continue;
- if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
- (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
- continue;
+ if (!((id.nid == LNET_NID_ANY ||
+ peer->ksnp_id.nid == id.nid) &&
+ (id.pid == LNET_PID_ANY ||
+ peer->ksnp_id.pid == id.pid)))
+ continue;
- ksocknal_peer_addref(peer); /* a ref for me... */
+ ksocknal_peer_addref(peer); /* a ref for me... */
- ksocknal_del_peer_locked (peer, ip);
+ ksocknal_del_peer_locked(peer, ip);
- if (peer->ksnp_closing &&
- !cfs_list_empty(&peer->ksnp_tx_queue)) {
- LASSERT (cfs_list_empty(&peer->ksnp_conns));
- LASSERT (cfs_list_empty(&peer->ksnp_routes));
+ if (peer->ksnp_closing &&
+ !list_empty(&peer->ksnp_tx_queue)) {
+ LASSERT(list_empty(&peer->ksnp_conns));
+ LASSERT(list_empty(&peer->ksnp_routes));
- cfs_list_splice_init(&peer->ksnp_tx_queue,
- &zombies);
- }
+ list_splice_init(&peer->ksnp_tx_queue,
+ &zombies);
+ }
- ksocknal_peer_decref(peer); /* ...till here */
+ ksocknal_peer_decref(peer); /* ...till here */
- rc = 0; /* matched! */
- }
- }
+ rc = 0; /* matched! */
+ }
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
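+	/* finalize, with error, any TXs left queued on the peers just deleted */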
- ksocknal_txlist_done(ni, &zombies, 1);
+ ksocknal_txlist_done(ni, &zombies, 1);
- return (rc);
+ return rc;
}
ksock_conn_t *
ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
{
- ksock_peer_t *peer;
- cfs_list_t *ptmp;
- ksock_conn_t *conn;
- cfs_list_t *ctmp;
- int i;
+ ksock_peer_t *peer;
+ struct list_head *ptmp;
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+ int i;
read_lock(&ksocknal_data.ksnd_global_lock);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
- LASSERT (!peer->ksnp_closing);
+ LASSERT(!peer->ksnp_closing);
- if (peer->ksnp_ni != ni)
- continue;
+ if (peer->ksnp_ni != ni)
+ continue;
- cfs_list_for_each (ctmp, &peer->ksnp_conns) {
- if (index-- > 0)
- continue;
+ list_for_each(ctmp, &peer->ksnp_conns) {
+ if (index-- > 0)
+ continue;
- conn = cfs_list_entry (ctmp, ksock_conn_t,
- ksnc_list);
- ksocknal_conn_addref(conn);
+ conn = list_entry(ctmp, ksock_conn_t,
+ ksnc_list);
+ ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data. \
- ksnd_global_lock);
- return (conn);
- }
- }
- }
+ ksnd_global_lock);
+ return conn;
+ }
+ }
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
- return (NULL);
+ return NULL;
}
ksock_sched_t *
ksocknal_create_routes(ksock_peer_t *peer, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
- ksock_route_t *newroute = NULL;
+ ksock_route_t *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- lnet_ni_t *ni = peer->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
- cfs_list_t *rtmp;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
- int best_netmatch;
- int this_netmatch;
- int best_nroutes;
- int i;
- int j;
+ lnet_ni_t *ni = peer->ksnp_ni;
+ ksock_net_t *net = ni->ni_data;
+ struct list_head *rtmp;
+ ksock_route_t *route;
+ ksock_interface_t *iface;
+ ksock_interface_t *best_iface;
+ int best_netmatch;
+ int this_netmatch;
+ int best_nroutes;
+ int i;
+ int j;
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
break;
}
- /* Already got a route? */
- route = NULL;
- cfs_list_for_each(rtmp, &peer->ksnp_routes) {
- route = cfs_list_entry(rtmp, ksock_route_t, ksnr_list);
+ /* Already got a route? */
+ route = NULL;
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ route = list_entry(rtmp, ksock_route_t, ksnr_list);
- if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
- break;
+ if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
+ break;
- route = NULL;
- }
- if (route != NULL)
- continue;
+ route = NULL;
+ }
+ if (route != NULL)
+ continue;
- best_iface = NULL;
- best_nroutes = 0;
- best_netmatch = 0;
+ best_iface = NULL;
+ best_nroutes = 0;
+ best_netmatch = 0;
- LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
+ LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
- /* Select interface to connect from */
- for (j = 0; j < net->ksnn_ninterfaces; j++) {
- iface = &net->ksnn_interfaces[j];
+ /* Select interface to connect from */
+ for (j = 0; j < net->ksnn_ninterfaces; j++) {
+ iface = &net->ksnn_interfaces[j];
- /* Using this interface already? */
- cfs_list_for_each(rtmp, &peer->ksnp_routes) {
- route = cfs_list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ /* Using this interface already? */
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ route = list_entry(rtmp, ksock_route_t,
+ ksnr_list);
- if (route->ksnr_myipaddr == iface->ksni_ipaddr)
- break;
+ if (route->ksnr_myipaddr == iface->ksni_ipaddr)
+ break;
- route = NULL;
- }
- if (route != NULL)
- continue;
+ route = NULL;
+ }
+ if (route != NULL)
+ continue;
this_netmatch = (((iface->ksni_ipaddr ^
newroute->ksnr_ipaddr) &
return -ENOMEM;
}
- lnet_ni_addref(ni);
- cr->ksncr_ni = ni;
- cr->ksncr_sock = sock;
+ lnet_ni_addref(ni);
+ cr->ksncr_ni = ni;
+ cr->ksncr_sock = sock;
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+ list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
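+	/* a connd thread will pick this request up and create the conn */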
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- return 0;
+ return 0;
}
int
ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
{
- ksock_route_t *route;
-
- cfs_list_for_each_entry_typed (route, &peer->ksnp_routes,
- ksock_route_t, ksnr_list) {
+ ksock_route_t *route;
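+	/* report whether a connection attempt to this IP is already in progress */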
- if (route->ksnr_ipaddr == ipaddr)
- return route->ksnr_connecting;
- }
- return 0;
+ list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
+ if (route->ksnr_ipaddr == ipaddr)
+ return route->ksnr_connecting;
+ }
+ return 0;
}
int
cfs_socket_t *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- CFS_LIST_HEAD (zombies);
- lnet_process_id_t peerid;
- cfs_list_t *tmp;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ lnet_process_id_t peerid;
+ struct list_head *tmp;
__u64 incarnation;
ksock_conn_t *conn;
ksock_conn_t *conn2;
conn->ksnc_rx_ready = 0;
conn->ksnc_rx_scheduled = 0;
- CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
+ INIT_LIST_HEAD(&conn->ksnc_tx_queue);
conn->ksnc_tx_ready = 0;
conn->ksnc_tx_scheduled = 0;
conn->ksnc_tx_carrier = NULL;
/* called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
- peer2 = ksocknal_find_peer_locked(ni, peerid);
- if (peer2 == NULL) {
- /* NB this puts an "empty" peer in the peer
- * table (which takes my ref) */
- cfs_list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
- } else {
- ksocknal_peer_decref(peer);
- peer = peer2;
- }
+ peer2 = ksocknal_find_peer_locked(ni, peerid);
+ if (peer2 == NULL) {
+ /* NB this puts an "empty" peer in the peer
+ * table (which takes my ref) */
+ list_add_tail(&peer->ksnp_list,
+ ksocknal_nid2peerlist(peerid.nid));
+ } else {
+ ksocknal_peer_decref(peer);
+ peer = peer2;
+ }
/* +1 ref for me */
ksocknal_peer_addref(peer);
goto failed_2;
}
- if (peer->ksnp_proto == NULL) {
- /* Never connected before.
- * NB recv_hello may have returned EPROTO to signal my peer
- * wants a different protocol than the one I asked for.
- */
- LASSERT (cfs_list_empty(&peer->ksnp_conns));
+ if (peer->ksnp_proto == NULL) {
+ /* Never connected before.
+ * NB recv_hello may have returned EPROTO to signal my peer
+ * wants a different protocol than the one I asked for.
+ */
+ LASSERT(list_empty(&peer->ksnp_conns));
- peer->ksnp_proto = conn->ksnc_proto;
- peer->ksnp_incarnation = incarnation;
- }
+ peer->ksnp_proto = conn->ksnc_proto;
+ peer->ksnp_incarnation = incarnation;
+ }
if (peer->ksnp_proto != conn->ksnc_proto ||
peer->ksnp_incarnation != incarnation) {
goto failed_2;
}
- /* Refuse to duplicate an existing connection, unless this is a
- * loopback connection */
- if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
- cfs_list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ /* Refuse to duplicate an existing connection, unless this is a
+ * loopback connection */
+ if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
HIPQUAD(conn->ksnc_ipaddr));
}
- /* Search for a route corresponding to the new connection and
- * create an association. This allows incoming connections created
- * by routes in my peer to match my own route entries so I don't
- * continually create duplicate routes. */
- cfs_list_for_each (tmp, &peer->ksnp_routes) {
- route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+ /* Search for a route corresponding to the new connection and
+ * create an association. This allows incoming connections created
+ * by routes in my peer to match my own route entries so I don't
+ * continually create duplicate routes. */
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
- if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
- continue;
+ if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
+ continue;
- ksocknal_associate_route_conn_locked(route, conn);
- break;
- }
+ ksocknal_associate_route_conn_locked(route, conn);
+ break;
+ }
conn->ksnc_peer = peer; /* conn takes my ref on peer */
peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
smp_mb(); /* order with adding to peer's conn list */
- cfs_list_add (&conn->ksnc_list, &peer->ksnp_conns);
+ list_add(&conn->ksnc_list, &peer->ksnp_conns);
ksocknal_conn_addref(conn);
ksocknal_new_packet(conn, 0);
conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
- /* Take packets blocking for this connection. */
- cfs_list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
- if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
- continue;
+ /* Take packets blocking for this connection. */
+ list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
+ if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
+ SOCKNAL_MATCH_NO)
+ continue;
- cfs_list_del (&tx->tx_list);
- ksocknal_queue_tx_locked (tx, conn);
- }
+ list_del(&tx->tx_list);
+ ksocknal_queue_tx_locked(tx, conn);
+ }
write_unlock_bh(global_lock);
ksocknal_conn_decref(conn);
return rc;
- failed_2:
- if (!peer->ksnp_closing &&
- cfs_list_empty (&peer->ksnp_conns) &&
- cfs_list_empty (&peer->ksnp_routes)) {
- cfs_list_add(&zombies, &peer->ksnp_tx_queue);
- cfs_list_del_init(&peer->ksnp_tx_queue);
- ksocknal_unlink_peer_locked(peer);
- }
+failed_2:
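+	/* if nothing else references the peer, take back its queued TXs and unlink it */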
+ if (!peer->ksnp_closing &&
+ list_empty(&peer->ksnp_conns) &&
+ list_empty(&peer->ksnp_routes)) {
+ list_add(&zombies, &peer->ksnp_tx_queue);
+ list_del_init(&peer->ksnp_tx_queue);
+ ksocknal_unlink_peer_locked(peer);
+ }
write_unlock_bh(global_lock);
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
- cfs_list_t *tmp;
+ struct list_head *tmp;
- LASSERT (peer->ksnp_error == 0);
- LASSERT (!conn->ksnc_closing);
- conn->ksnc_closing = 1;
+ LASSERT(peer->ksnp_error == 0);
+ LASSERT(!conn->ksnc_closing);
+ conn->ksnc_closing = 1;
- /* ksnd_deathrow_conns takes over peer's ref */
- cfs_list_del (&conn->ksnc_list);
+ /* ksnd_deathrow_conns takes over peer's ref */
+ list_del(&conn->ksnc_list);
- route = conn->ksnc_route;
- if (route != NULL) {
- /* dissociate conn from route... */
- LASSERT (!route->ksnr_deleted);
- LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+ route = conn->ksnc_route;
+ if (route != NULL) {
+ /* dissociate conn from route... */
+ LASSERT(!route->ksnr_deleted);
+ LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
- conn2 = NULL;
- cfs_list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = NULL;
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
- if (conn2->ksnc_route == route &&
- conn2->ksnc_type == conn->ksnc_type)
- break;
+ if (conn2->ksnc_route == route &&
+ conn2->ksnc_type == conn->ksnc_type)
+ break;
- conn2 = NULL;
- }
- if (conn2 == NULL)
- route->ksnr_connected &= ~(1 << conn->ksnc_type);
+ conn2 = NULL;
+ }
+ if (conn2 == NULL)
+ route->ksnr_connected &= ~(1 << conn->ksnc_type);
- conn->ksnc_route = NULL;
+ conn->ksnc_route = NULL;
-#if 0 /* irrelevent with only eager routes */
- /* make route least favourite */
- cfs_list_del (&route->ksnr_list);
- cfs_list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
+#if 0 /* irrelevant with only eager routes */
+ /* make route least favourite */
+ list_del(&route->ksnr_list);
+ list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
#endif
- ksocknal_route_decref(route); /* drop conn's ref on route */
- }
+ ksocknal_route_decref(route); /* drop conn's ref on route */
+ }
- if (cfs_list_empty (&peer->ksnp_conns)) {
- /* No more connections to this peer */
+ if (list_empty(&peer->ksnp_conns)) {
+ /* No more connections to this peer */
- if (!cfs_list_empty(&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx;
+ if (!list_empty(&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx;
- LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- /* throw them to the last connection...,
- * these TXs will be send to /dev/null by scheduler */
- cfs_list_for_each_entry(tx, &peer->ksnp_tx_queue,
- tx_list)
- ksocknal_tx_prep(conn, tx);
+			/* throw them to the last connection...,
+			 * these TXs will be sent to /dev/null by scheduler */
+ list_for_each_entry(tx, &peer->ksnp_tx_queue,
+ tx_list)
+ ksocknal_tx_prep(conn, tx);
spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- cfs_list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
+ list_splice_init(&peer->ksnp_tx_queue,
+ &conn->ksnc_tx_queue);
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
- }
+ }
- peer->ksnp_proto = NULL; /* renegotiate protocol version */
- peer->ksnp_error = error; /* stash last conn close reason */
+ /* renegotiate protocol version */
+ peer->ksnp_proto = NULL;
+ /* stash last conn close reason */
+ peer->ksnp_error = error;
- if (cfs_list_empty (&peer->ksnp_routes)) {
- /* I've just closed last conn belonging to a
- * peer with no routes to it */
- ksocknal_unlink_peer_locked (peer);
- }
- }
+ if (list_empty(&peer->ksnp_routes)) {
+ /* I've just closed last conn belonging to a
+ * peer with no routes to it */
+ ksocknal_unlink_peer_locked(peer);
+ }
+ }
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
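+	/* hand the conn over to the reaper for teardown */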
- cfs_list_add_tail(&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
+ list_add_tail(&conn->ksnc_list,
+ &ksocknal_data.ksnd_deathrow_conns);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
read_lock(&ksocknal_data.ksnd_global_lock);
- if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
- cfs_list_empty(&peer->ksnp_conns) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
- notify = 1;
- last_alive = peer->ksnp_last_alive;
- }
+ if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+ list_empty(&peer->ksnp_conns) &&
+ peer->ksnp_accepting == 0 &&
+ ksocknal_find_connecting_route_locked(peer) == NULL) {
+ notify = 1;
+ last_alive = peer->ksnp_last_alive;
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (notify)
- lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
- last_alive);
+ if (notify)
+ lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
+ last_alive);
}
void
ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
- CFS_LIST_HEAD (zlist);
+ ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_tx_t *tx;
+ ksock_tx_t *tmp;
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
- /* NB safe to finalize TXs because closing of socket will
- * abort all buffered data */
- LASSERT (conn->ksnc_sock == NULL);
+ /* NB safe to finalize TXs because closing of socket will
+ * abort all buffered data */
+ LASSERT(conn->ksnc_sock == NULL);
spin_lock(&peer->ksnp_lock);
- cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
- ksock_tx_t, tx_zc_list) {
- if (tx->tx_conn != conn)
- continue;
+ list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
+ if (tx->tx_conn != conn)
+ continue;
- LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);
+ LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- tx->tx_zc_aborted = 1; /* mark it as not-acked */
- cfs_list_del(&tx->tx_zc_list);
- cfs_list_add(&tx->tx_zc_list, &zlist);
- }
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ tx->tx_zc_aborted = 1; /* mark it as not-acked */
+ list_del(&tx->tx_zc_list);
+ list_add(&tx->tx_zc_list, &zlist);
+ }
spin_unlock(&peer->ksnp_lock);
- while (!cfs_list_empty(&zlist)) {
- tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
- cfs_list_del(&tx->tx_zc_list);
- ksocknal_tx_decref(tx);
- }
+ list_del(&tx->tx_zc_list);
+ ksocknal_tx_decref(tx);
+ }
}
void
-ksocknal_terminate_conn (ksock_conn_t *conn)
+ksocknal_terminate_conn(ksock_conn_t *conn)
{
/* This gets called by the reaper (guaranteed thread context) to
* disengage the socket from its callbacks and close it.
conn->ksnc_tx_ready = 1;
if (!conn->ksnc_tx_scheduled &&
- !cfs_list_empty(&conn->ksnc_tx_queue)){
- cfs_list_add_tail (&conn->ksnc_tx_list,
+ !list_empty(&conn->ksnc_tx_queue)) {
+ list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
if (peer->ksnp_error != 0) {
/* peer's last conn closed in error */
- LASSERT (cfs_list_empty (&peer->ksnp_conns));
+ LASSERT(list_empty(&peer->ksnp_conns));
failed = 1;
peer->ksnp_error = 0; /* avoid multiple notifications */
}
LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+ list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
LASSERT (conn->ksnc_route == NULL);
LASSERT (!conn->ksnc_tx_scheduled);
LASSERT (!conn->ksnc_rx_scheduled);
- LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
+ LASSERT(list_empty(&conn->ksnc_tx_queue));
/* complete current receive if any */
switch (conn->ksnc_rx_state) {
ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
{
ksock_conn_t *conn;
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
int count = 0;
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
- conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
+ list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
if (ipaddr == 0 ||
conn->ksnc_ipaddr == ipaddr) {
ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
{
ksock_peer_t *peer;
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
int lo;
int hi;
int i;
}
for (i = lo; i <= hi; i++) {
- cfs_list_for_each_safe (ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
+ list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
- cfs_list_t *tmp;
+ struct list_head *tmp;
ksock_conn_t *conn;
int bufnob;
- cfs_list_for_each (tmp, &peer->ksnp_conns) {
- conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
if (bufnob < conn->ksnc_tx_bufnob) {
{
int index;
int i;
- cfs_list_t *tmp;
+ struct list_head *tmp;
ksock_conn_t *conn;
for (index = 0; ; index++) {
i = 0;
conn = NULL;
- cfs_list_for_each (tmp, &peer->ksnp_conns) {
+ list_for_each(tmp, &peer->ksnp_conns) {
if (i++ == index) {
- conn = cfs_list_entry (tmp, ksock_conn_t,
+ conn = list_entry(tmp, ksock_conn_t,
ksnc_list);
ksocknal_conn_addref(conn);
break;
ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
{
ksock_peer_t *peer;
- cfs_list_t *tmp;
+ struct list_head *tmp;
int index;
int i;
int j;
index = 0;
peer = NULL;
- cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry(tmp, ksock_peer_t,
+ list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(tmp, ksock_peer_t,
ksnp_list);
if (!((id.nid == LNET_NID_ANY ||
int rc;
int i;
int j;
- cfs_list_t *ptmp;
+ struct list_head *ptmp;
ksock_peer_t *peer;
- cfs_list_t *rtmp;
+ struct list_head *rtmp;
ksock_route_t *route;
if (ipaddress == 0 ||
iface->ksni_npeers = 0;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- cfs_list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry(ptmp, ksock_peer_t,
+ list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(ptmp, ksock_peer_t,
ksnp_list);
for (j = 0; j < peer->ksnp_n_passive_ips; j++)
if (peer->ksnp_passive_ips[j] == ipaddress)
iface->ksni_npeers++;
- cfs_list_for_each(rtmp, &peer->ksnp_routes) {
- route = cfs_list_entry(rtmp,
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ route = list_entry(rtmp,
ksock_route_t,
ksnr_list);
void
ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
{
- cfs_list_t *tmp;
- cfs_list_t *nxt;
+ struct list_head *tmp;
+ struct list_head *nxt;
ksock_route_t *route;
ksock_conn_t *conn;
int i;
break;
}
- cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
if (route->ksnr_myipaddr != ipaddr)
continue;
}
}
- cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked (conn, 0);
{
ksock_net_t *net = ni->ni_data;
int rc = -ENOENT;
- cfs_list_t *tmp;
- cfs_list_t *nxt;
+ struct list_head *tmp;
+ struct list_head *nxt;
ksock_peer_t *peer;
__u32 this_ip;
int i;
net->ksnn_ninterfaces--;
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
- cfs_list_for_each_safe(tmp, nxt,
+ list_for_each_safe(tmp, nxt,
&ksocknal_data.ksnd_peers[j]) {
- peer = cfs_list_entry(tmp, ksock_peer_t,
+ peer = list_entry(tmp, ksock_peer_t,
ksnp_list);
if (peer->ksnp_ni != ni)
}
LIBCFS_FREE (ksocknal_data.ksnd_peers,
- sizeof (cfs_list_t) *
+ sizeof(struct list_head) *
ksocknal_data.ksnd_peer_hash_size);
spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- cfs_list_t zlist;
+ if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ struct list_head zlist;
ksock_tx_t *tx;
- cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+ list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+ list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while (!cfs_list_empty(&zlist)) {
- tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+ list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
} else {
case SOCKNAL_INIT_DATA:
LASSERT (ksocknal_data.ksnd_peers != NULL);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- LASSERT (cfs_list_empty (&ksocknal_data.ksnd_peers[i]));
+ LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
}
- LASSERT(cfs_list_empty(&ksocknal_data.ksnd_nets));
- LASSERT (cfs_list_empty (&ksocknal_data.ksnd_enomem_conns));
- LASSERT (cfs_list_empty (&ksocknal_data.ksnd_zombie_conns));
- LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_connreqs));
- LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_routes));
+ LASSERT(list_empty(&ksocknal_data.ksnd_nets));
+ LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
+ LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
+ LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
+ LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
if (ksocknal_data.ksnd_sched_info != NULL) {
cfs_percpt_for_each(info, i,
for (j = 0; j < info->ksi_nthreads_max; j++) {
sched = &info->ksi_scheds[j];
- LASSERT(cfs_list_empty(&sched->\
+ LASSERT(list_empty(&sched->\
kss_tx_conns));
- LASSERT(cfs_list_empty(&sched->\
+ LASSERT(list_empty(&sched->\
kss_rx_conns));
- LASSERT(cfs_list_empty(&sched-> \
+ LASSERT(list_empty(&sched-> \
kss_zombie_noop_txs));
LASSERT(sched->kss_nconns == 0);
}
memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
- LIBCFS_ALLOC (ksocknal_data.ksnd_peers,
- sizeof (cfs_list_t) *
- ksocknal_data.ksnd_peer_hash_size);
+ LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
+ sizeof(struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
if (ksocknal_data.ksnd_peers == NULL)
return -ENOMEM;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
- CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
rwlock_init(&ksocknal_data.ksnd_global_lock);
- CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
spin_lock_init(&ksocknal_data.ksnd_connd_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
spin_lock_init(&ksocknal_data.ksnd_tx_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
/* NB memset above zeros whole of ksocknal_data */
sched->kss_info = info;
spin_lock_init(&sched->kss_lock);
- CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
- CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
- CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+ INIT_LIST_HEAD(&sched->kss_rx_conns);
+ INIT_LIST_HEAD(&sched->kss_tx_conns);
+ INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
init_waitqueue_head(&sched->kss_waitq);
}
}
ksocknal_debug_peerhash (lnet_ni_t *ni)
{
ksock_peer_t *peer = NULL;
- cfs_list_t *tmp;
+ struct list_head *tmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);
+ list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(tmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni == ni) break;
peer->ksnp_sharecount, peer->ksnp_closing,
peer->ksnp_accepting, peer->ksnp_error,
peer->ksnp_zc_next_cookie,
- !cfs_list_empty(&peer->ksnp_tx_queue),
- !cfs_list_empty(&peer->ksnp_zc_req_list));
+ !list_empty(&peer->ksnp_tx_queue),
+ !list_empty(&peer->ksnp_zc_req_list));
- cfs_list_for_each (tmp, &peer->ksnp_routes) {
- route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
"del %d\n", atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
route->ksnr_connected, route->ksnr_deleted);
}
- cfs_list_for_each (tmp, &peer->ksnp_conns) {
- conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
atomic_read(&conn->ksnc_conn_refcount),
atomic_read(&conn->ksnc_sock_refcount),
LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
}
- cfs_list_del(&net->ksnn_list);
+ list_del(&net->ksnn_list);
LIBCFS_FREE(net, sizeof(*net));
ksocknal_data.ksnd_nnets--;
if (colon != NULL) /* ignore alias device */
*colon = 0;
- cfs_list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
+ list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
ksnn_list) {
for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
char *ifnam2 = &tmp->ksnn_interfaces[j].\
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
net->ksnn_interfaces[0].ksni_ipaddr);
- cfs_list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
+ list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
ksocknal_data.ksnd_nnets++;
typedef struct /* per scheduler state */
{
spinlock_t kss_lock; /* serialise */
- cfs_list_t kss_rx_conns; /* conn waiting to be read */
+ struct list_head kss_rx_conns; /* conn waiting to be read */
/* conn waiting to be written */
- cfs_list_t kss_tx_conns;
+ struct list_head kss_tx_conns;
/* zombie noop tx list */
- cfs_list_t kss_zombie_noop_txs;
+ struct list_head kss_zombie_noop_txs;
wait_queue_head_t kss_waitq; /* where scheduler sleeps */
/* # connections assigned to this scheduler */
int kss_nconns;
{
__u64 ksnn_incarnation; /* my epoch */
spinlock_t ksnn_lock; /* serialise */
- cfs_list_t ksnn_list; /* chain on global list */
+ struct list_head ksnn_list; /* chain on global list */
int ksnn_npeers; /* # peers */
int ksnn_shutdown; /* shutting down? */
int ksnn_ninterfaces; /* IP interfaces */
{
int ksnd_init; /* initialisation state */
int ksnd_nnets; /* # networks set up */
- cfs_list_t ksnd_nets; /* list of nets */
+ struct list_head ksnd_nets; /* list of nets */
/* stabilize peer/conn ops */
rwlock_t ksnd_global_lock;
/* hash table of all my known peers */
- cfs_list_t *ksnd_peers;
+ struct list_head *ksnd_peers;
int ksnd_peer_hash_size; /* size of ksnd_peers */
int ksnd_nthreads; /* # live threads */
atomic_t ksnd_nactive_txs; /* #active txs */
- cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
- cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
- cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
- wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
- cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
- spinlock_t ksnd_reaper_lock; /* serialise */
+	/* conns to close: reaper_lock */
+ struct list_head ksnd_deathrow_conns;
+ /* conns to free: reaper_lock */
+ struct list_head ksnd_zombie_conns;
+	/* conns to retry: reaper_lock */
+ struct list_head ksnd_enomem_conns;
+ /* reaper sleeps here */
+ wait_queue_head_t ksnd_reaper_waitq;
+ /* when reaper will wake */
+ cfs_time_t ksnd_reaper_waketime;
+ /* serialise */
+ spinlock_t ksnd_reaper_lock;
int ksnd_enomem_tx; /* test ENOMEM sender */
int ksnd_stall_tx; /* test sluggish sender */
int ksnd_stall_rx; /* test sluggish receiver */
- cfs_list_t ksnd_connd_connreqs; /* incoming connection requests */
- cfs_list_t ksnd_connd_routes; /* routes waiting to be connected */
- wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
- int ksnd_connd_connecting;/* # connds connecting */
+ /* incoming connection requests */
+ struct list_head ksnd_connd_connreqs;
+ /* routes waiting to be connected */
+ struct list_head ksnd_connd_routes;
+ /* connds sleep here */
+ wait_queue_head_t ksnd_connd_waitq;
+ /* # connds connecting */
+ int ksnd_connd_connecting;
/** time stamp of the last failed connecting attempt */
- long ksnd_connd_failed_stamp;
+ long ksnd_connd_failed_stamp;
/** # starting connd */
- unsigned ksnd_connd_starting;
+ unsigned ksnd_connd_starting;
/** time stamp of the last starting connd */
- long ksnd_connd_starting_stamp;
+ long ksnd_connd_starting_stamp;
/** # running connd */
- unsigned ksnd_connd_running;
- spinlock_t ksnd_connd_lock; /* serialise */
+ unsigned ksnd_connd_running;
+ /* serialise */
+ spinlock_t ksnd_connd_lock;
- cfs_list_t ksnd_idle_noop_txs; /* list head for freed noop tx */
- spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */
+ /* list head for freed noop tx */
+ struct list_head ksnd_idle_noop_txs;
+ /* serialise, g_lock unsafe */
+ spinlock_t ksnd_tx_lock;
} ksock_nal_data_t;
typedef struct /* transmit packet */
{
- cfs_list_t tx_list; /* queue on conn for transmission etc */
- cfs_list_t tx_zc_list; /* queue on peer for ZC request */
+ struct list_head tx_list; /* queue on conn for transmission etc */
+ struct list_head tx_zc_list; /* queue on peer for ZC request */
atomic_t tx_refcount; /* tx reference count */
int tx_nob; /* # packet bytes */
int tx_resid; /* residual bytes */
unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
lnet_kiov_t *tx_kiov; /* packet page frags */
- struct ksock_conn *tx_conn; /* owning conn */
+ struct ksock_conn *tx_conn; /* owning conn */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
ksock_msg_t tx_msg; /* socklnd message buffer */
typedef struct ksock_conn
{
- struct ksock_peer *ksnc_peer; /* owning peer */
- struct ksock_route *ksnc_route; /* owning route */
- cfs_list_t ksnc_list; /* stash on peer's conn list */
- cfs_socket_t *ksnc_sock; /* actual socket */
+ struct ksock_peer *ksnc_peer; /* owning peer */
+ struct ksock_route *ksnc_route; /* owning route */
+ struct list_head ksnc_list; /* stash on peer's conn list */
+ cfs_socket_t *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
void *ksnc_saved_write_space; /* socket's original write_space() callback */
atomic_t ksnc_conn_refcount; /* conn refcount */
unsigned int ksnc_zc_capable:1; /* enable to ZC */
struct ksock_proto *ksnc_proto; /* protocol for the connection */
- /* reader */
- cfs_list_t ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
+ /* READER */
+
+ /* where I enq waiting input or a forwarding descriptor */
+ struct list_head ksnc_rx_list;
cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
* lnet_hdr_t, it's stored in
* ksnc_msg.ksm_u.lnetmsg */
- /* WRITER */
- cfs_list_t ksnc_tx_list; /* where I enq waiting for output space */
- cfs_list_t ksnc_tx_queue; /* packets waiting to be sent */
- ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
- cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
- int ksnc_tx_bufnob; /* send buffer marker */
- atomic_t ksnc_tx_nob; /* # bytes queued */
- int ksnc_tx_ready; /* write space */
- int ksnc_tx_scheduled; /* being progressed */
- cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
+	/* WRITER */
+ /* where I enq waiting for output space */
+ struct list_head ksnc_tx_list;
+ /* packets waiting to be sent */
+ struct list_head ksnc_tx_queue;
+ /* next TX that can carry a LNet message or ZC-ACK */
+ ksock_tx_t *ksnc_tx_carrier;
+ /* when (in jiffies) tx times out */
+ cfs_time_t ksnc_tx_deadline;
+ /* send buffer marker */
+ int ksnc_tx_bufnob;
+ /* # bytes queued */
+ atomic_t ksnc_tx_nob;
+ /* write space */
+ int ksnc_tx_ready;
+ /* being progressed */
+ int ksnc_tx_scheduled;
+ /* time stamp of the last posted TX */
+ cfs_time_t ksnc_tx_last_post;
} ksock_conn_t;
typedef struct ksock_route
{
- cfs_list_t ksnr_list; /* chain on peer route list */
- cfs_list_t ksnr_connd_list; /* chain on ksnr_connd_routes */
- struct ksock_peer *ksnr_peer; /* owning peer */
- atomic_t ksnr_refcount; /* # users */
+ struct list_head ksnr_list; /* chain on peer route list */
+ struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
+ struct ksock_peer *ksnr_peer; /* owning peer */
+ atomic_t ksnr_refcount; /* # users */
cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
cfs_duration_t ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
typedef struct ksock_peer
{
- cfs_list_t ksnp_list; /* stash on global peer list */
+ struct list_head ksnp_list; /* stash on global peer list */
cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
__u64 ksnp_zc_next_cookie;/* ZC completion cookie */
__u64 ksnp_incarnation; /* latest known peer incarnation */
struct ksock_proto *ksnp_proto; /* latest known peer protocol */
- cfs_list_t ksnp_conns; /* all active connections */
- cfs_list_t ksnp_routes; /* routes */
- cfs_list_t ksnp_tx_queue; /* waiting packets */
+ struct list_head ksnp_conns; /* all active connections */
+ struct list_head ksnp_routes; /* routes */
+ struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
- cfs_list_t ksnp_zc_req_list; /* zero copy requests wait for ACK */
+ /* zero copy requests wait for ACK */
+ struct list_head ksnp_zc_req_list;
cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
typedef struct ksock_connreq
{
- cfs_list_t ksncr_list; /* stash on ksnd_connd_connreqs */
- lnet_ni_t *ksncr_ni; /* chosen NI */
- cfs_socket_t *ksncr_sock; /* accepted socket */
+ /* stash on ksnd_connd_connreqs */
+ struct list_head ksncr_list;
+ /* chosen NI */
+ lnet_ni_t *ksncr_ni;
+ /* accepted socket */
+ cfs_socket_t *ksncr_sock;
} ksock_connreq_t;
extern ksock_nal_data_t ksocknal_data;
(1 << SOCKLND_CONN_BULK_OUT));
}
-static inline cfs_list_t *
+static inline struct list_head *
ksocknal_nid2peerlist (lnet_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
extern int ksocknal_close_peer_conns_locked (ksock_peer_t *peer,
__u32 ipaddr, int why);
extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why);
-extern int ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr);
+extern int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
ksock_tx_t *tx, int nonblk);
extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
-extern void ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist,
- int error);
+extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
+ int error);
extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
/* searching for a noop tx in free list */
spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs. \
+ if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
next, ksock_tx_t, tx_list);
LASSERT(tx->tx_desc_size == size);
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
}
spin_unlock(&ksocknal_data.ksnd_tx_lock);
/* it's a noop tx */
spin_lock(&ksocknal_data.ksnd_tx_lock);
- cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+ list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
} else {
}
void
-ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
ksock_tx_t *tx;
- while (!cfs_list_empty (txlist)) {
- tx = cfs_list_entry (txlist->next, ksock_tx_t, tx_list);
+ while (!list_empty(txlist)) {
+ tx = list_entry(txlist->next, ksock_tx_t, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
CNETERR("Deleting noop packet\n");
}
- cfs_list_del (&tx->tx_list);
+ list_del(&tx->tx_list);
LASSERT (atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
if (peer->ksnp_zc_next_cookie == 0)
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
- cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+ list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
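+	/* tx waits here until the peer ACKs its ZC cookie */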
spin_unlock(&peer->ksnp_lock);
}
}
tx->tx_msg.ksm_zc_cookies[0] = 0;
- cfs_list_del(&tx->tx_zc_list);
+ list_del(&tx->tx_zc_list);
spin_unlock(&peer->ksnp_lock);
/* enomem list takes over scheduler's ref... */
LASSERT (conn->ksnc_tx_scheduled);
- cfs_list_add_tail(&conn->ksnc_tx_list,
+ list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
SOCKNAL_ENOMEM_RETRY),
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- cfs_list_add_tail(&route->ksnr_connd_list,
+ list_add_tail(&route->ksnr_connd_list,
&ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
ksock_conn_t *conn;
ksock_conn_t *typed = NULL;
ksock_conn_t *fallback = NULL;
int tnob = 0;
int fnob = 0;
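+	/* weigh each conn by the bytes it already has queued (tx_nob plus socket send queue) */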
- cfs_list_for_each (tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ list_for_each(tmp, &peer->ksnp_conns) {
+ ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
libcfs_sock_wmem_queued(c->ksnc_sock);
int rc;
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
spin_lock_bh(&sched->kss_lock);
- if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
if (ztx != NULL) {
atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
- cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
+ list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
if (conn->ksnc_tx_ready && /* able to send */
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
ksocknal_conn_addref(conn);
- cfs_list_add_tail (&conn->ksnc_tx_list,
+ list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
wake_up(&sched->kss_waitq);
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
cfs_time_t now = cfs_time_current();
- cfs_list_t *tmp;
+ struct list_head *tmp;
ksock_route_t *route;
- cfs_list_for_each (tmp, &peer->ksnp_routes) {
- route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
ksock_route_t *
ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
{
- cfs_list_t *tmp;
+ struct list_head *tmp;
ksock_route_t *route;
- cfs_list_for_each (tmp, &peer->ksnp_routes) {
- route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
/* Queue the message until a connection is established */
- cfs_list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+ list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
write_unlock_bh(g_lock);
return 0;
}
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
- cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
wake_up(&sched->kss_waitq);
LASSERT(conn->ksnc_rx_ready);
break;
spin_lock_bh(&sched->kss_lock);
rc = (!ksocknal_data.ksnd_shuttingdown &&
- cfs_list_empty(&sched->kss_rx_conns) &&
- cfs_list_empty(&sched->kss_tx_conns));
+ list_empty(&sched->kss_rx_conns) &&
+ list_empty(&sched->kss_tx_conns));
spin_unlock_bh(&sched->kss_lock);
return rc;
/* Ensure I progress everything semi-fairly */
- if (!cfs_list_empty (&sched->kss_rx_conns)) {
- conn = cfs_list_entry(sched->kss_rx_conns.next,
+ if (!list_empty(&sched->kss_rx_conns)) {
+ conn = list_entry(sched->kss_rx_conns.next,
ksock_conn_t, ksnc_rx_list);
- cfs_list_del(&conn->ksnc_rx_list);
+ list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
- cfs_list_add_tail (&conn->ksnc_rx_list,
+ list_add_tail(&conn->ksnc_rx_list,
&sched->kss_rx_conns);
} else {
conn->ksnc_rx_scheduled = 0;
did_something = 1;
}
- if (!cfs_list_empty (&sched->kss_tx_conns)) {
- CFS_LIST_HEAD (zlist);
+ if (!list_empty(&sched->kss_tx_conns)) {
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
- if (!cfs_list_empty(&sched->kss_zombie_noop_txs)) {
- cfs_list_add(&zlist,
+ if (!list_empty(&sched->kss_zombie_noop_txs)) {
+ list_add(&zlist,
&sched->kss_zombie_noop_txs);
- cfs_list_del_init(&sched->kss_zombie_noop_txs);
+ list_del_init(&sched->kss_zombie_noop_txs);
}
- conn = cfs_list_entry(sched->kss_tx_conns.next,
+ conn = list_entry(sched->kss_tx_conns.next,
ksock_conn_t, ksnc_tx_list);
- cfs_list_del (&conn->ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
- LASSERT(!cfs_list_empty(&conn->ksnc_tx_queue));
+ LASSERT(!list_empty(&conn->ksnc_tx_queue));
- tx = cfs_list_entry(conn->ksnc_tx_queue.next,
+ tx = list_entry(conn->ksnc_tx_queue.next,
ksock_tx_t, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
/* dequeue now so empty list => more to send */
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
/* Clear tx_ready in case send isn't complete. Do
* it BEFORE we call process_transmit, since
conn->ksnc_tx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
- if (!cfs_list_empty(&zlist)) {
+ if (!list_empty(&zlist)) {
/* free zombie noop txs, it's fast because
* noop txs are just put in freelist */
ksocknal_txlist_done(NULL, &zlist, 0);
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
spin_lock_bh(&sched->kss_lock);
- cfs_list_add(&tx->tx_list,
+ list_add(&tx->tx_list,
&conn->ksnc_tx_queue);
} else {
/* Complete send; tx -ref */
/* Do nothing; after a short timeout, this
* conn will be reposted on kss_tx_conns. */
} else if (conn->ksnc_tx_ready &&
- !cfs_list_empty (&conn->ksnc_tx_queue)) {
+ !list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
- cfs_list_add_tail (&conn->ksnc_tx_list,
+ list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
} else {
conn->ksnc_tx_scheduled = 0;
conn->ksnc_rx_ready = 1;
if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- cfs_list_add_tail(&conn->ksnc_rx_list,
+ list_add_tail(&conn->ksnc_rx_list,
&sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 1;
/* extra ref for scheduler */
conn->ksnc_tx_ready = 1;
- if (!conn->ksnc_tx_scheduled && // not being progressed
- !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
- cfs_list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+ if (!conn->ksnc_tx_scheduled && /* not being progressed */
+ !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
int
ksocknal_connect (ksock_route_t *route)
{
- CFS_LIST_HEAD (zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
ksock_peer_t *peer = route->ksnr_peer;
int type;
int wanted;
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
- if (!cfs_list_empty(&peer->ksnp_tx_queue) &&
+ if (!list_empty(&peer->ksnp_tx_queue) &&
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
ksock_conn_t *conn;
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x */
- if (!cfs_list_empty (&peer->ksnp_conns)) {
- conn = cfs_list_entry(peer->ksnp_conns.next,
+ if (!list_empty(&peer->ksnp_conns)) {
+ conn = list_entry(peer->ksnp_conns.next,
ksock_conn_t, ksnc_list);
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
}
/* take all the blocked packets while I've got the lock and
* complete below... */
- cfs_list_splice_init(&peer->ksnp_tx_queue, &zombies);
+ list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
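The same move-then-process pattern appears here: while the write lock is held, list_splice_init() empties the shared ksnp_tx_queue onto a caller-local list, and the blocked packets are completed only after the lock is dropped. A minimal sketch of that pattern, assuming a kernel build with <linux/list.h> and <linux/spinlock.h>; blocked_q, blocked_lock, struct pending_tx and the commented-out complete_tx() are invented names for illustration, not part of the socklnd code:

#include <linux/list.h>
#include <linux/spinlock.h>

struct pending_tx {
	struct list_head tx_list;
	/* ... payload ... */
};

static struct list_head blocked_q = LIST_HEAD_INIT(blocked_q); /* shared, under blocked_lock */
static DEFINE_SPINLOCK(blocked_lock);

static void drain_blocked(void)
{
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct pending_tx *tx, *tmp;

	spin_lock(&blocked_lock);
	list_splice_init(&blocked_q, &zombies);   /* blocked_q is now empty */
	spin_unlock(&blocked_lock);

	/* complete the moved entries without holding the lock */
	list_for_each_entry_safe(tx, tmp, &zombies, tx_list) {
		list_del(&tx->tx_list);
		/* complete_tx(tx);  -- whatever "completing" means for the caller */
	}
}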
#if 0 /* irrelevant with only eager routes */
if (!route->ksnr_deleted) {
/* make this route least-favourite for re-selection */
- cfs_list_del(&route->ksnr_list);
- cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+ list_del(&route->ksnr_list);
+ list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
}
#endif
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
now = cfs_time_current();
/* connd_routes can contain both pending and ordinary routes */
- cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+ list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
ksnr_connd_list) {
if (route->ksnr_retry_interval == 0 ||
dropped_lock = 1;
}
- if (!cfs_list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
- cr = cfs_list_entry(ksocknal_data.ksnd_connd_connreqs. \
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
next, ksock_connreq_t, ksncr_list);
- cfs_list_del(&cr->ksncr_list);
+ list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
dropped_lock = 1;
route = ksocknal_connd_get_route_locked(&timeout);
}
if (route != NULL) {
- cfs_list_del (&route->ksnr_connd_list);
+ list_del(&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
dropped_lock = 1;
{
/* We're called with a shared lock on ksnd_global_lock */
ksock_conn_t *conn;
- cfs_list_t *ctmp;
+ struct list_head *ctmp;
- cfs_list_for_each (ctmp, &peer->ksnp_conns) {
+ list_for_each(ctmp, &peer->ksnp_conns) {
int error;
- conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
return (conn);
}
- if ((!cfs_list_empty(&conn->ksnc_tx_queue) ||
+ if ((!list_empty(&conn->ksnc_tx_queue) ||
libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
ksock_tx_t *tx;
- CFS_LIST_HEAD (stale_txs);
+ struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
- tx = cfs_list_entry (peer->ksnp_tx_queue.next,
+ while (!list_empty(&peer->ksnp_tx_queue)) {
+ tx = list_entry(peer->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
- cfs_list_del (&tx->tx_list);
- cfs_list_add_tail (&tx->tx_list, &stale_txs);
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &stale_txs);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksock_conn_t *conn;
ksock_tx_t *tx;
- if (cfs_list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ /* last_alive will be updated by create_conn */
+ if (list_empty(&peer->ksnp_conns))
return 0;
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
- if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
+ if (!list_empty(&conn->ksnc_tx_queue)) {
spin_unlock_bh(&sched->kss_lock);
/* there is a queued ACK, don't need keepalive */
return 0;
void
ksocknal_check_peer_timeouts (int idx)
{
- cfs_list_t *peers = &ksocknal_data.ksnd_peers[idx];
+ struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
ksock_conn_t *conn;
ksock_tx_t *tx;
* take a look... */
read_lock(&ksocknal_data.ksnd_global_lock);
- cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
+ list_for_each_entry(peer, peers, ksnp_list) {
cfs_time_t deadline = 0;
int resid = 0;
int n = 0;
/* we can't process stale txs right here because we're
* holding only shared lock */
- if (!cfs_list_empty (&peer->ksnp_tx_queue)) {
+ if (!list_empty(&peer->ksnp_tx_queue)) {
ksock_tx_t *tx =
- cfs_list_entry (peer->ksnp_tx_queue.next,
+ list_entry(peer->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
}
}
- if (cfs_list_empty(&peer->ksnp_zc_req_list))
+ if (list_empty(&peer->ksnp_zc_req_list))
continue;
spin_lock(&peer->ksnp_lock);
- cfs_list_for_each_entry_typed(tx, &peer->ksnp_zc_req_list,
- ksock_tx_t, tx_zc_list) {
+ list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
continue;
}
- tx = cfs_list_entry(peer->ksnp_zc_req_list.next,
+ tx = list_entry(peer->ksnp_zc_req_list.next,
ksock_tx_t, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
wait_queue_t wait;
ksock_conn_t *conn;
ksock_sched_t *sched;
- cfs_list_t enomem_conns;
+ struct list_head enomem_conns;
int nenomem_conns;
cfs_duration_t timeout;
int i;
cfs_block_allsigs ();
- CFS_INIT_LIST_HEAD(&enomem_conns);
+ INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry_current(&wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
- conn = cfs_list_entry (ksocknal_data. \
+ if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+ conn = list_entry(ksocknal_data. \
ksnd_deathrow_conns.next,
ksock_conn_t, ksnc_list);
- cfs_list_del (&conn->ksnc_list);
+ list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!cfs_list_empty (&ksocknal_data.ksnd_zombie_conns)) {
- conn = cfs_list_entry (ksocknal_data.ksnd_zombie_conns.\
+ if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_zombie_conns.\
next, ksock_conn_t, ksnc_list);
- cfs_list_del (&conn->ksnc_list);
+ list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!cfs_list_empty (&ksocknal_data.ksnd_enomem_conns)) {
- cfs_list_add(&enomem_conns,
+ if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
+ list_add(&enomem_conns,
&ksocknal_data.ksnd_enomem_conns);
- cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns);
+ list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!cfs_list_empty (&enomem_conns)) {
- conn = cfs_list_entry (enomem_conns.next,
+ while (!list_empty(&enomem_conns)) {
+ conn = list_entry(enomem_conns.next,
ksock_conn_t, ksnc_tx_list);
- cfs_list_del (&conn->ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
LASSERT(conn->ksnc_tx_scheduled);
conn->ksnc_tx_ready = 1;
- cfs_list_add_tail(&conn->ksnc_tx_list,
+ list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
wake_up(&sched->kss_waitq);
add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
- cfs_list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
- cfs_list_empty(&ksocknal_data.ksnd_zombie_conns))
+ list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+ list_empty(&ksocknal_data.ksnd_zombie_conns))
waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
set_current_state(TASK_RUNNING);
nob += scratchiov[i].iov_len;
}
- if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
+ if (!list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
page, offset, kiov->kiov_len);
- if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
+ if (!list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
msgflg |= MSG_MORE;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
- if (!cfs_list_empty(&conn->ksnc_tx_queue) ||
+ if (!list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
" ready" : " blocked"),
(conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
" scheduled" : " idle"),
- (conn == NULL) ? "" : (cfs_list_empty (&conn->ksnc_tx_queue) ?
+ (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
" empty" : " queued"));
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
ksocknal_lib_csum_tx(tx);
nob = ks_query_iovs_length(tx->tx_iov, tx->tx_niov);
- flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
+ flags = (!list_empty(&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
(MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
rc = ks_send_iovs(sock, tx->tx_iov, tx->tx_niov, flags, 0);
nkiov = tx->tx_nkiov;
nob = ks_query_kiovs_length(tx->tx_kiov, nkiov);
- flags = (!cfs_list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
+ flags = (!list_empty(&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
(MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
rc = ks_send_kiovs(sock, tx->tx_kiov, nkiov, flags, 0);
{
/* remove tx/conn from conn's outgoing queue */
spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- cfs_list_del(&tx->tx_list);
- if (cfs_list_empty(&conn->ksnc_tx_queue))
- cfs_list_del(&conn->ksnc_tx_list);
+ list_del(&tx->tx_list);
+ if (list_empty(&conn->ksnc_tx_queue))
+ list_del(&conn->ksnc_tx_list);
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
{
/* V1.x, just enqueue it */
- cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
return NULL;
}
ksock_tx_t *tx = conn->ksnc_tx_carrier;
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
- LASSERT (!cfs_list_empty(&conn->ksnc_tx_queue));
- LASSERT (tx != NULL);
+ LASSERT(!list_empty(&conn->ksnc_tx_queue));
+ LASSERT(tx != NULL);
/* Next TX that can carry ZC-ACK or LNet message */
if (tx->tx_list.next == &conn->ksnc_tx_queue) {
/* no more packets queued */
conn->ksnc_tx_carrier = NULL;
} else {
- conn->ksnc_tx_carrier = cfs_list_entry(tx->tx_list.next,
+ conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
ksock_tx_t, tx_list);
- LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
+ LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
+ tx->tx_msg.ksm_type);
}
}
*/
if (tx == NULL) {
if (tx_ack != NULL) {
- cfs_list_add_tail(&tx_ack->tx_list,
+ list_add_tail(&tx_ack->tx_list,
&conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
/* tx is noop zc-ack, can't piggyback zc-ack cookie */
if (tx_ack != NULL)
- cfs_list_add_tail(&tx_ack->tx_list,
+ list_add_tail(&tx_ack->tx_list,
&conn->ksnc_tx_queue);
return 0;
}
* and replace the NOOP tx, and return the NOOP tx.
*/
if (tx == NULL) { /* nothing on queue */
- cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_msg;
return NULL;
}
if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
- cfs_list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
return NULL;
}
ksocknal_next_tx_carrier(conn);
/* use new_tx to replace the noop zc-ack packet */
- cfs_list_add(&tx_msg->tx_list, &tx->tx_list);
- cfs_list_del(&tx->tx_list);
+ list_add(&tx_msg->tx_list, &tx->tx_list);
+ list_del(&tx->tx_list);
return tx;
}
if ((tx = conn->ksnc_tx_carrier) == NULL) {
if (tx_ack != NULL) {
- cfs_list_add_tail(&tx_ack->tx_list,
+ list_add_tail(&tx_ack->tx_list,
&conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
/* failed to piggyback ZC-ACK */
if (tx_ack != NULL) {
- cfs_list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+ list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
/* the next tx can piggyback at least 1 ACK */
ksocknal_next_tx_carrier(conn);
}
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
ksock_tx_t *tmp;
- CFS_LIST_HEAD (zlist);
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
int count;
if (cookie1 == 0)
spin_lock(&peer->ksnp_lock);
- cfs_list_for_each_entry_safe(tx, tmp,
+ list_for_each_entry_safe(tx, tmp,
&peer->ksnp_zc_req_list, tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
- cfs_list_del(&tx->tx_zc_list);
- cfs_list_add(&tx->tx_zc_list, &zlist);
+ list_del(&tx->tx_zc_list);
+ list_add(&tx->tx_zc_list, &zlist);
if (--count == 0)
break;
spin_unlock(&peer->ksnp_lock);
- while (!cfs_list_empty(&zlist)) {
- tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
- cfs_list_del(&tx->tx_zc_list);
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
char *
lnet_get_networks (void)
{
- static char default_networks[256];
- char *networks = getenv ("LNET_NETWORKS");
- char *str;
- char *sep;
- int len;
- int nob;
- cfs_list_t *tmp;
-
- if (networks != NULL)
- return networks;
+ static char default_networks[256];
+ char *networks = getenv("LNET_NETWORKS");
+ char *str;
+ char *sep;
+ int len;
+ int nob;
+ struct list_head *tmp;
+
+ if (networks != NULL)
+ return networks;
/* In userland, the default 'networks=' is the list of known net types */
len = sizeof(default_networks);
*str = 0;
sep = "";
- cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd_t *lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
+ list_for_each(tmp, &the_lnet.ln_lnds) {
+ lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
- nob = snprintf(str, len, "%s%s", sep,
- libcfs_lnd2str(lnd->lnd_type));
+ nob = snprintf(str, len, "%s%s", sep,
+ libcfs_lnd2str(lnd->lnd_type));
if (nob >= len) {
/* overflowed the string; leave it where it was */
*str = 0;
break;
}
- len -= nob;
- str += nob;
- sep = ",";
- }
+ len -= nob;
+ str += nob;
+ sep = ",";
+ }
- return default_networks;
+ return default_networks;
}
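lnet_get_networks() assembles its default string by appending one LND name at a time with snprintf() and treating a return value greater than or equal to the remaining space as overflow, in which case it erases the partial fragment and stops. A standalone illustration of the same bounded-append loop; the item names and buffer size are made up, and a deliberately small buffer shows the truncation path:

#include <stdio.h>

int main(void)
{
	static const char *items[] = { "tcp", "o2ib", "gni" };
	char buf[10];               /* deliberately small to exercise the guard */
	char *str = buf;
	const char *sep = "";
	int len = sizeof(buf);
	int nob;
	int i;

	*str = 0;
	for (i = 0; i < (int)(sizeof(items) / sizeof(items[0])); i++) {
		nob = snprintf(str, len, "%s%s", sep, items[i]);
		if (nob >= len) {   /* would not fit: undo the partial write and stop */
			*str = 0;
			break;
		}
		len -= nob;
		str += nob;
		sep = ",";
	}
	printf("result: \"%s\"\n", buf);   /* "tcp,o2ib": ",gni" would overflow */
	return 0;
}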
# ifndef HAVE_LIBPTHREAD
static int
lnet_create_remote_nets_table(void)
{
- int i;
- cfs_list_t *hash;
+ int i;
+ struct list_head *hash;
LASSERT(the_lnet.ln_remote_nets_hash == NULL);
LASSERT(the_lnet.ln_remote_nets_hbits > 0);
}
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&hash[i]);
+ INIT_LIST_HEAD(&hash[i]);
the_lnet.ln_remote_nets_hash = hash;
return 0;
}
return;
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- LASSERT(cfs_list_empty(&the_lnet.ln_remote_nets_hash[i]));
+ LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
LNET_REMOTE_NETS_HASH_SIZE *
lnd_t *
lnet_find_lnd_by_type (int type)
{
- lnd_t *lnd;
- cfs_list_t *tmp;
-
- /* holding lnd mutex */
- cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
+ lnd_t *lnd;
+ struct list_head *tmp;
- if ((int)lnd->lnd_type == type)
- return lnd;
- }
+ /* holding lnd mutex */
+ list_for_each(tmp, &the_lnet.ln_lnds) {
+ lnd = list_entry(tmp, lnd_t, lnd_list);
- return NULL;
+ if ((int)lnd->lnd_type == type)
+ return lnd;
+ }
+ return NULL;
}
void
lnet_register_lnd (lnd_t *lnd)
{
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT (the_lnet.ln_init);
- LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
- LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
+ LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
- cfs_list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
+ list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
+ lnd->lnd_refcount = 0;
- CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
+ CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);
void
lnet_unregister_lnd (lnd_t *lnd)
{
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT (the_lnet.ln_init);
- LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT (lnd->lnd_refcount == 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
+ LASSERT(lnd->lnd_refcount == 0);
- cfs_list_del (&lnd->lnd_list);
- CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
+ list_del(&lnd->lnd_list);
+ CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);
#ifdef LNET_USE_LIB_FREELIST
int
-lnet_freelist_init (lnet_freelist_t *fl, int n, int size)
+lnet_freelist_init(lnet_freelist_t *fl, int n, int size)
{
char *space;
if (space == NULL)
return (-ENOMEM);
- CFS_INIT_LIST_HEAD (&fl->fl_list);
- fl->fl_objs = space;
- fl->fl_nobjs = n;
- fl->fl_objsize = size;
+ INIT_LIST_HEAD(&fl->fl_list);
+ fl->fl_objs = space;
+ fl->fl_nobjs = n;
+ fl->fl_objsize = size;
- do
- {
- memset (space, 0, size);
- cfs_list_add ((cfs_list_t *)space, &fl->fl_list);
- space += size;
- } while (--n != 0);
+ do {
+ memset(space, 0, size);
+ list_add((struct list_head *)space, &fl->fl_list);
+ space += size;
+ } while (--n != 0);
- return (0);
+ return 0;
}
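lnet_freelist_init() carves one zeroed allocation into n slots of 'size' bytes and pushes each slot onto fl_list by casting the start of the slot to a struct list_head, which works because every freelist object begins with its list linkage. A standalone toy version of that carving step (userland C; the list helpers are a minimal re-implementation for illustration and calloc stands in for LIBCFS_ALLOC):

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

struct freelist {
	struct list_head fl_list;   /* chain of free slots */
	char            *fl_objs;   /* the contiguous backing store */
	int              fl_nobjs;
	int              fl_objsize;
};

/* Carve the backing store into n slots of 'size' bytes; each slot begins with a list_head. */
static int freelist_init(struct freelist *fl, int n, int size)
{
	char *space = calloc(n, size);   /* zeroed, like the memset in the original */

	if (space == NULL)
		return -1;

	INIT_LIST_HEAD(&fl->fl_list);
	fl->fl_objs = space;
	fl->fl_nobjs = n;
	fl->fl_objsize = size;

	do {
		list_add((struct list_head *)space, &fl->fl_list);
		space += size;
	} while (--n != 0);

	return 0;
}

int main(void)
{
	struct freelist fl;
	struct list_head *e;
	int count = 0;

	if (freelist_init(&fl, 4, 64) != 0)
		return 1;
	for (e = fl.fl_list.next; e != &fl.fl_list; e = e->next)
		count++;
	printf("%d free slots\n", count);   /* prints 4 */
	free(fl.fl_objs);
	return 0;
}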
void
-lnet_freelist_fini (lnet_freelist_t *fl)
+lnet_freelist_fini(lnet_freelist_t *fl)
{
- cfs_list_t *el;
- int count;
+ struct list_head *el;
+ int count;
if (fl->fl_nobjs == 0)
return;
if (rec->rec_type == 0) /* not set yet, it's uninitialized */
return;
- while (!cfs_list_empty(&rec->rec_active)) {
- cfs_list_t *e = rec->rec_active.next;
+ while (!list_empty(&rec->rec_active)) {
+ struct list_head *e = rec->rec_active.next;
- cfs_list_del_init(e);
+ list_del_init(e);
if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(cfs_list_entry(e, lnet_eq_t, eq_list));
+ lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(cfs_list_entry(e, lnet_libmd_t, md_list));
+ lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
} else { /* NB: Active MEs should be attached on portals */
LBUG();
LASSERT(rec->rec_type == 0);
rec->rec_type = type;
- CFS_INIT_LIST_HEAD(&rec->rec_active);
+ INIT_LIST_HEAD(&rec->rec_active);
#ifdef LNET_USE_LIB_FREELIST
memset(&rec->rec_freelist, 0, sizeof(rec->rec_freelist));
}
for (i = 0; i < LNET_LH_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
+ INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
return 0;
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
/* ALWAYS called with lnet_res_lock held */
- cfs_list_t *head;
+ struct list_head *head;
lnet_libhandle_t *lh;
unsigned int hash;
hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
- cfs_list_for_each_entry(lh, head, lh_hash_chain) {
+ list_for_each_entry(lh, head, lh_hash_chain) {
if (lh->lh_cookie == cookie)
return lh;
}
hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
- cfs_list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
+ list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
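Both the lookup above and the initialization here map a handle cookie to a bucket of rec_lh_hash[] by shifting off the low type/CPT bits and masking with LNET_LH_HASH_MASK, so a lookup only walks one short chain. A small standalone sketch of the bucket computation; the bit widths and table size below are illustrative stand-ins, not the values defined in the LNet headers:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the real constants live in the LNet headers. */
#define COOKIE_TYPE_BITS 2
#define CPT_BITS         8
#define LH_HASH_SIZE     512                 /* must be a power of two */
#define LH_HASH_MASK     (LH_HASH_SIZE - 1)

static unsigned int cookie_to_bucket(uint64_t cookie)
{
	unsigned int hash = (unsigned int)(cookie >> (COOKIE_TYPE_BITS + CPT_BITS));

	return hash & LH_HASH_MASK;
}

int main(void)
{
	uint64_t cookie = 0x12345678abcdULL;

	printf("cookie %#llx -> bucket %u\n",
	       (unsigned long long)cookie, cookie_to_bucket(cookie));
	return 0;
}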
#ifndef __KERNEL__
}
#endif
- CFS_INIT_LIST_HEAD(&the_lnet.ln_test_peers);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_routers);
+ INIT_LIST_HEAD(&the_lnet.ln_test_peers);
+ INIT_LIST_HEAD(&the_lnet.ln_nis);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_routers);
rc = lnet_create_remote_nets_table();
if (rc != 0)
int
lnet_unprepare (void)
{
- /* NB no LNET_LOCK since this is the last reference. All LND instances
- * have shut down already, so it is safe to unlink and free all
- * descriptors, even those that appear committed to a network op (eg MD
- * with non-zero pending count) */
+ /* NB no LNET_LOCK since this is the last reference. All LND instances
+ * have shut down already, so it is safe to unlink and free all
+ * descriptors, even those that appear committed to a network op (eg MD
+ * with non-zero pending count) */
lnet_fail_nid(LNET_NID_ANY, 0);
LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(cfs_list_empty(&the_lnet.ln_test_peers));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_cpt));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_zombie));
+ LASSERT(list_empty(&the_lnet.ln_test_peers));
+ LASSERT(list_empty(&the_lnet.ln_nis));
+ LASSERT(list_empty(&the_lnet.ln_nis_cpt));
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
lnet_portals_destroy();
lnet_ni_t *
lnet_net2ni_locked(__u32 net, int cpt)
{
- cfs_list_t *tmp;
- lnet_ni_t *ni;
+ struct list_head *tmp;
+ lnet_ni_t *ni;
LASSERT(cpt != LNET_LOCK_EX);
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net) {
lnet_ni_addref_locked(ni, cpt);
return 0; /* the only one */
/* take lnet_net_lock(any) would be OK */
- if (!cfs_list_empty(&the_lnet.ln_nis_cpt)) {
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
+ if (!list_empty(&the_lnet.ln_nis_cpt)) {
+ list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
continue;
if (LNET_CPT_NUMBER == 1)
return 0; /* the only one */
- if (cfs_list_empty(&the_lnet.ln_nis_cpt))
+ if (list_empty(&the_lnet.ln_nis_cpt))
return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
cpt = lnet_net_lock_current();
lnet_ni_t *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
- struct lnet_ni *ni;
- cfs_list_t *tmp;
+ struct lnet_ni *ni;
+ struct list_head *tmp;
LASSERT(cpt != LNET_LOCK_EX);
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_nid == nid) {
lnet_ni_addref_locked(ni, cpt);
lnet_count_acceptor_nis (void)
{
/* Return the # of NIs that need the acceptor. */
- int count = 0;
+ int count = 0;
#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
- cfs_list_t *tmp;
- struct lnet_ni *ni;
- int cpt;
+ struct list_head *tmp;
+ struct lnet_ni *ni;
+ int cpt;
cpt = lnet_net_lock_current();
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_lnd->lnd_accept != NULL)
count++;
int islo;
lnet_ni_t *ni;
- /* NB called holding the global mutex */
+ /* NB called holding the global mutex */
- /* All quiet on the API front */
+ /* All quiet on the API front */
LASSERT(!the_lnet.ln_shutdown);
LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_zombie));
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_shutdown = 1; /* flag shutdown */
/* Unlink NIs from the global table */
- while (!cfs_list_empty(&the_lnet.ln_nis)) {
- ni = cfs_list_entry(the_lnet.ln_nis.next,
- lnet_ni_t, ni_list);
+ while (!list_empty(&the_lnet.ln_nis)) {
+ ni = list_entry(the_lnet.ln_nis.next,
+ lnet_ni_t, ni_list);
/* move it to the zombie list so nobody can find it anymore */
- cfs_list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
- if (!cfs_list_empty(&ni->ni_cptlist)) {
- cfs_list_del_init(&ni->ni_cptlist);
+ if (!list_empty(&ni->ni_cptlist)) {
+ list_del_init(&ni->ni_cptlist);
lnet_ni_decref_locked(ni, 0);
}
}
lnd_t *lnd;
struct lnet_ni *ni;
struct lnet_tx_queue *tq;
- cfs_list_t nilist;
+ struct list_head nilist;
int i;
- int rc = 0;
- int lnd_type;
- int nicount = 0;
- char *nets = lnet_get_networks();
+ int rc = 0;
+ int lnd_type;
+ int nicount = 0;
+ char *nets = lnet_get_networks();
- CFS_INIT_LIST_HEAD(&nilist);
+ INIT_LIST_HEAD(&nilist);
- if (nets == NULL)
- goto failed;
+ if (nets == NULL)
+ goto failed;
rc = lnet_parse_networks(&nilist, nets);
if (rc != 0)
goto failed;
- while (!cfs_list_empty(&nilist)) {
- ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ while (!list_empty(&nilist)) {
+ ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
- LASSERT (libcfs_isknown_lnd(lnd_type));
+ LASSERT(libcfs_isknown_lnd(lnd_type));
if (lnd_type == CIBLND ||
lnd_type == OPENIBLND ||
LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
- cfs_list_del(&ni->ni_list);
+ list_del(&ni->ni_list);
lnet_net_lock(LNET_LOCK_EX);
/* refcount for ln_nis */
lnet_ni_addref_locked(ni, 0);
- cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+ list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
if (ni->ni_cpts != NULL) {
- cfs_list_add_tail(&ni->ni_cptlist,
- &the_lnet.ln_nis_cpt);
+ list_add_tail(&ni->ni_cptlist,
+ &the_lnet.ln_nis_cpt);
lnet_ni_addref_locked(ni, 0);
}
failed:
lnet_shutdown_lndnis();
- while (!cfs_list_empty(&nilist)) {
- ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
- cfs_list_del(&ni->ni_list);
+ while (!list_empty(&nilist)) {
+ ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ list_del(&ni->ni_list);
lnet_ni_free(ni);
}
the_lnet.ln_refcount = 0;
the_lnet.ln_init = 1;
LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_lnds);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
+ INIT_LIST_HEAD(&the_lnet.ln_lnds);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
#ifdef __KERNEL__
/* The hash table size is the number of bits it takes to express the set
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount == 0);
- while (!cfs_list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(cfs_list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ while (!list_empty(&the_lnet.ln_lnds))
+ lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
+ lnd_t, lnd_list));
lnet_destroy_locks();
the_lnet.ln_init = 0;
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
- struct lnet_ni *ni;
- cfs_list_t *tmp;
- int cpt;
- int rc = -ENOENT;
+ struct lnet_ni *ni;
+ struct list_head *tmp;
+ int cpt;
+ int rc = -ENOENT;
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- if (index-- != 0)
- continue;
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ if (index-- != 0)
+ continue;
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
- id->nid = ni->ni_nid;
- id->pid = the_lnet.ln_pid;
- rc = 0;
- break;
- }
+ id->nid = ni->ni_nid;
+ id->pid = the_lnet.ln_pid;
+ rc = 0;
+ break;
+ }
lnet_net_unlock(cpt);
return rc;
lnet_net_lock(0);
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
lnet_ni_lock(ni);
ni->ni_status = NULL;
lnet_ni_unlock(ni);
lnet_net_unlock(0);
- LIBCFS_FREE(the_lnet.ln_ping_info,
- offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]));
- the_lnet.ln_ping_info = NULL;
- return;
+ LIBCFS_FREE(the_lnet.ln_ping_info,
+ offsetof(lnet_ping_info_t,
+ pi_ni[the_lnet.ln_ping_info->pi_nnis]));
+ the_lnet.ln_ping_info = NULL;
+ return;
}
int
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
-typedef struct { /* tmp struct for parsing routes */
- cfs_list_t ltb_list; /* stash on lists */
- int ltb_size; /* allocated size */
- char ltb_text[0]; /* text buffer */
-} lnet_text_buf_t;
+/* tmp struct for parsing routes */
+struct lnet_text_buf {
+ struct list_head ltb_list; /* stash on lists */
+ int ltb_size; /* allocated size */
+ char ltb_text[0]; /* text buffer */
+};
static int lnet_tbnob = 0; /* track text buf allocation */
#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */
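struct lnet_text_buf keeps its text inline after the header through the trailing ltb_text[0] member, so lnet_new_text_buf() below sizes a single allocation as offsetof(struct lnet_text_buf, ltb_text[str_len + 1]). A standalone sketch of that sizing trick; it uses a C99 flexible array member and plain malloc where the original uses the older [0] form and LIBCFS_ALLOC, and the struct here is a simplified stand-in, not the LNet type:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

struct text_buf {
	struct text_buf *tb_next;   /* stand-in for the list linkage */
	int              tb_size;   /* allocated size, header included */
	char             tb_text[]; /* text starts right after the header */
};

static struct text_buf *new_text_buf(const char *str)
{
	/* +1 for the terminating NUL; the original writes this as
	 * offsetof(struct lnet_text_buf, ltb_text[str_len + 1]), same value. */
	int nob = (int)(offsetof(struct text_buf, tb_text) + strlen(str) + 1);
	struct text_buf *tb = malloc(nob);

	if (tb == NULL)
		return NULL;
	tb->tb_next = NULL;
	tb->tb_size = nob;
	strcpy(tb->tb_text, str);
	return tb;
}

int main(void)
{
	struct text_buf *tb = new_text_buf("tcp0(eth0)");

	if (tb == NULL)
		return 1;
	printf("%d bytes hold \"%s\"\n", tb->tb_size, tb->tb_text);
	free(tb);
	return 0;
}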
}
int
-lnet_net_unique(__u32 net, cfs_list_t *nilist)
+lnet_net_unique(__u32 net, struct list_head *nilist)
{
- cfs_list_t *tmp;
- lnet_ni_t *ni;
+ struct list_head *tmp;
+ lnet_ni_t *ni;
- cfs_list_for_each (tmp, nilist) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, nilist) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
- if (LNET_NIDNET(ni->ni_nid) == net)
- return 0;
- }
+ if (LNET_NIDNET(ni->ni_nid) == net)
+ return 0;
+ }
- return 1;
+ return 1;
}
void
}
lnet_ni_t *
-lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, cfs_list_t *nilist)
+lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
{
struct lnet_tx_queue *tq;
struct lnet_ni *ni;
pthread_mutex_init(&ni->ni_lock, NULL);
# endif
#endif
- CFS_INIT_LIST_HEAD(&ni->ni_cptlist);
+ INIT_LIST_HEAD(&ni->ni_cptlist);
ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ni->ni_refs[0]));
if (ni->ni_refs == NULL)
goto failed;
cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
- CFS_INIT_LIST_HEAD(&tq->tq_delayed);
+ INIT_LIST_HEAD(&tq->tq_delayed);
if (el == NULL) {
ni->ni_cpts = NULL;
/* LND will fill in the address part of the NID */
ni->ni_nid = LNET_MKNID(net, 0);
ni->ni_last_alive = cfs_time_current_sec();
- cfs_list_add_tail(&ni->ni_list, nilist);
+ list_add_tail(&ni->ni_list, nilist);
return ni;
failed:
lnet_ni_free(ni);
}
int
-lnet_parse_networks(cfs_list_t *nilist, char *networks)
+lnet_parse_networks(struct list_head *nilist, char *networks)
{
struct cfs_expr_list *el = NULL;
int tokensize = strlen(networks) + 1;
}
}
- LASSERT(!cfs_list_empty(nilist));
+ LASSERT(!list_empty(nilist));
return 0;
failed_syntax:
lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
failed:
- while (!cfs_list_empty(nilist)) {
- ni = cfs_list_entry(nilist->next, lnet_ni_t, ni_list);
+ while (!list_empty(nilist)) {
+ ni = list_entry(nilist->next, lnet_ni_t, ni_list);
- cfs_list_del(&ni->ni_list);
+ list_del(&ni->ni_list);
lnet_ni_free(ni);
}
return -EINVAL;
}
-lnet_text_buf_t *
+struct lnet_text_buf *
lnet_new_text_buf (int str_len)
{
- lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
int nob;
/* NB allocate space for the terminating 0 */
- nob = offsetof(lnet_text_buf_t, ltb_text[str_len + 1]);
+ nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]);
if (nob > LNET_SINGLE_TEXTBUF_NOB) {
/* _way_ conservative for "route net gateway..." */
CERROR("text buffer too big\n");
}
void
-lnet_free_text_buf (lnet_text_buf_t *ltb)
+lnet_free_text_buf(struct lnet_text_buf *ltb)
{
lnet_tbnob -= ltb->ltb_size;
LIBCFS_FREE(ltb, ltb->ltb_size);
}
void
-lnet_free_text_bufs(cfs_list_t *tbs)
+lnet_free_text_bufs(struct list_head *tbs)
{
- lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
- while (!cfs_list_empty(tbs)) {
- ltb = cfs_list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+ while (!list_empty(tbs)) {
+ ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
- cfs_list_del(<b->ltb_list);
+ list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
}
void
-lnet_print_text_bufs(cfs_list_t *tbs)
+lnet_print_text_bufs(struct list_head *tbs)
{
- cfs_list_t *tmp;
- lnet_text_buf_t *ltb;
+ struct list_head *tmp;
+ struct lnet_text_buf *ltb;
- cfs_list_for_each (tmp, tbs) {
- ltb = cfs_list_entry(tmp, lnet_text_buf_t, ltb_list);
+ list_for_each(tmp, tbs) {
+ ltb = list_entry(tmp, struct lnet_text_buf, ltb_list);
CDEBUG(D_WARNING, "%s\n", ltb->ltb_text);
}
}
int
-lnet_str2tbs_sep (cfs_list_t *tbs, char *str)
+lnet_str2tbs_sep(struct list_head *tbs, char *str)
{
- cfs_list_t pending;
+ struct list_head pending;
char *sep;
int nob;
int i;
- lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
- CFS_INIT_LIST_HEAD(&pending);
+ INIT_LIST_HEAD(&pending);
/* Split 'str' into separate commands */
for (;;) {
ltb->ltb_text[nob] = 0;
- cfs_list_add_tail(<b->ltb_list, &pending);
+ list_add_tail(<b->ltb_list, &pending);
}
if (*sep == '#') {
str = sep + 1;
}
- cfs_list_splice(&pending, tbs->prev);
+ list_splice(&pending, tbs->prev);
return 0;
}
int
-lnet_expand1tb (cfs_list_t *list,
+lnet_expand1tb(struct list_head *list,
char *str, char *sep1, char *sep2,
char *item, int itemlen)
{
int len1 = (int)(sep1 - str);
int len2 = strlen(sep2 + 1);
- lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
LASSERT (*sep1 == '[');
LASSERT (*sep2 == ']');
memcpy(<b->ltb_text[len1+itemlen], sep2 + 1, len2);
ltb->ltb_text[len1 + itemlen + len2] = 0;
- cfs_list_add_tail(<b->ltb_list, list);
+ list_add_tail(<b->ltb_list, list);
return 0;
}
int
-lnet_str2tbs_expand (cfs_list_t *tbs, char *str)
+lnet_str2tbs_expand(struct list_head *tbs, char *str)
{
- char num[16];
- cfs_list_t pending;
- char *sep;
- char *sep2;
- char *parsed;
- char *enditem;
- int lo;
- int hi;
- int stride;
- int i;
- int nob;
- int scanned;
-
- CFS_INIT_LIST_HEAD(&pending);
+ char num[16];
+ struct list_head pending;
+ char *sep;
+ char *sep2;
+ char *parsed;
+ char *enditem;
+ int lo;
+ int hi;
+ int stride;
+ int i;
+ int nob;
+ int scanned;
+
+ INIT_LIST_HEAD(&pending);
sep = strchr(str, '[');
if (sep == NULL) /* nothing to expand */
}
}
- cfs_list_splice(&pending, tbs->prev);
+ list_splice(&pending, tbs->prev);
return 1;
failed:
/* static scratch buffer OK (single threaded) */
static char cmd[LNET_SINGLE_TEXTBUF_NOB];
- cfs_list_t nets;
- cfs_list_t gateways;
- cfs_list_t *tmp1;
- cfs_list_t *tmp2;
+ struct list_head nets;
+ struct list_head gateways;
+ struct list_head *tmp1;
+ struct list_head *tmp2;
__u32 net;
lnet_nid_t nid;
- lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
int rc;
char *sep;
char *token = str;
int got_hops = 0;
unsigned int priority = 0;
- CFS_INIT_LIST_HEAD(&gateways);
- CFS_INIT_LIST_HEAD(&nets);
+ INIT_LIST_HEAD(&gateways);
+ INIT_LIST_HEAD(&nets);
/* save a copy of the string for error messages */
strncpy(cmd, str, sizeof(cmd) - 1);
strcpy(ltb->ltb_text, token);
tmp1 = <b->ltb_list;
- cfs_list_add_tail(tmp1, tmp2);
+ list_add_tail(tmp1, tmp2);
while (tmp1 != tmp2) {
- ltb = cfs_list_entry(tmp1, lnet_text_buf_t, ltb_list);
+ ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
if (rc < 0)
tmp1 = tmp1->next;
if (rc > 0) { /* expanded! */
- cfs_list_del(<b->ltb_list);
+ list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
continue;
}
if (!got_hops)
hops = 1;
- LASSERT (!cfs_list_empty(&nets));
- LASSERT (!cfs_list_empty(&gateways));
+ LASSERT(!list_empty(&nets));
+ LASSERT(!list_empty(&gateways));
- cfs_list_for_each (tmp1, &nets) {
- ltb = cfs_list_entry(tmp1, lnet_text_buf_t, ltb_list);
+ list_for_each(tmp1, &nets) {
+ ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
net = libcfs_str2net(ltb->ltb_text);
LASSERT (net != LNET_NIDNET(LNET_NID_ANY));
- cfs_list_for_each (tmp2, &gateways) {
- ltb = cfs_list_entry(tmp2, lnet_text_buf_t, ltb_list);
+ list_for_each(tmp2, &gateways) {
+ ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list);
nid = libcfs_str2nid(ltb->ltb_text);
LASSERT(nid != LNET_NID_ANY);
*im_a_router = 1;
continue;
}
-
+
rc = lnet_add_route(net, hops, nid, priority);
if (rc != 0) {
CERROR("Can't create route "
}
int
-lnet_parse_route_tbs(cfs_list_t *tbs, int *im_a_router)
+lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
{
- lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
- while (!cfs_list_empty(tbs)) {
- ltb = cfs_list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+ while (!list_empty(tbs)) {
+ ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
lnet_free_text_bufs(tbs);
return -EINVAL;
}
- cfs_list_del(<b->ltb_list);
+ list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
int
lnet_parse_routes (char *routes, int *im_a_router)
{
- cfs_list_t tbs;
- int rc = 0;
+ struct list_head tbs;
+ int rc = 0;
*im_a_router = 0;
- CFS_INIT_LIST_HEAD(&tbs);
+ INIT_LIST_HEAD(&tbs);
if (lnet_str2tbs_sep(&tbs, routes) < 0) {
CERROR("Error parsing routes\n");
int
lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
{
- CFS_LIST_HEAD (list);
+ struct list_head list = LIST_HEAD_INIT(list);
int rc;
int i;
}
int
-lnet_splitnets(char *source, cfs_list_t *nets)
+lnet_splitnets(char *source, struct list_head *nets)
{
int offset = 0;
int offset2;
int len;
- lnet_text_buf_t *tb;
- lnet_text_buf_t *tb2;
- cfs_list_t *t;
+ struct lnet_text_buf *tb;
+ struct lnet_text_buf *tb2;
+ struct list_head *t;
char *sep;
char *bracket;
__u32 net;
- LASSERT (!cfs_list_empty(nets));
- LASSERT (nets->next == nets->prev); /* single entry */
+ LASSERT(!list_empty(nets));
+ LASSERT(nets->next == nets->prev); /* single entry */
- tb = cfs_list_entry(nets->next, lnet_text_buf_t, ltb_list);
+ tb = list_entry(nets->next, struct lnet_text_buf, ltb_list);
for (;;) {
sep = strchr(tb->ltb_text, ',');
return -EINVAL;
}
- cfs_list_for_each(t, nets) {
- tb2 = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
+ list_for_each(t, nets) {
+ tb2 = list_entry(t, struct lnet_text_buf, ltb_list);
if (tb2 == tb)
continue;
if (tb2 == NULL)
return -ENOMEM;
- strcpy(tb2->ltb_text, sep);
- cfs_list_add_tail(&tb2->ltb_list, nets);
+ strncpy(tb2->ltb_text, sep, strlen(sep));
+ list_add_tail(&tb2->ltb_list, nets);
- tb = tb2;
- }
+ tb = tb2;
+ }
}
int
lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
{
- static char networks[LNET_SINGLE_TEXTBUF_NOB];
- static char source[LNET_SINGLE_TEXTBUF_NOB];
-
- cfs_list_t raw_entries;
- cfs_list_t matched_nets;
- cfs_list_t current_nets;
- cfs_list_t *t;
- cfs_list_t *t2;
- lnet_text_buf_t *tb;
- lnet_text_buf_t *tb2;
- __u32 net1;
- __u32 net2;
- int len;
- int count;
- int dup;
- int rc;
-
- CFS_INIT_LIST_HEAD(&raw_entries);
+ static char networks[LNET_SINGLE_TEXTBUF_NOB];
+ static char source[LNET_SINGLE_TEXTBUF_NOB];
+
+ struct list_head raw_entries;
+ struct list_head matched_nets;
+ struct list_head current_nets;
+ struct list_head *t;
+ struct list_head *t2;
+ struct lnet_text_buf *tb;
+ struct lnet_text_buf *tb2;
+ __u32 net1;
+ __u32 net2;
+ int len;
+ int count;
+ int dup;
+ int rc;
+
+ INIT_LIST_HEAD(&raw_entries);
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
LASSERT (lnet_tbnob == 0);
return -EINVAL;
}
- CFS_INIT_LIST_HEAD(&matched_nets);
- CFS_INIT_LIST_HEAD(¤t_nets);
+ INIT_LIST_HEAD(&matched_nets);
+ INIT_LIST_HEAD(¤t_nets);
networks[0] = 0;
count = 0;
len = 0;
rc = 0;
- while (!cfs_list_empty(&raw_entries)) {
- tb = cfs_list_entry(raw_entries.next, lnet_text_buf_t,
- ltb_list);
+ while (!list_empty(&raw_entries)) {
+ tb = list_entry(raw_entries.next, struct lnet_text_buf,
+ ltb_list);
strncpy(source, tb->ltb_text, sizeof(source)-1);
source[sizeof(source)-1] = 0;
if (rc < 0)
break;
- cfs_list_del(&tb->ltb_list);
+ list_del(&tb->ltb_list);
if (rc == 0) { /* no match */
lnet_free_text_buf(tb);
}
/* split into separate networks */
- CFS_INIT_LIST_HEAD(¤t_nets);
- cfs_list_add(&tb->ltb_list, ¤t_nets);
+ INIT_LIST_HEAD(¤t_nets);
+ list_add(&tb->ltb_list, ¤t_nets);
rc = lnet_splitnets(source, ¤t_nets);
if (rc < 0)
break;
dup = 0;
- cfs_list_for_each (t, ¤t_nets) {
- tb = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
+ list_for_each(t, ¤t_nets) {
+ tb = list_entry(t, struct lnet_text_buf, ltb_list);
net1 = lnet_netspec2net(tb->ltb_text);
- LASSERT (net1 != LNET_NIDNET(LNET_NID_ANY));
-
- cfs_list_for_each(t2, &matched_nets) {
- tb2 = cfs_list_entry(t2, lnet_text_buf_t,
- ltb_list);
- net2 = lnet_netspec2net(tb2->ltb_text);
- LASSERT (net2 != LNET_NIDNET(LNET_NID_ANY));
-
- if (net1 == net2) {
- dup = 1;
- break;
- }
- }
+ LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));
+
+ list_for_each(t2, &matched_nets) {
+ tb2 = list_entry(t2, struct lnet_text_buf,
+ ltb_list);
+ net2 = lnet_netspec2net(tb2->ltb_text);
+ LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));
+
+ if (net1 == net2) {
+ dup = 1;
+ break;
+ }
+ }
- if (dup)
- break;
- }
+ if (dup)
+ break;
+ }
if (dup) {
lnet_free_text_bufs(¤t_nets);
continue;
}
- cfs_list_for_each_safe(t, t2, ¤t_nets) {
- tb = cfs_list_entry(t, lnet_text_buf_t, ltb_list);
+ list_for_each_safe(t, t2, ¤t_nets) {
+ tb = list_entry(t, struct lnet_text_buf, ltb_list);
- cfs_list_del(&tb->ltb_list);
- cfs_list_add_tail(&tb->ltb_list, &matched_nets);
+ list_del(&tb->ltb_list);
+ list_add_tail(&tb->ltb_list, &matched_nets);
len += snprintf(networks + len, sizeof(networks) - len,
"%s%s", (len == 0) ? "" : ",",
lnet_eq_wait_lock();
lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
- cfs_list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
+ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
lnet_eq_wait_unlock();
lnet_res_unlock(LNET_LOCK_EX);
refs = eq->eq_refs;
lnet_res_lh_invalidate(&eq->eq_lh);
- cfs_list_del(&eq->eq_list);
+ list_del(&eq->eq_list);
lnet_eq_free_locked(eq);
out:
lnet_eq_wait_unlock();
(*md->md_eq->eq_refs[cpt])--;
}
- LASSERT(!cfs_list_empty(&md->md_list));
- cfs_list_del_init(&md->md_list);
+ LASSERT(!list_empty(&md->md_list));
+ list_del_init(&md->md_list);
lnet_md_free_locked(md);
}
lnet_res_lh_initialize(container, &md->md_lh);
- LASSERT(cfs_list_empty(&md->md_list));
- cfs_list_add(&md->md_list, &container->rec_active);
+ LASSERT(list_empty(&md->md_list));
+ list_add(&md->md_list, &container->rec_active);
return 0;
}
LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
lnet_unlink_t unlink, lnet_handle_md_t *handle)
{
- CFS_LIST_HEAD (matches);
- CFS_LIST_HEAD (drops);
+ struct list_head matches = LIST_HEAD_INIT(matches);
+ struct list_head drops = LIST_HEAD_INIT(drops);
struct lnet_me *me;
struct lnet_libmd *md;
int cpt;
{
struct lnet_match_table *mtable;
struct lnet_me *me;
- cfs_list_t *head;
+ struct list_head *head;
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
me->me_pos = head - &mtable->mt_mhash[0];
if (pos == LNET_INS_AFTER || pos == LNET_INS_LOCAL)
- cfs_list_add_tail(&me->me_list, head);
+ list_add_tail(&me->me_list, head);
else
- cfs_list_add(&me->me_list, head);
+ list_add(&me->me_list, head);
lnet_me2handle(handle, me);
lnet_res_lh_initialize(the_lnet.ln_me_containers[cpt], &new_me->me_lh);
- if (pos == LNET_INS_AFTER)
- cfs_list_add(&new_me->me_list, ¤t_me->me_list);
- else
- cfs_list_add_tail(&new_me->me_list, ¤t_me->me_list);
+ if (pos == LNET_INS_AFTER)
+ list_add(&new_me->me_list, ¤t_me->me_list);
+ else
+ list_add_tail(&new_me->me_list, ¤t_me->me_list);
- lnet_me2handle(handle, new_me);
+ lnet_me2handle(handle, new_me);
lnet_res_unlock(cpt);
void
lnet_me_unlink(lnet_me_t *me)
{
- cfs_list_del(&me->me_list);
+ list_del(&me->me_list);
if (me->me_md != NULL) {
lnet_libmd_t *md = me->me_md;
CWARN("\tMD\t= %p\n", me->md);
CWARN("\tprev\t= %p\n",
- cfs_list_entry(me->me_list.prev, lnet_me_t, me_list));
+ list_entry(me->me_list.prev, lnet_me_t, me_list));
CWARN("\tnext\t= %p\n",
- cfs_list_entry(me->me_list.next, lnet_me_t, me_list));
+ list_entry(me->me_list.next, lnet_me_t, me_list));
}
#endif
"Reserved");
int
-lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
+lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
- lnet_test_peer_t *tp;
- cfs_list_t *el;
- cfs_list_t *next;
- cfs_list_t cull;
+ lnet_test_peer_t *tp;
+ struct list_head *el;
+ struct list_head *next;
+ struct list_head cull;
- LASSERT (the_lnet.ln_init);
+ LASSERT(the_lnet.ln_init);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
tp->tp_threshold = threshold;
lnet_net_lock(0);
- cfs_list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
+ list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
lnet_net_unlock(0);
return 0;
}
/* removing entries */
- CFS_INIT_LIST_HEAD(&cull);
+ INIT_LIST_HEAD(&cull);
lnet_net_lock(0);
- cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
- if (tp->tp_threshold == 0 || /* needs culling anyway */
- nid == LNET_NID_ANY || /* removing all entries */
- tp->tp_nid == nid) /* matched this one */
- {
- cfs_list_del (&tp->tp_list);
- cfs_list_add (&tp->tp_list, &cull);
- }
- }
+ if (tp->tp_threshold == 0 || /* needs culling anyway */
+ nid == LNET_NID_ANY || /* removing all entries */
+ tp->tp_nid == nid) { /* matched this one */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
+ }
+ }
lnet_net_unlock(0);
- while (!cfs_list_empty (&cull)) {
- tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
- cfs_list_del (&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
- }
- return 0;
+ list_del(&tp->tp_list);
+ LIBCFS_FREE(tp, sizeof(*tp));
+ }
+ return 0;
}
static int
fail_peer (lnet_nid_t nid, int outgoing)
{
- lnet_test_peer_t *tp;
- cfs_list_t *el;
- cfs_list_t *next;
- cfs_list_t cull;
- int fail = 0;
+ lnet_test_peer_t *tp;
+ struct list_head *el;
+ struct list_head *next;
+ struct list_head cull;
+ int fail = 0;
- CFS_INIT_LIST_HEAD (&cull);
+ INIT_LIST_HEAD(&cull);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
- cfs_list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = cfs_list_entry (el, lnet_test_peer_t, tp_list);
-
- if (tp->tp_threshold == 0) {
- /* zombie entry */
- if (outgoing) {
- /* only cull zombies on outgoing tests,
- * since we may be at interrupt priority on
- * incoming messages. */
- cfs_list_del (&tp->tp_list);
- cfs_list_add (&tp->tp_list, &cull);
- }
- continue;
- }
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
+
+ if (tp->tp_threshold == 0) {
+ /* zombie entry */
+ if (outgoing) {
+ /* only cull zombies on outgoing tests,
+ * since we may be at interrupt priority on
+ * incoming messages. */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
+ }
+ continue;
+ }
- if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
- nid == tp->tp_nid) { /* fail this peer */
- fail = 1;
-
- if (tp->tp_threshold != LNET_MD_THRESH_INF) {
- tp->tp_threshold--;
- if (outgoing &&
- tp->tp_threshold == 0) {
- /* see above */
- cfs_list_del (&tp->tp_list);
- cfs_list_add (&tp->tp_list, &cull);
- }
- }
- break;
- }
- }
+ if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
+ nid == tp->tp_nid) { /* fail this peer */
+ fail = 1;
+
+ if (tp->tp_threshold != LNET_MD_THRESH_INF) {
+ tp->tp_threshold--;
+ if (outgoing &&
+ tp->tp_threshold == 0) {
+ /* see above */
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
+ }
+ }
+ break;
+ }
+ }
lnet_net_unlock(0);
- while (!cfs_list_empty (&cull)) {
- tp = cfs_list_entry (cull.next, lnet_test_peer_t, tp_list);
- cfs_list_del (&tp->tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
+ list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
- }
+ LIBCFS_FREE(tp, sizeof(*tp));
+ }
- return (fail);
+ return fail;
}
unsigned int
* it sets do_send FALSE and I don't do the unlock/send/lock bit.
*
* \retval 0 If \a msg sent or OK to send.
- * \retval EAGAIN If \a msg blocked for credit.
- * \retval EHOSTUNREACH If the next hop of the message appears dead.
- * \retval ECANCELED If the MD of the message has been unlinked.
+ * \retval -EAGAIN If \a msg blocked for credit.
+ * \retval -EHOSTUNREACH If the next hop of the message appears dead.
+ * \retval -ECANCELED If the MD of the message has been unlinked.
*/
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
lnet_finalize(ni, msg, -EHOSTUNREACH);
lnet_net_lock(cpt);
- return EHOSTUNREACH;
+ return -EHOSTUNREACH;
}
if (msg->msg_md != NULL &&
lnet_finalize(ni, msg, -ECANCELED);
lnet_net_lock(cpt);
- return ECANCELED;
+ return -ECANCELED;
}
- if (!msg->msg_peertxcredit) {
- LASSERT ((lp->lp_txcredits < 0) ==
- !cfs_list_empty(&lp->lp_txq));
+ if (!msg->msg_peertxcredit) {
+ LASSERT((lp->lp_txcredits < 0) ==
+ !list_empty(&lp->lp_txq));
- msg->msg_peertxcredit = 1;
- lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
- lp->lp_txcredits--;
+ msg->msg_peertxcredit = 1;
+ lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
+ lp->lp_txcredits--;
- if (lp->lp_txcredits < lp->lp_mintxcredits)
- lp->lp_mintxcredits = lp->lp_txcredits;
+ if (lp->lp_txcredits < lp->lp_mintxcredits)
+ lp->lp_mintxcredits = lp->lp_txcredits;
- if (lp->lp_txcredits < 0) {
+ if (lp->lp_txcredits < 0) {
msg->msg_tx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &lp->lp_txq);
- return EAGAIN;
- }
- }
+ list_add_tail(&msg->msg_list, &lp->lp_txq);
+ return -EAGAIN;
+ }
+ }
- if (!msg->msg_txcredit) {
+ if (!msg->msg_txcredit) {
LASSERT((tq->tq_credits < 0) ==
- !cfs_list_empty(&tq->tq_delayed));
+ !list_empty(&tq->tq_delayed));
msg->msg_txcredit = 1;
tq->tq_credits--;
if (tq->tq_credits < 0) {
msg->msg_tx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &tq->tq_delayed);
- return EAGAIN;
+ list_add_tail(&msg->msg_list, &tq->tq_delayed);
+ return -EAGAIN;
}
}
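The converted block preserves the credit discipline of lnet_post_send_locked(): take a peer-TX credit and an NI-TX credit, record the low-water mark, and if a count goes negative park the message on the matching delayed queue and return -EAGAIN, now as a proper negative errno in line with the corrected \retval comments. A standalone sketch of the take-or-queue step, using a simplified credit pool and a counter in place of the real lnet_peer_t / lnet_tx_queue state:

#include <stdio.h>
#include <errno.h>

struct credit_pool {
	int credits;      /* may go negative while messages are queued */
	int min_credits;  /* low-water mark, kept for statistics */
	int queued;       /* stand-in for the delayed-message list */
};

/* Returns 0 if the caller may send now, -EAGAIN if the message must wait. */
static int take_credit(struct credit_pool *pool)
{
	pool->credits--;
	if (pool->credits < pool->min_credits)
		pool->min_credits = pool->credits;

	if (pool->credits < 0) {
		pool->queued++;   /* real code: list_add_tail(&msg->msg_list, ...) */
		return -EAGAIN;
	}
	return 0;
}

/* When a send completes, the credit comes back and one delayed message may run. */
static void return_credit(struct credit_pool *pool)
{
	pool->credits++;
	if (pool->credits <= 0 && pool->queued > 0)
		pool->queued--;   /* real code: pop lp_txq/tq_delayed and resend */
}

int main(void)
{
	struct credit_pool pool = { .credits = 1, .min_credits = 1 };
	int i;

	for (i = 0; i < 3; i++)
		printf("send %d -> %d\n", i, take_credit(&pool)); /* 0, then -EAGAIN */
	return_credit(&pool);
	printf("credits=%d queued=%d min=%d\n",
	       pool.credits, pool.queued, pool.min_credits);
	return 0;
}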
/* non-lnet_parse callers only receive delayed messages */
LASSERT(!do_recv || msg->msg_rx_delayed);
- if (!msg->msg_peerrtrcredit) {
- LASSERT ((lp->lp_rtrcredits < 0) ==
- !cfs_list_empty(&lp->lp_rtrq));
+ if (!msg->msg_peerrtrcredit) {
+ LASSERT((lp->lp_rtrcredits < 0) ==
+ !list_empty(&lp->lp_rtrq));
msg->msg_peerrtrcredit = 1;
lp->lp_rtrcredits--;
if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
lp->lp_minrtrcredits = lp->lp_rtrcredits;
- if (lp->lp_rtrcredits < 0) {
- /* must have checked eager_recv before here */
+ if (lp->lp_rtrcredits < 0) {
+ /* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &lp->lp_rtrq);
- return EAGAIN;
- }
- }
+ list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+ return -EAGAIN;
+ }
+ }
- rbp = lnet_msg2bufpool(msg);
+ rbp = lnet_msg2bufpool(msg);
- if (!msg->msg_rtrcredit) {
- LASSERT ((rbp->rbp_credits < 0) ==
- !cfs_list_empty(&rbp->rbp_msgs));
+ if (!msg->msg_rtrcredit) {
+ LASSERT((rbp->rbp_credits < 0) ==
+ !list_empty(&rbp->rbp_msgs));
msg->msg_rtrcredit = 1;
rbp->rbp_credits--;
if (rbp->rbp_credits < rbp->rbp_mincredits)
rbp->rbp_mincredits = rbp->rbp_credits;
- if (rbp->rbp_credits < 0) {
- /* must have checked eager_recv before here */
+ if (rbp->rbp_credits < 0) {
+ /* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
- return EAGAIN;
- }
- }
+ list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
+ return -EAGAIN;
+ }
+ }
- LASSERT (!cfs_list_empty(&rbp->rbp_bufs));
- rb = cfs_list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
- cfs_list_del(&rb->rb_list);
+ LASSERT(!list_empty(&rbp->rbp_bufs));
+ rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
+ list_del(&rb->rb_list);
msg->msg_niov = rbp->rbp_npages;
msg->msg_kiov = &rb->rb_kiov[0];
msg->msg_txcredit = 0;
LASSERT((tq->tq_credits < 0) ==
- !cfs_list_empty(&tq->tq_delayed));
+ !list_empty(&tq->tq_delayed));
tq->tq_credits++;
if (tq->tq_credits <= 0) {
- msg2 = cfs_list_entry(tq->tq_delayed.next,
- lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ msg2 = list_entry(tq->tq_delayed.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer->lp_ni == ni);
LASSERT(msg2->msg_tx_delayed);
msg->msg_peertxcredit = 0;
LASSERT((txpeer->lp_txcredits < 0) ==
- !cfs_list_empty(&txpeer->lp_txq));
+ !list_empty(&txpeer->lp_txq));
txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
LASSERT (txpeer->lp_txqnob >= 0);
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
- msg2 = cfs_list_entry(txpeer->lp_txq.next,
+ msg2 = list_entry(txpeer->lp_txq.next,
lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer == txpeer);
LASSERT(msg2->msg_tx_delayed);
* itself */
LASSERT (msg->msg_kiov != NULL);
- rb = cfs_list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
+ rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
rbp = rb->rb_pool;
LASSERT (rbp == lnet_msg2bufpool(msg));
msg->msg_rtrcredit = 0;
LASSERT((rbp->rbp_credits < 0) ==
- !cfs_list_empty(&rbp->rbp_msgs));
+ !list_empty(&rbp->rbp_msgs));
LASSERT((rbp->rbp_credits > 0) ==
- !cfs_list_empty(&rbp->rbp_bufs));
+ !list_empty(&rbp->rbp_bufs));
- cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
+ list_add(&rb->rb_list, &rbp->rbp_bufs);
rbp->rbp_credits++;
if (rbp->rbp_credits <= 0) {
- msg2 = cfs_list_entry(rbp->rbp_msgs.next,
- lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ msg2 = list_entry(rbp->rbp_msgs.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
- (void) lnet_post_routed_recv_locked(msg2, 1);
- }
- }
+ (void) lnet_post_routed_recv_locked(msg2, 1);
+ }
+ }
- if (msg->msg_peerrtrcredit) {
- /* give back peer router credits */
- msg->msg_peerrtrcredit = 0;
+ if (msg->msg_peerrtrcredit) {
+ /* give back peer router credits */
+ msg->msg_peerrtrcredit = 0;
- LASSERT((rxpeer->lp_rtrcredits < 0) ==
- !cfs_list_empty(&rxpeer->lp_rtrq));
+ LASSERT((rxpeer->lp_rtrcredits < 0) ==
+ !list_empty(&rxpeer->lp_rtrq));
- rxpeer->lp_rtrcredits++;
- if (rxpeer->lp_rtrcredits <= 0) {
- msg2 = cfs_list_entry(rxpeer->lp_rtrq.next,
- lnet_msg_t, msg_list);
- cfs_list_del(&msg2->msg_list);
+ rxpeer->lp_rtrcredits++;
+ if (rxpeer->lp_rtrcredits <= 0) {
+ msg2 = list_entry(rxpeer->lp_rtrq.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
- (void) lnet_post_routed_recv_locked(msg2, 1);
- }
- }
+ (void) lnet_post_routed_recv_locked(msg2, 1);
+ }
+ }
#else
LASSERT (!msg->msg_rtrcredit);
LASSERT (!msg->msg_peerrtrcredit);
lp_best = NULL;
best_route = last_route = NULL;
- cfs_list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+ list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
lp = route->lr_gateway;
if (!lnet_is_route_alive(route))
rc = lnet_post_send_locked(msg, 0);
lnet_net_unlock(cpt);
- if (rc == EHOSTUNREACH || rc == ECANCELED)
- return -rc;
+ if (rc == -EHOSTUNREACH || rc == -ECANCELED)
+ return rc;
if (rc == 0)
lnet_ni_send(src_ni, msg);
- return 0; /* rc == 0 or EAGAIN */
+ return 0; /* rc == 0 or -EAGAIN */
}
static void
/* Message looks OK; we're not going to return an error, so we MUST
* call back lnd_recv() come what may... */
- if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (src_nid, 0)) /* shall we now? */
- {
- CERROR("%s, src %s: Dropping %s to simulate failure\n",
- libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
- lnet_msgtyp2str(type));
- goto drop;
- }
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(src_nid, 0)) { /* shall we now? */
+ CERROR("%s, src %s: Dropping %s to simulate failure\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type));
+ goto drop;
+ }
msg = lnet_msg_alloc();
if (msg == NULL) {
EXPORT_SYMBOL(lnet_parse);
void
-lnet_drop_delayed_msg_list(cfs_list_t *head, char *reason)
+lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
- while (!cfs_list_empty(head)) {
+ while (!list_empty(head)) {
lnet_process_id_t id = {0};
lnet_msg_t *msg;
- msg = cfs_list_entry(head->next, lnet_msg_t, msg_list);
- cfs_list_del(&msg->msg_list);
+ msg = list_entry(head->next, lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
id.nid = msg->msg_hdr.src_nid;
id.pid = msg->msg_hdr.src_pid;
}
void
-lnet_recv_delayed_msg_list(cfs_list_t *head)
+lnet_recv_delayed_msg_list(struct list_head *head)
{
- while (!cfs_list_empty(head)) {
+ while (!list_empty(head)) {
lnet_msg_t *msg;
lnet_process_id_t id;
- msg = cfs_list_entry(head->next, lnet_msg_t, msg_list);
- cfs_list_del(&msg->msg_list);
+ msg = list_entry(head->next, lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
/* md won't disappear under me, since each msg
* holds a ref on it */
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
- if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
- {
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) { /* shall we now? */
CERROR("Dropping PUT to %s: simulated failure\n",
libcfs_id2str(target));
return -EIO;
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
- if (!cfs_list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
- {
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) { /* shall we now? */
CERROR("Dropping GET to %s: simulated failure\n",
libcfs_id2str(target));
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
- cfs_list_t *e;
+ struct list_head *e;
struct lnet_ni *ni;
lnet_remotenet_t *rnet;
__u32 dstnet = LNET_NIDNET(dstnid);
int hops;
int cpt;
__u32 order = 2;
- cfs_list_t *rn_list;
+ struct list_head *rn_list;
/* if !local_nid_dist_zero, I don't return a distance of 0 ever
* (when lustre sees a distance of 0, it substitutes 0@lo), so I
cpt = lnet_net_lock_current();
- cfs_list_for_each (e, &the_lnet.ln_nis) {
- ni = cfs_list_entry(e, lnet_ni_t, ni_list);
+ list_for_each(e, &the_lnet.ln_nis) {
+ ni = list_entry(e, lnet_ni_t, ni_list);
if (ni->ni_nid == dstnid) {
if (srcnidp != NULL)
}
rn_list = lnet_net2rnethash(dstnet);
- cfs_list_for_each(e, rn_list) {
- rnet = cfs_list_entry(e, lnet_remotenet_t, lrn_list);
+ list_for_each(e, rn_list) {
+ rnet = list_entry(e, lnet_remotenet_t, lrn_list);
- if (rnet->lrn_net == dstnet) {
- lnet_route_t *route;
- lnet_route_t *shortest = NULL;
+ if (rnet->lrn_net == dstnet) {
+ lnet_route_t *route;
+ lnet_route_t *shortest = NULL;
- LASSERT (!cfs_list_empty(&rnet->lrn_routes));
+ LASSERT(!list_empty(&rnet->lrn_routes));
- cfs_list_for_each_entry(route, &rnet->lrn_routes,
- lr_list) {
- if (shortest == NULL ||
- route->lr_hops < shortest->lr_hops)
- shortest = route;
- }
+ list_for_each_entry(route, &rnet->lrn_routes,
+ lr_list) {
+ if (shortest == NULL ||
+ route->lr_hops < shortest->lr_hops)
+ shortest = route;
+ }
LASSERT (shortest != NULL);
hops = shortest->lr_hops;
#else
lnet_ni_t *ni;
lnet_remotenet_t *rnet;
- cfs_list_t *tmp;
+ struct list_head *tmp;
lnet_route_t *route;
lnet_nid_t *nids;
int nnids;
cpt = lnet_net_lock_current();
rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
if (rnet != NULL) {
- cfs_list_for_each(tmp, &rnet->lrn_routes) {
+ list_for_each(tmp, &rnet->lrn_routes) {
if (nnids == maxnids) {
lnet_net_unlock(cpt);
LIBCFS_FREE(nids, maxnids * sizeof(*nids));
goto again;
}
- route = cfs_list_entry(tmp, lnet_route_t, lr_list);
- nids[nnids++] = route->lr_gateway->lp_nid;
- }
- }
+ route = list_entry(tmp, lnet_route_t, lr_list);
+ nids[nnids++] = route->lr_gateway->lp_nid;
+ }
+ }
lnet_net_unlock(cpt);
/* set async on all the routers */
LASSERT(!msg->msg_onactivelist);
msg->msg_onactivelist = 1;
- cfs_list_add(&msg->msg_activelist, &container->msc_active);
+ list_add(&msg->msg_activelist, &container->msc_active);
counters->msgs_alloc++;
if (counters->msgs_alloc > counters->msgs_max)
lnet_msg_decommit_rx(msg, status);
}
- cfs_list_del(&msg->msg_activelist);
+ list_del(&msg->msg_activelist);
msg->msg_onactivelist = 0;
the_lnet.ln_counters[cpt2]->msgs_alloc--;
lnet_net_lock(cpt);
container = the_lnet.ln_msg_containers[cpt];
- cfs_list_add_tail(&msg->msg_list, &container->msc_finalizing);
+ list_add_tail(&msg->msg_list, &container->msc_finalizing);
/* Recursion breaker. Don't complete the message here if I am (or
* enough other threads are) already completing messages */
container->msc_finalizers[0] = (struct lnet_msg_container *)1;
#endif
- while (!cfs_list_empty(&container->msc_finalizing)) {
- msg = cfs_list_entry(container->msc_finalizing.next,
- lnet_msg_t, msg_list);
+ while (!list_empty(&container->msc_finalizing)) {
+ msg = list_entry(container->msc_finalizing.next,
+ lnet_msg_t, msg_list);
- cfs_list_del(&msg->msg_list);
+ list_del(&msg->msg_list);
/* NB drops and regains the lnet lock if it actually does
* anything, so my finalizing friends can chomp along too */
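The two comments above describe a recursion breaker: a message is queued on msc_finalizing, and a thread only drains the queue if not enough other threads are already doing so, which bounds stack depth when completions trigger further completions. A simplified user-space sketch of the same pattern follows; the types and names (struct item, struct container, finalize()) are hypothetical, and where the real container tracks several finalizer slots this sketch collapses that to a single flag.

/* Sketch only (user-space, hypothetical types): the recursion-breaker
 * pattern described in the comments above. Each caller queues its
 * item; only the first thread through drains the queue, so nested or
 * concurrent completions never recurse or run in parallel. */
#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
};

struct container {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	struct item *pending;	/* queued, not yet completed items */
	int finalizing;		/* a thread is already draining */
};

static void complete_one(struct item *it)
{
	free(it);		/* stand-in for the real completion work;
				 * may itself call finalize() again */
}

void finalize(struct container *c, struct item *it)
{
	pthread_mutex_lock(&c->lock);
	it->next = c->pending;
	c->pending = it;

	if (c->finalizing) {	/* someone else will finish the job */
		pthread_mutex_unlock(&c->lock);
		return;
	}
	c->finalizing = 1;

	while (c->pending != NULL) {
		struct item *head = c->pending;

		c->pending = head->next;
		/* drop the lock so nested finalize() calls can queue */
		pthread_mutex_unlock(&c->lock);
		complete_one(head);
		pthread_mutex_lock(&c->lock);
	}
	c->finalizing = 0;
	pthread_mutex_unlock(&c->lock);
}

A nested call into finalize() from complete_one() simply queues its item and returns, which is the behaviour the "recursion breaker" comment above is after.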
if (container->msc_init == 0)
return;
- while (!cfs_list_empty(&container->msc_active)) {
- lnet_msg_t *msg = cfs_list_entry(container->msc_active.next,
- lnet_msg_t, msg_activelist);
+ while (!list_empty(&container->msc_active)) {
+ lnet_msg_t *msg = list_entry(container->msc_active.next,
+ lnet_msg_t, msg_activelist);
LASSERT(msg->msg_onactivelist);
msg->msg_onactivelist = 0;
- cfs_list_del(&msg->msg_activelist);
+ list_del(&msg->msg_activelist);
lnet_msg_free(msg);
count++;
}
container->msc_init = 1;
- CFS_INIT_LIST_HEAD(&container->msc_active);
- CFS_INIT_LIST_HEAD(&container->msc_finalizing);
+ INIT_LIST_HEAD(&container->msc_active);
+ INIT_LIST_HEAD(&container->msc_finalizing);
#ifdef LNET_USE_LIB_FREELIST
memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));
*bmap |= 1ULL << pos;
}
-cfs_list_t *
+struct list_head *
lnet_mt_match_head(struct lnet_match_table *mtable,
lnet_process_id_t id, __u64 mbits)
{
lnet_mt_match_md(struct lnet_match_table *mtable,
struct lnet_match_info *info, struct lnet_msg *msg)
{
- cfs_list_t *head;
+ struct list_head *head;
lnet_me_t *me;
lnet_me_t *tmp;
int exhausted = 0;
int rc;
/* any ME with ignore bits? */
- if (!cfs_list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
+ if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
else
head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
exhausted = LNET_MATCHMD_EXHAUSTED;
- cfs_list_for_each_entry_safe(me, tmp, head, me_list) {
+ list_for_each_entry_safe(me, tmp, head, me_list) {
/* ME attached but MD not attached yet */
if (me->me_md == NULL)
continue;
if (lnet_ptl_is_lazy(ptl)) {
if (msg->msg_rx_ready_delay) {
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
}
rc = LNET_MATCHMD_NONE;
} else {
lnet_ptl_lock(ptl);
if (i == 0) { /* the first try, attach on stealing list */
- cfs_list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_stealing);
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_stealing);
}
- if (!cfs_list_empty(&msg->msg_list)) { /* on stealing list */
+ if (!list_empty(&msg->msg_list)) { /* on stealing list */
rc = lnet_mt_match_md(mtable, info, msg);
if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
lnet_ptl_disable_mt(ptl, cpt);
if ((rc & LNET_MATCHMD_FINISH) != 0)
- cfs_list_del_init(&msg->msg_list);
+ list_del_init(&msg->msg_list);
} else {
/* could be matched by lnet_ptl_attach_md()
LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
}
- if (!cfs_list_empty(&msg->msg_list) && /* not matched yet */
+ if (!list_empty(&msg->msg_list) && /* not matched yet */
(i == LNET_CPT_NUMBER - 1 || /* the last CPT */
ptl->ptl_mt_nmaps == 0 || /* no active CPT */
(ptl->ptl_mt_nmaps == 1 && /* the only active CPT */
ptl->ptl_mt_maps[0] == cpt))) {
/* nothing to steal, delay or drop */
- cfs_list_del_init(&msg->msg_list);
+ list_del_init(&msg->msg_list);
if (lnet_ptl_is_lazy(ptl)) {
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
rc = LNET_MATCHMD_NONE;
} else {
rc = LNET_MATCHMD_DROP;
lnet_ptl_lock(ptl);
msg->msg_rx_delayed = 1;
- cfs_list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
+ list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
lnet_ptl_unlock(ptl);
lnet_res_unlock(mtable->mt_cpt);
/* called with lnet_res_lock held */
void
lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
- cfs_list_t *matches, cfs_list_t *drops)
+ struct list_head *matches, struct list_head *drops)
{
struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
struct lnet_match_table *mtable;
- cfs_list_t *head;
+ struct list_head *head;
lnet_msg_t *tmp;
lnet_msg_t *msg;
int exhausted = 0;
cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
mtable = ptl->ptl_mtables[cpt];
- if (cfs_list_empty(&ptl->ptl_msg_stealing) &&
- cfs_list_empty(&ptl->ptl_msg_delayed) &&
+ if (list_empty(&ptl->ptl_msg_stealing) &&
+ list_empty(&ptl->ptl_msg_delayed) &&
!lnet_mt_test_exhausted(mtable, me->me_pos))
return;
lnet_ptl_lock(ptl);
head = &ptl->ptl_msg_stealing;
again:
- cfs_list_for_each_entry_safe(msg, tmp, head, msg_list) {
+ list_for_each_entry_safe(msg, tmp, head, msg_list) {
struct lnet_match_info info;
lnet_hdr_t *hdr;
int rc;
/* Hurrah! This _is_ a match */
LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
- cfs_list_del_init(&msg->msg_list);
+ list_del_init(&msg->msg_list);
if (head == &ptl->ptl_msg_stealing) {
if (exhausted)
}
if ((rc & LNET_MATCHMD_OK) != 0) {
- cfs_list_add_tail(&msg->msg_list, matches);
+ list_add_tail(&msg->msg_list, matches);
CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
"match "LPU64" offset %d length %d.\n",
info.mi_portal, info.mi_mbits,
info.mi_roffset, info.mi_rlength);
} else {
- cfs_list_add_tail(&msg->msg_list, drops);
+ list_add_tail(&msg->msg_list, drops);
}
if (exhausted)
if (ptl->ptl_mtables == NULL) /* uninitialized portal */
return;
- LASSERT(cfs_list_empty(&ptl->ptl_msg_delayed));
- LASSERT(cfs_list_empty(&ptl->ptl_msg_stealing));
+ LASSERT(list_empty(&ptl->ptl_msg_delayed));
+ LASSERT(list_empty(&ptl->ptl_msg_stealing));
#ifndef __KERNEL__
# ifdef HAVE_LIBPTHREAD
pthread_mutex_destroy(&ptl->ptl_lock);
# endif
#endif
cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
- cfs_list_t *mhash;
- lnet_me_t *me;
- int j;
+ struct list_head *mhash;
+ lnet_me_t *me;
+ int j;
if (mtable->mt_mhash == NULL) /* uninitialized match-table */
continue;
mhash = mtable->mt_mhash;
/* cleanup ME */
for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
- while (!cfs_list_empty(&mhash[j])) {
- me = cfs_list_entry(mhash[j].next,
- lnet_me_t, me_list);
+ while (!list_empty(&mhash[j])) {
+ me = list_entry(mhash[j].next,
+ lnet_me_t, me_list);
CERROR("Active ME %p on exit\n", me);
- cfs_list_del(&me->me_list);
+ list_del(&me->me_list);
lnet_me_free(me);
}
}
lnet_ptl_setup(struct lnet_portal *ptl, int index)
{
struct lnet_match_table *mtable;
- cfs_list_t *mhash;
+ struct list_head *mhash;
int i;
int j;
}
ptl->ptl_index = index;
- CFS_INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
- CFS_INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
+ INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
+ INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
#ifdef __KERNEL__
spin_lock_init(&ptl->ptl_lock);
#else
LNET_MT_EXHAUSTED_BMAP);
mtable->mt_mhash = mhash;
for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
- CFS_INIT_LIST_HEAD(&mhash[j]);
+ INIT_LIST_HEAD(&mhash[j]);
mtable->mt_portal = index;
mtable->mt_cpt = i;
LNetClearLazyPortal(int portal)
{
struct lnet_portal *ptl;
- CFS_LIST_HEAD (zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
if (portal < 0 || portal >= the_lnet.ln_nportals)
return -EINVAL;
CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
/* grab all the blocked messages atomically */
- cfs_list_splice_init(&ptl->ptl_msg_delayed, &zombies);
+ list_splice_init(&ptl->ptl_msg_delayed, &zombies);
lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
lnet_peer_tables_create(void)
{
struct lnet_peer_table *ptable;
- cfs_list_t *hash;
+ struct list_head *hash;
int i;
int j;
}
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- CFS_INIT_LIST_HEAD(&ptable->pt_deathrow);
+ INIT_LIST_HEAD(&ptable->pt_deathrow);
LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
LNET_PEER_HASH_SIZE * sizeof(*hash));
}
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- CFS_INIT_LIST_HEAD(&hash[j]);
+ INIT_LIST_HEAD(&hash[j]);
ptable->pt_hash = hash; /* sign of initialization */
}
lnet_peer_tables_destroy(void)
{
struct lnet_peer_table *ptable;
- cfs_list_t *hash;
+ struct list_head *hash;
int i;
int j;
if (hash == NULL) /* not intialized */
break;
- LASSERT(cfs_list_empty(&ptable->pt_deathrow));
+ LASSERT(list_empty(&ptable->pt_deathrow));
ptable->pt_hash = NULL;
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- LASSERT(cfs_list_empty(&hash[j]));
+ LASSERT(list_empty(&hash[j]));
LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
}
lnet_net_lock(i);
for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
- cfs_list_t *peers = &ptable->pt_hash[j];
+ struct list_head *peers = &ptable->pt_hash[j];
- while (!cfs_list_empty(peers)) {
- lnet_peer_t *lp = cfs_list_entry(peers->next,
+ while (!list_empty(peers)) {
+ lnet_peer_t *lp = list_entry(peers->next,
lnet_peer_t,
lp_hashlist);
- cfs_list_del_init(&lp->lp_hashlist);
+ list_del_init(&lp->lp_hashlist);
/* lose hash table's ref */
lnet_peer_decref_locked(lp);
}
}
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- CFS_LIST_HEAD (deathrow);
+ struct list_head deathrow = LIST_HEAD_INIT(deathrow);
lnet_peer_t *lp;
lnet_net_lock(i);
cfs_pause(cfs_time_seconds(1) / 2);
lnet_net_lock(i);
}
- cfs_list_splice_init(&ptable->pt_deathrow, &deathrow);
+ list_splice_init(&ptable->pt_deathrow, &deathrow);
lnet_net_unlock(i);
- while (!cfs_list_empty(&deathrow)) {
- lp = cfs_list_entry(deathrow.next,
- lnet_peer_t, lp_hashlist);
- cfs_list_del(&lp->lp_hashlist);
+ while (!list_empty(&deathrow)) {
+ lp = list_entry(deathrow.next,
+ lnet_peer_t, lp_hashlist);
+ list_del(&lp->lp_hashlist);
LIBCFS_FREE(lp, sizeof(*lp));
}
}
LASSERT(lp->lp_refcount == 0);
LASSERT(lp->lp_rtr_refcount == 0);
- LASSERT(cfs_list_empty(&lp->lp_txq));
- LASSERT(cfs_list_empty(&lp->lp_hashlist));
+ LASSERT(list_empty(&lp->lp_txq));
+ LASSERT(list_empty(&lp->lp_hashlist));
LASSERT(lp->lp_txqnob == 0);
ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
lp->lp_ni = NULL;
- cfs_list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
}
lnet_peer_t *
lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
- cfs_list_t *peers;
- lnet_peer_t *lp;
+ struct list_head *peers;
+ lnet_peer_t *lp;
LASSERT(!the_lnet.ln_shutdown);
peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
- cfs_list_for_each_entry(lp, peers, lp_hashlist) {
+ list_for_each_entry(lp, peers, lp_hashlist) {
if (lp->lp_nid == nid) {
lnet_peer_addref_locked(lp);
return lp;
return 0;
}
- if (!cfs_list_empty(&ptable->pt_deathrow)) {
- lp = cfs_list_entry(ptable->pt_deathrow.next,
- lnet_peer_t, lp_hashlist);
- cfs_list_del(&lp->lp_hashlist);
+ if (!list_empty(&ptable->pt_deathrow)) {
+ lp = list_entry(ptable->pt_deathrow.next,
+ lnet_peer_t, lp_hashlist);
+ list_del(&lp->lp_hashlist);
}
/*
goto out;
}
- CFS_INIT_LIST_HEAD(&lp->lp_txq);
- CFS_INIT_LIST_HEAD(&lp->lp_rtrq);
- CFS_INIT_LIST_HEAD(&lp->lp_routes);
+ INIT_LIST_HEAD(&lp->lp_txq);
+ INIT_LIST_HEAD(&lp->lp_rtrq);
+ INIT_LIST_HEAD(&lp->lp_routes);
lp->lp_notify = 0;
lp->lp_notifylnd = 0;
lp->lp_rtrcredits =
lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
- cfs_list_add_tail(&lp->lp_hashlist,
- &ptable->pt_hash[lnet_nid2peerhash(nid)]);
+ list_add_tail(&lp->lp_hashlist,
+ &ptable->pt_hash[lnet_nid2peerhash(nid)]);
ptable->pt_version++;
*lpp = lp;
return 0;
out:
if (lp != NULL)
- cfs_list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
ptable->pt_number--;
return rc;
}
LASSERT(lp->lp_rtr_refcount >= 0);
/* lnet_net_lock must be exclusively locked */
- lp->lp_rtr_refcount++;
- if (lp->lp_rtr_refcount == 1) {
- cfs_list_t *pos;
+ lp->lp_rtr_refcount++;
+ if (lp->lp_rtr_refcount == 1) {
+ struct list_head *pos;
- /* a simple insertion sort */
- cfs_list_for_each_prev(pos, &the_lnet.ln_routers) {
- lnet_peer_t *rtr = cfs_list_entry(pos, lnet_peer_t,
- lp_rtr_list);
+ /* a simple insertion sort */
+ list_for_each_prev(pos, &the_lnet.ln_routers) {
+ lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
+ lp_rtr_list);
- if (rtr->lp_nid < lp->lp_nid)
- break;
- }
+ if (rtr->lp_nid < lp->lp_nid)
+ break;
+ }
- cfs_list_add(&lp->lp_rtr_list, pos);
- /* addref for the_lnet.ln_routers */
- lnet_peer_addref_locked(lp);
- the_lnet.ln_routers_version++;
- }
+ list_add(&lp->lp_rtr_list, pos);
+ /* addref for the_lnet.ln_routers */
+ lnet_peer_addref_locked(lp);
+ the_lnet.ln_routers_version++;
+ }
}
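The code above keeps the_lnet.ln_routers ordered by NID using a backwards scan plus list_add(), per the "simple insertion sort" comment. A stand-alone sketch of the idiom, assuming <linux/list.h> and hypothetical struct router / router_insert_sorted() names:

/* Sketch only: sorted insertion into a list_head-based list. */
#include <linux/list.h>
#include <linux/types.h>

struct router {
	struct list_head r_list;
	u64 r_nid;
};

static void router_insert_sorted(struct list_head *routers, struct router *rtr)
{
	struct list_head *pos;

	/* scan from the tail towards the head and stop at the first
	 * entry whose key is smaller than the new one */
	list_for_each_prev(pos, routers) {
		struct router *cur = list_entry(pos, struct router, r_list);

		if (cur->r_nid < rtr->r_nid)
			break;
	}
	/* if no smaller entry exists, pos ends up at the list head and
	 * the new entry goes first; otherwise it goes right after pos */
	list_add(&rtr->r_list, pos);
}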
static void
/* lnet_net_lock must be exclusively locked */
lp->lp_rtr_refcount--;
if (lp->lp_rtr_refcount == 0) {
- LASSERT(cfs_list_empty(&lp->lp_routes));
+ LASSERT(list_empty(&lp->lp_routes));
if (lp->lp_rcd != NULL) {
- cfs_list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
- lp->lp_rcd = NULL;
- }
+ list_add(&lp->lp_rcd->rcd_list,
+ &the_lnet.ln_rcd_deathrow);
+ lp->lp_rcd = NULL;
+ }
- cfs_list_del(&lp->lp_rtr_list);
- /* decref for the_lnet.ln_routers */
- lnet_peer_decref_locked(lp);
- the_lnet.ln_routers_version++;
- }
+ list_del(&lp->lp_rtr_list);
+ /* decref for the_lnet.ln_routers */
+ lnet_peer_decref_locked(lp);
+ the_lnet.ln_routers_version++;
+ }
}
lnet_remotenet_t *
lnet_find_net_locked (__u32 net)
{
- lnet_remotenet_t *rnet;
- cfs_list_t *tmp;
- cfs_list_t *rn_list;
+ lnet_remotenet_t *rnet;
+ struct list_head *tmp;
+ struct list_head *rn_list;
LASSERT(!the_lnet.ln_shutdown);
rn_list = lnet_net2rnethash(net);
- cfs_list_for_each(tmp, rn_list) {
- rnet = cfs_list_entry(tmp, lnet_remotenet_t, lrn_list);
+ list_for_each(tmp, rn_list) {
+ rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
if (rnet->lrn_net == net)
return rnet;
int lnd_type, seed[2];
struct timeval tv;
lnet_ni_t *ni;
- cfs_list_t *tmp;
+ struct list_head *tmp;
if (seeded)
return;
cfs_get_random_bytes(seed, sizeof(seed));
- /* Nodes with small feet have little entropy
- * the NID for this node gives the most entropy in the low bits */
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ /* Nodes with small feet have little entropy
+ * the NID for this node gives the most entropy in the low bits */
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
- if (lnd_type != LOLND)
- seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
- }
+ if (lnd_type != LOLND)
+ seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
+ }
do_gettimeofday(&tv);
cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
void
lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
{
- unsigned int len = 0;
- unsigned int offset = 0;
- cfs_list_t *e;
+ unsigned int len = 0;
+ unsigned int offset = 0;
+ struct list_head *e;
- lnet_shuffle_seed();
+ lnet_shuffle_seed();
- cfs_list_for_each (e, &rnet->lrn_routes) {
- len++;
- }
+ list_for_each(e, &rnet->lrn_routes) {
+ len++;
+ }
- /* len+1 positions to add a new entry, also prevents division by 0 */
- offset = cfs_rand() % (len + 1);
- cfs_list_for_each (e, &rnet->lrn_routes) {
- if (offset == 0)
- break;
- offset--;
- }
- cfs_list_add(&route->lr_list, e);
- cfs_list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
+ /* len+1 positions to add a new entry, also prevents division by 0 */
+ offset = cfs_rand() % (len + 1);
+ list_for_each(e, &rnet->lrn_routes) {
+ if (offset == 0)
+ break;
+ offset--;
+ }
+ list_add(&route->lr_list, e);
+ list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
the_lnet.ln_remote_nets_version++;
lnet_rtr_addref_locked(route->lr_gateway);
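lnet_add_route_to_rnet() above shuffles routes by inserting each new one at a random position: count the existing entries, pick an offset in [0, len], walk to it, and list_add() there. A sketch of the idiom with hypothetical names; prandom_u32() is only a stand-in for the cfs_rand() the real code uses:

/* Sketch only: insert a new entry at a uniformly random position so
 * the list ends up shuffled over time. */
#include <linux/list.h>
#include <linux/random.h>

struct entry {
	struct list_head e_list;
};

static void insert_at_random_pos(struct list_head *head, struct entry *ent)
{
	struct list_head *pos;
	unsigned int len = 0;
	unsigned int offset;

	list_for_each(pos, head)
		len++;

	/* len + 1 possible insertion points, so this never divides by 0 */
	offset = prandom_u32() % (len + 1);

	list_for_each(pos, head) {
		if (offset == 0)
			break;
		offset--;
	}
	/* pos is an existing entry, or the head itself if offset == len */
	list_add(&ent->e_list, pos);
}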
lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
unsigned int priority)
{
- cfs_list_t *e;
- lnet_remotenet_t *rnet;
- lnet_remotenet_t *rnet2;
- lnet_route_t *route;
- lnet_ni_t *ni;
- int add_route;
- int rc;
+ struct list_head *e;
+ lnet_remotenet_t *rnet;
+ lnet_remotenet_t *rnet2;
+ lnet_route_t *route;
+ lnet_ni_t *ni;
+ int add_route;
+ int rc;
CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
return -ENOMEM;
}
- CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
+ INIT_LIST_HEAD(&rnet->lrn_routes);
rnet->lrn_net = net;
route->lr_hops = hops;
route->lr_net = net;
LASSERT (!the_lnet.ln_shutdown);
- rnet2 = lnet_find_net_locked(net);
- if (rnet2 == NULL) {
- /* new network */
- cfs_list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
- rnet2 = rnet;
- }
+ rnet2 = lnet_find_net_locked(net);
+ if (rnet2 == NULL) {
+ /* new network */
+ list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
+ rnet2 = rnet;
+ }
- /* Search for a duplicate route (it's a NOOP if it is) */
- add_route = 1;
- cfs_list_for_each (e, &rnet2->lrn_routes) {
- lnet_route_t *route2 = cfs_list_entry(e, lnet_route_t, lr_list);
+ /* Search for a duplicate route (it's a NOOP if it is) */
+ add_route = 1;
+ list_for_each(e, &rnet2->lrn_routes) {
+ lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
if (route2->lr_gateway == route->lr_gateway) {
add_route = 0;
int
lnet_check_routes(void)
{
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- lnet_route_t *route2;
- cfs_list_t *e1;
- cfs_list_t *e2;
- int cpt;
- cfs_list_t *rn_list;
- int i;
+ lnet_remotenet_t *rnet;
+ lnet_route_t *route;
+ lnet_route_t *route2;
+ struct list_head *e1;
+ struct list_head *e2;
+ int cpt;
+ struct list_head *rn_list;
+ int i;
cpt = lnet_net_lock_current();
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
rn_list = &the_lnet.ln_remote_nets_hash[i];
- cfs_list_for_each(e1, rn_list) {
- rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
+ list_for_each(e1, rn_list) {
+ rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
route2 = NULL;
- cfs_list_for_each(e2, &rnet->lrn_routes) {
+ list_for_each(e2, &rnet->lrn_routes) {
lnet_nid_t nid1;
lnet_nid_t nid2;
int net;
- route = cfs_list_entry(e2, lnet_route_t,
- lr_list);
+ route = list_entry(e2, lnet_route_t,
+ lr_list);
if (route2 == NULL) {
route2 = route;
struct lnet_peer *gateway;
lnet_remotenet_t *rnet;
lnet_route_t *route;
- cfs_list_t *e1;
- cfs_list_t *e2;
+ struct list_head *e1;
+ struct list_head *e2;
int rc = -ENOENT;
- cfs_list_t *rn_list;
+ struct list_head *rn_list;
int idx = 0;
CDEBUG(D_NET, "Del route: net %s : gw %s\n",
else
rn_list = lnet_net2rnethash(net);
- again:
- cfs_list_for_each(e1, rn_list) {
- rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
+again:
+ list_for_each(e1, rn_list) {
+ rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
net == rnet->lrn_net))
continue;
- cfs_list_for_each(e2, &rnet->lrn_routes) {
- route = cfs_list_entry(e2, lnet_route_t, lr_list);
+ list_for_each(e2, &rnet->lrn_routes) {
+ route = list_entry(e2, lnet_route_t, lr_list);
gateway = route->lr_gateway;
if (!(gw_nid == LNET_NID_ANY ||
gw_nid == gateway->lp_nid))
continue;
- cfs_list_del(&route->lr_list);
- cfs_list_del(&route->lr_gwlist);
+ list_del(&route->lr_list);
+ list_del(&route->lr_gwlist);
the_lnet.ln_remote_nets_version++;
- if (cfs_list_empty(&rnet->lrn_routes))
- cfs_list_del(&rnet->lrn_list);
+ if (list_empty(&rnet->lrn_routes))
+ list_del(&rnet->lrn_list);
else
rnet = NULL;
lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
- cfs_list_t *e1;
- cfs_list_t *e2;
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- int cpt;
- int i;
- cfs_list_t *rn_list;
+ struct list_head *e1;
+ struct list_head *e2;
+ lnet_remotenet_t *rnet;
+ lnet_route_t *route;
+ int cpt;
+ int i;
+ struct list_head *rn_list;
cpt = lnet_net_lock_current();
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
rn_list = &the_lnet.ln_remote_nets_hash[i];
- cfs_list_for_each(e1, rn_list) {
- rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
+ list_for_each(e1, rn_list) {
+ rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
- cfs_list_for_each(e2, &rnet->lrn_routes) {
- route = cfs_list_entry(e2, lnet_route_t,
- lr_list);
+ list_for_each(e2, &rnet->lrn_routes) {
+ route = list_entry(e2, lnet_route_t,
+ lr_list);
if (idx-- == 0) {
*net = rnet->lrn_net;
if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
return; /* can't carry NI status info */
- cfs_list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
+ list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
int down = 0;
int up = 0;
int i;
void
lnet_wait_known_routerstate(void)
{
- lnet_peer_t *rtr;
- cfs_list_t *entry;
- int all_known;
+ lnet_peer_t *rtr;
+ struct list_head *entry;
+ int all_known;
- LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
+ LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
- for (;;) {
- int cpt = lnet_net_lock_current();
+ for (;;) {
+ int cpt = lnet_net_lock_current();
- all_known = 1;
- cfs_list_for_each (entry, &the_lnet.ln_routers) {
- rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
+ all_known = 1;
+ list_for_each(entry, &the_lnet.ln_routers) {
+ rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
- if (rtr->lp_alive_count == 0) {
- all_known = 0;
- break;
- }
- }
+ if (rtr->lp_alive_count == 0) {
+ all_known = 0;
+ break;
+ }
+ }
lnet_net_unlock(cpt);
- if (all_known)
+ if (all_known)
return;
#ifndef __KERNEL__
MAX(live_router_check_interval, dead_router_check_interval);
now = cfs_time_current_sec();
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
if (ni->ni_lnd->lnd_type == LOLND)
continue;
void
lnet_destroy_rc_data(lnet_rc_data_t *rcd)
{
- LASSERT(cfs_list_empty(&rcd->rcd_list));
+ LASSERT(list_empty(&rcd->rcd_list));
/* detached from network */
LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
goto out;
LNetInvalidateHandle(&rcd->rcd_mdh);
- CFS_INIT_LIST_HEAD(&rcd->rcd_list);
+ INIT_LIST_HEAD(&rcd->rcd_list);
LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
if (pi == NULL)
* outstanding events as it is allowed outstanding sends */
eqsz = 0;
version = the_lnet.ln_routers_version;
- cfs_list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
lnet_ni_t *ni = rtr->lp_ni;
lnet_process_id_t id;
lnet_rc_data_t *rcd;
lnet_rc_data_t *tmp;
lnet_peer_t *lp;
- cfs_list_t head;
- int i = 2;
+ struct list_head head;
+ int i = 2;
if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
- cfs_list_empty(&the_lnet.ln_rcd_deathrow) &&
- cfs_list_empty(&the_lnet.ln_rcd_zombie)))
+ list_empty(&the_lnet.ln_rcd_deathrow) &&
+ list_empty(&the_lnet.ln_rcd_zombie)))
return;
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
lnet_net_lock(LNET_LOCK_EX);
if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
/* router checker is stopping, prune all */
- cfs_list_for_each_entry(lp, &the_lnet.ln_routers,
- lp_rtr_list) {
+ list_for_each_entry(lp, &the_lnet.ln_routers,
+ lp_rtr_list) {
if (lp->lp_rcd == NULL)
continue;
- LASSERT(cfs_list_empty(&lp->lp_rcd->rcd_list));
- cfs_list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
+ LASSERT(list_empty(&lp->lp_rcd->rcd_list));
+ list_add(&lp->lp_rcd->rcd_list,
+ &the_lnet.ln_rcd_deathrow);
lp->lp_rcd = NULL;
}
}
/* unlink all RCDs on deathrow list */
- cfs_list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
+ list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
- if (!cfs_list_empty(&head)) {
+ if (!list_empty(&head)) {
lnet_net_unlock(LNET_LOCK_EX);
- cfs_list_for_each_entry(rcd, &head, rcd_list)
+ list_for_each_entry(rcd, &head, rcd_list)
LNetMDUnlink(rcd->rcd_mdh);
lnet_net_lock(LNET_LOCK_EX);
}
- cfs_list_splice_init(&head, &the_lnet.ln_rcd_zombie);
+ list_splice_init(&head, &the_lnet.ln_rcd_zombie);
/* release all zombie RCDs */
- while (!cfs_list_empty(&the_lnet.ln_rcd_zombie)) {
- cfs_list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
- rcd_list) {
+ while (!list_empty(&the_lnet.ln_rcd_zombie)) {
+ list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
+ rcd_list) {
if (LNetHandleIsInvalid(rcd->rcd_mdh))
- cfs_list_move(&rcd->rcd_list, &head);
+ list_move(&rcd->rcd_list, &head);
}
wait_unlink = wait_unlink &&
- !cfs_list_empty(&the_lnet.ln_rcd_zombie);
+ !list_empty(&the_lnet.ln_rcd_zombie);
lnet_net_unlock(LNET_LOCK_EX);
- while (!cfs_list_empty(&head)) {
- rcd = cfs_list_entry(head.next,
- lnet_rc_data_t, rcd_list);
- cfs_list_del_init(&rcd->rcd_list);
+ while (!list_empty(&head)) {
+ rcd = list_entry(head.next,
+ lnet_rc_data_t, rcd_list);
+ list_del_init(&rcd->rcd_list);
lnet_destroy_rc_data(rcd);
}
lnet_router_checker(void *arg)
{
lnet_peer_t *rtr;
- cfs_list_t *entry;
+ struct list_head *entry;
cfs_block_allsigs();
rescan:
version = the_lnet.ln_routers_version;
- cfs_list_for_each(entry, &the_lnet.ln_routers) {
- rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
+ list_for_each(entry, &the_lnet.ln_routers) {
+ rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
if (cpt != cpt2) {
if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
return;
- LASSERT (cfs_list_empty(&rbp->rbp_msgs));
- LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
+ LASSERT(list_empty(&rbp->rbp_msgs));
+ LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers);
- while (!cfs_list_empty(&rbp->rbp_bufs)) {
- LASSERT (rbp->rbp_credits > 0);
+ while (!list_empty(&rbp->rbp_bufs)) {
+ LASSERT(rbp->rbp_credits > 0);
- rb = cfs_list_entry(rbp->rbp_bufs.next,
- lnet_rtrbuf_t, rb_list);
- cfs_list_del(&rb->rb_list);
- lnet_destroy_rtrbuf(rb, npages);
- nbuffers++;
- }
+ rb = list_entry(rbp->rbp_bufs.next,
+ lnet_rtrbuf_t, rb_list);
+ list_del(&rb->rb_list);
+ lnet_destroy_rtrbuf(rb, npages);
+ nbuffers++;
+ }
- LASSERT (rbp->rbp_nbuffers == nbuffers);
- LASSERT (rbp->rbp_credits == nbuffers);
+ LASSERT(rbp->rbp_nbuffers == nbuffers);
+ LASSERT(rbp->rbp_credits == nbuffers);
- rbp->rbp_nbuffers = rbp->rbp_credits = 0;
+ rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}
int
return -ENOMEM;
}
- rbp->rbp_nbuffers++;
- rbp->rbp_credits++;
- rbp->rbp_mincredits++;
- cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
+ rbp->rbp_nbuffers++;
+ rbp->rbp_credits++;
+ rbp->rbp_mincredits++;
+ list_add(&rb->rb_list, &rbp->rbp_bufs);
/* No allocation "under fire" */
/* Otherwise we'd need code to schedule blocked msgs etc */
- LASSERT (!the_lnet.ln_routing);
- }
+ LASSERT(!the_lnet.ln_routing);
+ }
- LASSERT (rbp->rbp_credits == nbufs);
- return 0;
+ LASSERT(rbp->rbp_credits == nbufs);
+ return 0;
}
void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
- CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
- CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);
+ INIT_LIST_HEAD(&rbp->rbp_msgs);
+ INIT_LIST_HEAD(&rbp->rbp_bufs);
rbp->rbp_npages = npages;
rbp->rbp_credits = 0;
lnet_net_lock(0);
- version = the_lnet.ln_routers_version;
- cfs_list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
- lnet_ping_router_locked(rtr);
- LASSERT (version == the_lnet.ln_routers_version);
- }
+ version = the_lnet.ln_routers_version;
+ list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ lnet_ping_router_locked(rtr);
+ LASSERT(version == the_lnet.ln_routers_version);
+ }
lnet_net_unlock(0);
lnet_net_unlock(0);
*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
} else {
- cfs_list_t *n;
- cfs_list_t *r;
+ struct list_head *n;
+ struct list_head *r;
lnet_route_t *route = NULL;
lnet_remotenet_t *rnet = NULL;
int skip = off - 1;
- cfs_list_t *rn_list;
+ struct list_head *rn_list;
int i;
lnet_net_lock(0);
n = rn_list->next;
while (n != rn_list && route == NULL) {
- rnet = cfs_list_entry(n, lnet_remotenet_t,
- lrn_list);
+ rnet = list_entry(n, lnet_remotenet_t,
+ lrn_list);
r = rnet->lrn_routes.next;
while (r != &rnet->lrn_routes) {
lnet_route_t *re =
- cfs_list_entry(r, lnet_route_t,
- lr_list);
+ list_entry(r, lnet_route_t,
+ lr_list);
if (skip == 0) {
route = re;
break;
lnet_net_unlock(0);
*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
} else {
- cfs_list_t *r;
- struct lnet_peer *peer = NULL;
- int skip = off - 1;
+ struct list_head *r;
+ struct lnet_peer *peer = NULL;
+ int skip = off - 1;
lnet_net_lock(0);
r = the_lnet.ln_routers.next;
- while (r != &the_lnet.ln_routers) {
- lnet_peer_t *lp = cfs_list_entry(r, lnet_peer_t,
- lp_rtr_list);
+ while (r != &the_lnet.ln_routers) {
+ lnet_peer_t *lp = list_entry(r, lnet_peer_t,
+ lp_rtr_list);
if (skip == 0) {
peer = lp;
if ((peer->lp_ping_feats &
LNET_PING_FEAT_NI_STATUS) != 0) {
- cfs_list_for_each_entry(rtr, &peer->lp_routes,
- lr_gwlist) {
+ list_for_each_entry(rtr, &peer->lp_routes,
+ lr_gwlist) {
/* downis on any route should be the
* number of downis on the gateway */
if (rtr->lr_downis != 0) {
hoff++;
} else {
struct lnet_peer *peer;
- cfs_list_t *p;
+ struct list_head *p;
int skip;
again:
p = NULL;
p = ptable->pt_hash[hash].next;
while (p != &ptable->pt_hash[hash]) {
- lnet_peer_t *lp = cfs_list_entry(p, lnet_peer_t,
- lp_hashlist);
+ lnet_peer_t *lp = list_entry(p, lnet_peer_t,
+ lp_hashlist);
if (skip == 0) {
peer = lp;
"rtr", "max", "tx", "min");
LASSERT (tmpstr + tmpsiz - s > 0);
} else {
- cfs_list_t *n;
+ struct list_head *n;
lnet_ni_t *ni = NULL;
int skip = *ppos - 1;
n = the_lnet.ln_nis.next;
- while (n != &the_lnet.ln_nis) {
- lnet_ni_t *a_ni = cfs_list_entry(n, lnet_ni_t, ni_list);
+ while (n != &the_lnet.ln_nis) {
+ lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
if (skip == 0) {
ni = a_ni;
static void
brw_client_fini (sfw_test_instance_t *tsi)
{
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ srpc_bulk_t *bulk;
+ sfw_test_unit_t *tsu;
- LASSERT (tsi->tsi_is_client);
+ LASSERT(tsi->tsi_is_client);
- cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
- sfw_test_unit_t, tsu_list) {
- bulk = tsu->tsu_private;
- if (bulk == NULL) continue;
+ list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
+ bulk = tsu->tsu_private;
+ if (bulk == NULL)
+ continue;
- srpc_free_bulk(bulk);
- tsu->tsu_private = NULL;
- }
+ srpc_free_bulk(bulk);
+ tsu->tsu_private = NULL;
+ }
}
int
flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE)
return -EINVAL;
- cfs_list_for_each_entry_typed(tsu, &tsi->tsi_units,
- sfw_test_unit_t, tsu_list) {
+ list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
npg, len, opc == LST_BRW_READ);
- if (bulk == NULL) {
- brw_client_fini(tsi);
- return -ENOMEM;
- }
+ if (bulk == NULL) {
+ brw_client_fini(tsi);
+ return -ENOMEM;
+ }
- tsu->tsu_private = bulk;
- }
+ tsu->tsu_private = bulk;
+ }
return 0;
}
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
lstcon_rpc_done, (void *)crpc);
- if (crpc->crp_rpc == NULL)
- return -ENOMEM;
-
- crpc->crp_trans = NULL;
- crpc->crp_node = nd;
- crpc->crp_posted = 0;
- crpc->crp_finished = 0;
- crpc->crp_unpacked = 0;
- crpc->crp_status = 0;
- crpc->crp_stamp = 0;
+ if (crpc->crp_rpc == NULL)
+ return -ENOMEM;
+
+ crpc->crp_trans = NULL;
+ crpc->crp_node = nd;
+ crpc->crp_posted = 0;
+ crpc->crp_finished = 0;
+ crpc->crp_unpacked = 0;
+ crpc->crp_status = 0;
+ crpc->crp_stamp = 0;
crpc->crp_embedded = embedded;
- CFS_INIT_LIST_HEAD(&crpc->crp_link);
+ INIT_LIST_HEAD(&crpc->crp_link);
atomic_inc(&console_session.ses_rpc_counter);
- return 0;
+ return 0;
}
int
spin_lock(&console_session.ses_rpc_lock);
- if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
- crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
- lstcon_rpc_t, crp_link);
- cfs_list_del_init(&crpc->crp_link);
+ if (!list_empty(&console_session.ses_rpc_freelist)) {
+ crpc = list_entry(console_session.ses_rpc_freelist.next,
+ lstcon_rpc_t, crp_link);
+ list_del_init(&crpc->crp_link);
}
spin_unlock(&console_session.ses_rpc_lock);
void
lstcon_rpc_put(lstcon_rpc_t *crpc)
{
- srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
- int i;
+ srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
+ int i;
- LASSERT (cfs_list_empty(&crpc->crp_link));
+ LASSERT(list_empty(&crpc->crp_link));
- for (i = 0; i < bulk->bk_niov; i++) {
- if (bulk->bk_iovs[i].kiov_page == NULL)
- continue;
+ for (i = 0; i < bulk->bk_niov; i++) {
+ if (bulk->bk_iovs[i].kiov_page == NULL)
+ continue;
__free_page(bulk->bk_iovs[i].kiov_page);
- }
+ }
- srpc_client_rpc_decref(crpc->crp_rpc);
+ srpc_client_rpc_decref(crpc->crp_rpc);
if (crpc->crp_embedded) {
/* embedded RPC, don't recycle it */
} else {
spin_lock(&console_session.ses_rpc_lock);
- cfs_list_add(&crpc->crp_link,
- &console_session.ses_rpc_freelist);
+ list_add(&crpc->crp_link,
+ &console_session.ses_rpc_freelist);
spin_unlock(&console_session.ses_rpc_lock);
}
}
int
-lstcon_rpc_trans_prep(cfs_list_t *translist,
- int transop, lstcon_rpc_trans_t **transpp)
+lstcon_rpc_trans_prep(struct list_head *translist, int transop,
+ lstcon_rpc_trans_t **transpp)
{
- lstcon_rpc_trans_t *trans;
-
- if (translist != NULL) {
- cfs_list_for_each_entry_typed(trans, translist,
- lstcon_rpc_trans_t, tas_link) {
- /* Can't enqueue two private transaction on
- * the same object */
- if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
- return -EPERM;
- }
- }
+ lstcon_rpc_trans_t *trans;
+
+ if (translist != NULL) {
+ list_for_each_entry(trans, translist, tas_link) {
+ /* Can't enqueue two private transactions on
+ * the same object */
+ if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
+ return -EPERM;
+ }
+ }
- /* create a trans group */
- LIBCFS_ALLOC(trans, sizeof(*trans));
- if (trans == NULL)
- return -ENOMEM;
+ /* create a trans group */
+ LIBCFS_ALLOC(trans, sizeof(*trans));
+ if (trans == NULL)
+ return -ENOMEM;
- trans->tas_opc = transop;
+ trans->tas_opc = transop;
if (translist == NULL)
- CFS_INIT_LIST_HEAD(&trans->tas_olink);
- else
- cfs_list_add_tail(&trans->tas_olink, translist);
+ INIT_LIST_HEAD(&trans->tas_olink);
+ else
+ list_add_tail(&trans->tas_olink, translist);
- cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
+ list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
- CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
+ INIT_LIST_HEAD(&trans->tas_rpcs_list);
atomic_set(&trans->tas_remaining, 0);
init_waitqueue_head(&trans->tas_waitq);
void
lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
{
- cfs_list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
- crpc->crp_trans = trans;
+ list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
+ crpc->crp_trans = trans;
}
void
lstcon_rpc_t *crpc;
lstcon_node_t *nd;
- cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
- lstcon_rpc_t, crp_link) {
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
static int
lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
{
- if (console_session.ses_shutdown &&
- !cfs_list_empty(&trans->tas_olink)) /* Not an end session RPC */
- return 1;
+ if (console_session.ses_shutdown &&
+ !list_empty(&trans->tas_olink)) /* Not an end session RPC */
+ return 1;
return (atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
}
int
lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
{
- lstcon_rpc_t *crpc;
- int rc;
+ lstcon_rpc_t *crpc;
+ int rc;
- if (cfs_list_empty(&trans->tas_rpcs_list))
+ if (list_empty(&trans->tas_rpcs_list))
return 0;
- if (timeout < LST_TRANS_MIN_TIMEOUT)
- timeout = LST_TRANS_MIN_TIMEOUT;
+ if (timeout < LST_TRANS_MIN_TIMEOUT)
+ timeout = LST_TRANS_MIN_TIMEOUT;
- CDEBUG(D_NET, "Transaction %s started\n",
- lstcon_rpc_trans_name(trans->tas_opc));
+ CDEBUG(D_NET, "Transaction %s started\n",
+ lstcon_rpc_trans_name(trans->tas_opc));
- /* post all requests */
- cfs_list_for_each_entry_typed (crpc, &trans->tas_rpcs_list,
- lstcon_rpc_t, crp_link) {
- LASSERT (!crpc->crp_posted);
+ /* post all requests */
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ LASSERT(!crpc->crp_posted);
- lstcon_rpc_post(crpc);
- }
+ lstcon_rpc_post(crpc);
+ }
mutex_unlock(&console_session.ses_mutex);
void
lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
{
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
- int error;
+ lstcon_rpc_t *crpc;
+ srpc_msg_t *rep;
+ int error;
- LASSERT (stat != NULL);
+ LASSERT(stat != NULL);
- memset(stat, 0, sizeof(*stat));
+ memset(stat, 0, sizeof(*stat));
- cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
- lstcon_rpc_t, crp_link) {
- lstcon_rpc_stat_total(stat, 1);
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ lstcon_rpc_stat_total(stat, 1);
- LASSERT (crpc->crp_stamp != 0);
+ LASSERT(crpc->crp_stamp != 0);
error = lstcon_rpc_get_reply(crpc, &rep);
if (error != 0) {
int
lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- cfs_list_t *head_up,
- lstcon_rpc_readent_func_t readent)
+ struct list_head *head_up,
+ lstcon_rpc_readent_func_t readent)
{
- cfs_list_t tmp;
- cfs_list_t *next;
+ struct list_head tmp;
+ struct list_head *next;
lstcon_rpc_ent_t *ent;
srpc_generic_reply_t *rep;
lstcon_rpc_t *crpc;
struct timeval tv;
int error;
- LASSERT (head_up != NULL);
+ LASSERT(head_up != NULL);
- next = head_up;
+ next = head_up;
- cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
- lstcon_rpc_t, crp_link) {
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
if (copy_from_user(&tmp, next,
- sizeof(cfs_list_t)))
- return -EFAULT;
+ sizeof(struct list_head)))
+ return -EFAULT;
- if (tmp.next == head_up)
- return 0;
+ if (tmp.next == head_up)
+ return 0;
- next = tmp.next;
+ next = tmp.next;
- ent = cfs_list_entry(next, lstcon_rpc_ent_t, rpe_link);
+ ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
- LASSERT (crpc->crp_stamp != 0);
+ LASSERT(crpc->crp_stamp != 0);
error = lstcon_rpc_get_reply(crpc, &msg);
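lstcon_rpc_trans_interpreter() above walks a list_head chain that lives entirely in user memory: each node header is copied in with copy_from_user() before its next pointer is followed, and the walk stops when the chain wraps back to the user-space head. A sketch of that technique; the types and names (struct result_ent, fill_results()) are hypothetical and head_up is a user-space address:

/* Sketch only: walk a list_head chain resident in user memory. */
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct result_ent {
	struct list_head ent_link;
	int ent_status;
};

static int fill_results(struct list_head *head_up /* user-space address */)
{
	struct list_head *next = head_up;
	struct list_head tmp;
	int status = 0;

	for (;;) {
		struct result_ent *ent;

		/* copy the node header in before trusting its pointers */
		if (copy_from_user(&tmp, next, sizeof(tmp)))
			return -EFAULT;

		if (tmp.next == head_up) /* wrapped around: list exhausted */
			return 0;

		next = tmp.next;
		ent = list_entry(next, struct result_ent, ent_link);

		/* write a per-entry result back to the user-space entry */
		if (copy_to_user(&ent->ent_status, &status, sizeof(status)))
			return -EFAULT;
	}
}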
void
lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *tmp;
- int count = 0;
+ srpc_client_rpc_t *rpc;
+ lstcon_rpc_t *crpc;
+ lstcon_rpc_t *tmp;
+ int count = 0;
- cfs_list_for_each_entry_safe_typed(crpc, tmp,
- &trans->tas_rpcs_list,
- lstcon_rpc_t, crp_link) {
- rpc = crpc->crp_rpc;
+ list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
+ rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
if (!crpc->crp_posted || crpc->crp_finished) {
spin_unlock(&rpc->crpc_lock);
- cfs_list_del_init(&crpc->crp_link);
- lstcon_rpc_put(crpc);
+ list_del_init(&crpc->crp_link);
+ lstcon_rpc_put(crpc);
- continue;
- }
+ continue;
+ }
- /* rpcs can be still not callbacked (even LNetMDUnlink is called)
- * because huge timeout for inaccessible network, don't make
- * user wait for them, just abandon them, they will be recycled
- * in callback */
+ /* RPCs may still not have been called back (even though
+ * LNetMDUnlink was called) because of the huge timeout for an
+ * inaccessible network; don't make the user wait for them,
+ * just abandon them, they will be recycled in the callback */
- LASSERT (crpc->crp_status != 0);
+ LASSERT(crpc->crp_status != 0);
- crpc->crp_node = NULL;
- crpc->crp_trans = NULL;
- cfs_list_del_init(&crpc->crp_link);
- count ++;
+ crpc->crp_node = NULL;
+ crpc->crp_trans = NULL;
+ list_del_init(&crpc->crp_link);
+ count++;
spin_unlock(&rpc->crpc_lock);
atomic_dec(&trans->tas_remaining);
- }
+ }
- LASSERT (atomic_read(&trans->tas_remaining) == 0);
+ LASSERT(atomic_read(&trans->tas_remaining) == 0);
- cfs_list_del(&trans->tas_link);
- if (!cfs_list_empty(&trans->tas_olink))
- cfs_list_del(&trans->tas_olink);
+ list_del(&trans->tas_link);
+ if (!list_empty(&trans->tas_olink))
+ list_del(&trans->tas_olink);
- CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
- lstcon_rpc_trans_name(trans->tas_opc), count);
+ CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
+ lstcon_rpc_trans_name(trans->tas_opc), count);
- LIBCFS_FREE(trans, sizeof(*trans));
+ LIBCFS_FREE(trans, sizeof(*trans));
- return;
+ return;
}
int
start = ((idx / dist) * span) % grp->grp_nnode;
end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
- cfs_list_for_each_entry_typed(ndl, &grp->grp_ndl_list,
- lstcon_ndlink_t, ndl_link) {
- nd = ndl->ndl_node;
- if (i < start) {
- i ++;
- continue;
- }
+ list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
+ nd = ndl->ndl_node;
+ if (i < start) {
+ i++;
+ continue;
+ }
- if (i > (end >= start ? end: grp->grp_nnode))
- break;
+ if (i > (end >= start ? end : grp->grp_nnode))
+ break;
- pid = lstcon_next_id((i - start), nkiov, kiov);
- pid->nid = nd->nd_id.nid;
- pid->pid = nd->nd_id.pid;
- i++;
- }
+ pid = lstcon_next_id((i - start), nkiov, kiov);
+ pid->nid = nd->nd_id.nid;
+ pid->pid = nd->nd_id.pid;
+ i++;
+ }
- if (start <= end) /* done */
- return 0;
+ if (start <= end) /* done */
+ return 0;
- cfs_list_for_each_entry_typed(ndl, &grp->grp_ndl_list,
- lstcon_ndlink_t, ndl_link) {
- if (i > grp->grp_nnode + end)
- break;
+ list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
+ if (i > grp->grp_nnode + end)
+ break;
- nd = ndl->ndl_node;
- pid = lstcon_next_id((i - start), nkiov, kiov);
- pid->nid = nd->nd_id.nid;
- pid->pid = nd->nd_id.pid;
- i++;
- }
+ nd = ndl->ndl_node;
+ pid = lstcon_next_id((i - start), nkiov, kiov);
+ pid->nid = nd->nd_id.nid;
+ pid->pid = nd->nd_id.pid;
+ i++;
+ }
- return 0;
+ return 0;
}
int
}
int
-lstcon_rpc_trans_ndlist(cfs_list_t *ndlist,
- cfs_list_t *translist, int transop,
- void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp)
+lstcon_rpc_trans_ndlist(struct list_head *ndlist,
+ struct list_head *translist, int transop,
+ void *arg, lstcon_rpc_cond_func_t condition,
+ lstcon_rpc_trans_t **transpp)
{
lstcon_rpc_trans_t *trans;
lstcon_ndlink_t *ndl;
}
feats = trans->tas_features;
- cfs_list_for_each_entry_typed(ndl, ndlist, lstcon_ndlink_t, ndl_link) {
+ list_for_each_entry(ndl, ndlist, ndl_link) {
rc = condition == NULL ? 1 :
condition(transop, ndl->ndl_node, arg);
(time_t)console_session.ses_timeout)
console_session.ses_expired = 1;
- trans = console_session.ses_ping;
+ trans = console_session.ses_ping;
- LASSERT (trans != NULL);
+ LASSERT(trans != NULL);
- cfs_list_for_each_entry_typed(ndl, &console_session.ses_ndl_list,
- lstcon_ndlink_t, ndl_link) {
- nd = ndl->ndl_node;
+ list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
+ nd = ndl->ndl_node;
if (console_session.ses_expired) {
/* idle console, end session on all nodes */
crpc = &nd->nd_ping;
- if (crpc->crp_rpc != NULL) {
- LASSERT (crpc->crp_trans == trans);
- LASSERT (!cfs_list_empty(&crpc->crp_link));
+ if (crpc->crp_rpc != NULL) {
+ LASSERT(crpc->crp_trans == trans);
+ LASSERT(!list_empty(&crpc->crp_link));
spin_lock(&crpc->crp_rpc->crpc_lock);
spin_unlock(&crpc->crp_rpc->crpc_lock);
- lstcon_rpc_get_reply(crpc, &rep);
+ lstcon_rpc_get_reply(crpc, &rep);
- cfs_list_del_init(&crpc->crp_link);
+ list_del_init(&crpc->crp_link);
- lstcon_rpc_put(crpc);
- }
+ lstcon_rpc_put(crpc);
+ }
if (nd->nd_state != LST_NODE_ACTIVE)
continue;
lstcon_rpc_trans_addreq(trans, crpc);
lstcon_rpc_post(crpc);
- count ++;
+ count++;
}
if (console_session.ses_expired) {
int
lstcon_rpc_pinger_start(void)
{
- stt_timer_t *ptimer;
- int rc;
+ stt_timer_t *ptimer;
+ int rc;
- LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
- LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT(list_empty(&console_session.ses_rpc_freelist));
+ LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
&console_session.ses_ping);
void
lstcon_rpc_cleanup_wait(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- cfs_list_t *pacer;
- cfs_list_t zlist;
+ lstcon_rpc_trans_t *trans;
+ lstcon_rpc_t *crpc;
+ struct list_head *pacer;
+ struct list_head zlist;
- /* Called with hold of global mutex */
+ /* Called with the global mutex held */
- LASSERT (console_session.ses_shutdown);
+ LASSERT(console_session.ses_shutdown);
- while (!cfs_list_empty(&console_session.ses_trans_list)) {
- cfs_list_for_each(pacer, &console_session.ses_trans_list) {
- trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
- tas_link);
+ while (!list_empty(&console_session.ses_trans_list)) {
+ list_for_each(pacer, &console_session.ses_trans_list) {
+ trans = list_entry(pacer, lstcon_rpc_trans_t,
+ tas_link);
CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
lstcon_rpc_trans_name(trans->tas_opc));
cfs_pause(cfs_time_seconds(1));
mutex_lock(&console_session.ses_mutex);
- }
+ }
spin_lock(&console_session.ses_rpc_lock);
"waiting for %d console RPCs to being recycled\n",
atomic_read(&console_session.ses_rpc_counter));
- cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
- cfs_list_del_init(&console_session.ses_rpc_freelist);
+ list_add(&zlist, &console_session.ses_rpc_freelist);
+ list_del_init(&console_session.ses_rpc_freelist);
spin_unlock(&console_session.ses_rpc_lock);
- while (!cfs_list_empty(&zlist)) {
- crpc = cfs_list_entry(zlist.next, lstcon_rpc_t, crp_link);
+ while (!list_empty(&zlist)) {
+ crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
- cfs_list_del(&crpc->crp_link);
- LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
- }
+ list_del(&crpc->crp_link);
+ LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
+ }
}
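The list_add()/list_del_init() pair in lstcon_rpc_cleanup_wait() above transfers every entry from ses_rpc_freelist onto the local zlist head, and works even though zlist is never initialized first. The more familiar spelling is INIT_LIST_HEAD() plus list_splice_init(); both forms are sketched below for comparison:

/* Sketch only: two equivalent ways of moving every entry from 'src'
 * onto a local head. The add/del pair works even when 'dst' starts
 * out uninitialized; list_splice_init() needs an initialized
 * destination. */
#include <linux/list.h>

static void move_all_add_del(struct list_head *dst, struct list_head *src)
{
	list_add(dst, src);	/* link dst into the chain, right after src */
	list_del_init(src);	/* unlink src; dst is now the list head */
}

static void move_all_splice(struct list_head *dst, struct list_head *src)
{
	INIT_LIST_HEAD(dst);
	list_splice_init(src, dst);
}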
int
lstcon_rpc_module_init(void)
{
- CFS_INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
+ INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger;
console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer;
spin_lock_init(&console_session.ses_rpc_lock);
atomic_set(&console_session.ses_rpc_counter, 0);
- CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
+ INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
return 0;
}
void
lstcon_rpc_module_fini(void)
{
- LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
- LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT(list_empty(&console_session.ses_rpc_freelist));
+ LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
}
#endif
struct lstcon_node;
typedef struct lstcon_rpc {
- cfs_list_t crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
- struct lstcon_node *crp_node; /* destination node */
- struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
+ struct list_head crp_link; /* chain on rpc transaction */
+ srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct lstcon_node *crp_node; /* destination node */
+ struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
unsigned int crp_posted:1; /* rpc is posted */
unsigned int crp_finished:1; /* rpc is finished */
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
- cfs_list_t tas_olink; /* link chain on owner list */
- cfs_list_t tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
+ /* link chain on owner list */
+ struct list_head tas_olink;
+ /* link chain on global list */
+ struct list_head tas_link;
+ /* operation code of transaction */
+ int tas_opc;
/* features mask is uptodate */
- unsigned tas_feats_updated;
+ unsigned tas_feats_updated;
/* test features mask */
- unsigned tas_features;
- wait_queue_head_t tas_waitq; /* wait queue head */
- atomic_t tas_remaining; /* # of un-scheduled rpcs */
- cfs_list_t tas_rpcs_list; /* queued requests */
+ unsigned tas_features;
+ wait_queue_head_t tas_waitq; /* wait queue head */
+ atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ struct list_head tas_rpcs_list; /* queued requests */
} lstcon_rpc_trans_t;
#define LST_TRANS_PRIVATE 0x1000
int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
lstcon_rpc_t **crpc);
void lstcon_rpc_put(lstcon_rpc_t *crpc);
-int lstcon_rpc_trans_prep(cfs_list_t *translist,
- int transop, lstcon_rpc_trans_t **transpp);
-int lstcon_rpc_trans_ndlist(cfs_list_t *ndlist,
- cfs_list_t *translist, int transop,
- void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp);
+int lstcon_rpc_trans_prep(struct list_head *translist,
+ int transop, lstcon_rpc_trans_t **transpp);
+int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
+ struct list_head *translist, int transop,
+ void *arg, lstcon_rpc_cond_func_t condition,
+ lstcon_rpc_trans_t **transpp);
void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
- lstcon_trans_stat_t *stat);
+ lstcon_trans_stat_t *stat);
int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- cfs_list_t *head_up,
- lstcon_rpc_readent_func_t readent);
+ struct list_head *head_up,
+ lstcon_rpc_readent_func_t readent);
void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req);
#endif
-#endif
+#endif
static int
lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
{
- lstcon_ndlink_t *ndl;
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
+ lstcon_ndlink_t *ndl;
+ unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
- LASSERT (id.nid != LNET_NID_ANY);
+ LASSERT(id.nid != LNET_NID_ANY);
- cfs_list_for_each_entry_typed(ndl, &console_session.ses_ndl_hash[idx],
- lstcon_ndlink_t, ndl_hlink) {
- if (ndl->ndl_node->nd_id.nid != id.nid ||
- ndl->ndl_node->nd_id.pid != id.pid)
- continue;
+ list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx],
+ ndl_hlink) {
+ if (ndl->ndl_node->nd_id.nid != id.nid ||
+ ndl->ndl_node->nd_id.pid != id.pid)
+ continue;
- lstcon_node_get(ndl->ndl_node);
- *ndpp = ndl->ndl_node;
- return 0;
- }
+ lstcon_node_get(ndl->ndl_node);
+ *ndpp = ndl->ndl_node;
+ return 0;
+ }
if (!create)
return -ENOENT;
ndl->ndl_node->nd_timeout = 0;
memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
- /* queued in global hash & list, no refcount is taken by
- * global hash & list, if caller release his refcount,
- * node will be released */
- cfs_list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
- cfs_list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
+ /* queued in global hash & list; no refcount is taken by the
+ * global hash & list, so if the caller releases its refcount
+ * the node will be released */
+ list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
+ list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
- return 0;
+ return 0;
}
void
lstcon_node_put(lstcon_node_t *nd)
{
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
- LASSERT (nd->nd_ref > 0);
+ LASSERT(nd->nd_ref > 0);
- if (--nd->nd_ref > 0)
- return;
+ if (--nd->nd_ref > 0)
+ return;
- ndl = (lstcon_ndlink_t *)(nd + 1);
+ ndl = (lstcon_ndlink_t *)(nd + 1);
- LASSERT (!cfs_list_empty(&ndl->ndl_link));
- LASSERT (!cfs_list_empty(&ndl->ndl_hlink));
+ LASSERT(!list_empty(&ndl->ndl_link));
+ LASSERT(!list_empty(&ndl->ndl_hlink));
- /* remove from session */
- cfs_list_del(&ndl->ndl_link);
- cfs_list_del(&ndl->ndl_hlink);
+ /* remove from session */
+ list_del(&ndl->ndl_link);
+ list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+ LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
}
static int
-lstcon_ndlink_find(cfs_list_t *hash,
+lstcon_ndlink_find(struct list_head *hash,
lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
{
- unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- int rc;
+ unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
+ lstcon_ndlink_t *ndl;
+ lstcon_node_t *nd;
+ int rc;
- if (id.nid == LNET_NID_ANY)
- return -EINVAL;
+ if (id.nid == LNET_NID_ANY)
+ return -EINVAL;
- /* search in hash */
- cfs_list_for_each_entry_typed(ndl, &hash[idx],
- lstcon_ndlink_t, ndl_hlink) {
- if (ndl->ndl_node->nd_id.nid != id.nid ||
- ndl->ndl_node->nd_id.pid != id.pid)
- continue;
+ /* search in hash */
+ list_for_each_entry(ndl, &hash[idx], ndl_hlink) {
+ if (ndl->ndl_node->nd_id.nid != id.nid ||
+ ndl->ndl_node->nd_id.pid != id.pid)
+ continue;
- *ndlpp = ndl;
- return 0;
- }
+ *ndlpp = ndl;
+ return 0;
+ }
if (create == 0)
return -ENOENT;
*ndlpp = ndl;
- ndl->ndl_node = nd;
- CFS_INIT_LIST_HEAD(&ndl->ndl_link);
- cfs_list_add_tail(&ndl->ndl_hlink, &hash[idx]);
+ ndl->ndl_node = nd;
+ INIT_LIST_HEAD(&ndl->ndl_link);
+ list_add_tail(&ndl->ndl_hlink, &hash[idx]);
- return 0;
+ return 0;
}
static void
lstcon_ndlink_release(lstcon_ndlink_t *ndl)
{
- LASSERT (cfs_list_empty(&ndl->ndl_link));
- LASSERT (!cfs_list_empty(&ndl->ndl_hlink));
+ LASSERT(list_empty(&ndl->ndl_link));
+ LASSERT(!list_empty(&ndl->ndl_hlink));
- cfs_list_del(&ndl->ndl_hlink); /* delete from hash */
+ list_del(&ndl->ndl_hlink); /* delete from hash */
lstcon_node_put(ndl->ndl_node);
LIBCFS_FREE(ndl, sizeof(*ndl));
static int
lstcon_group_alloc(char *name, lstcon_group_t **grpp)
{
- lstcon_group_t *grp;
- int i;
+ lstcon_group_t *grp;
+ int i;
LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
grp_ndl_hash[LST_NODE_HASHSIZE]));
strncpy(grp->grp_name, name, sizeof(grp->grp_name));
}
- CFS_INIT_LIST_HEAD(&grp->grp_link);
- CFS_INIT_LIST_HEAD(&grp->grp_ndl_list);
- CFS_INIT_LIST_HEAD(&grp->grp_trans_list);
+ INIT_LIST_HEAD(&grp->grp_link);
+ INIT_LIST_HEAD(&grp->grp_ndl_list);
+ INIT_LIST_HEAD(&grp->grp_trans_list);
- for (i = 0; i < LST_NODE_HASHSIZE; i++)
- CFS_INIT_LIST_HEAD(&grp->grp_ndl_hash[i]);
+ for (i = 0; i < LST_NODE_HASHSIZE; i++)
+ INIT_LIST_HEAD(&grp->grp_ndl_hash[i]);
- *grpp = grp;
+ *grpp = grp;
- return 0;
+ return 0;
}
static void
lstcon_group_addref(lstcon_group_t *grp)
{
- grp->grp_ref ++;
+ grp->grp_ref++;
}
static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
static void
lstcon_group_drain(lstcon_group_t *grp, int keep)
{
- lstcon_ndlink_t *ndl;
- lstcon_ndlink_t *tmp;
+ lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *tmp;
- cfs_list_for_each_entry_safe_typed(ndl, tmp, &grp->grp_ndl_list,
- lstcon_ndlink_t, ndl_link) {
- if ((ndl->ndl_node->nd_state & keep) == 0)
- lstcon_group_ndlink_release(grp, ndl);
- }
+ list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
+ if ((ndl->ndl_node->nd_state & keep) == 0)
+ lstcon_group_ndlink_release(grp, ndl);
+ }
}
static void
lstcon_group_decref(lstcon_group_t *grp)
{
- int i;
+ int i;
- if (--grp->grp_ref > 0)
- return;
+ if (--grp->grp_ref > 0)
+ return;
- if (!cfs_list_empty(&grp->grp_link))
- cfs_list_del(&grp->grp_link);
+ if (!list_empty(&grp->grp_link))
+ list_del(&grp->grp_link);
- lstcon_group_drain(grp, 0);
+ lstcon_group_drain(grp, 0);
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT (cfs_list_empty(&grp->grp_ndl_hash[i]));
- }
+ for (i = 0; i < LST_NODE_HASHSIZE; i++)
+ LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
- grp_ndl_hash[LST_NODE_HASHSIZE]));
+ LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ grp_ndl_hash[LST_NODE_HASHSIZE]));
}
static int
lstcon_group_find(const char *name, lstcon_group_t **grpp)
{
- lstcon_group_t *grp;
+ lstcon_group_t *grp;
- cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
- lstcon_group_t, grp_link) {
+ list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
continue;
lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
lstcon_ndlink_t **ndlpp, int create)
{
- int rc;
+ int rc;
- rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
- if (rc != 0)
- return rc;
+ rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
+ if (rc != 0)
+ return rc;
- if (!cfs_list_empty(&(*ndlpp)->ndl_link))
- return 0;
+ if (!list_empty(&(*ndlpp)->ndl_link))
+ return 0;
- cfs_list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
- grp->grp_nnode ++;
+ list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
+ grp->grp_nnode++;
- return 0;
+ return 0;
}
static void
lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
{
- cfs_list_del_init(&ndl->ndl_link);
- lstcon_ndlink_release(ndl);
- grp->grp_nnode --;
+ list_del_init(&ndl->ndl_link);
+ lstcon_ndlink_release(ndl);
+ grp->grp_nnode--;
}
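Note why lstcon_group_ndlink_release() above keeps list_del_init() rather than plain list_del(): lstcon_ndlink_release() later asserts list_empty(&ndl->ndl_link), and only the _init variant leaves the entry re-initialised so that test is meaningful (list_del() leaves the pointers set to the list poison values). A small sketch of the distinction, with illustrative types:

#include <linux/list.h>
#include <linux/bug.h>

struct demo_link {
        struct list_head dl_link;
};

/* Detach an entry so that "is it linked?" queries stay valid afterwards. */
static void demo_detach(struct demo_link *lnk)
{
        /*
         * list_del() would poison dl_link, making a later list_empty()
         * check meaningless; list_del_init() re-initialises the node,
         * so the assertion below holds.
         */
        list_del_init(&lnk->dl_link);
        BUG_ON(!list_empty(&lnk->dl_link));
}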
static void
lstcon_group_ndlink_move(lstcon_group_t *old,
lstcon_group_t *new, lstcon_ndlink_t *ndl)
{
- unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
- LST_NODE_HASHSIZE;
+ unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
+ LST_NODE_HASHSIZE;
- cfs_list_del(&ndl->ndl_hlink);
- cfs_list_del(&ndl->ndl_link);
- old->grp_nnode --;
+ list_del(&ndl->ndl_hlink);
+ list_del(&ndl->ndl_link);
+ old->grp_nnode--;
- cfs_list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
- cfs_list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
- new->grp_nnode ++;
+ list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
+ list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
+ new->grp_nnode++;
- return;
+ return;
}
static void
lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
{
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
- while (!cfs_list_empty(&old->grp_ndl_list)) {
- ndl = cfs_list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
- lstcon_group_ndlink_move(old, new, ndl);
- }
+ while (!list_empty(&old->grp_ndl_list)) {
+ ndl = list_entry(old->grp_ndl_list.next,
+ lstcon_ndlink_t, ndl_link);
+ lstcon_group_ndlink_move(old, new, ndl);
+ }
}
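lstcon_group_move() drains the source list with the "peel the first entry while non-empty" idiom instead of list_for_each_entry_safe(), since each pass unlinks the entry through a helper that also rehashes it. A minimal sketch of the same drain idiom under the standard list API, with hypothetical names and kfree() in place of the helper:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
        struct list_head di_link;
};

/* Detach and free every item queued on 'head'. */
static void demo_list_drain(struct list_head *head)
{
        struct demo_item *item;

        while (!list_empty(head)) {
                item = list_entry(head->next, struct demo_item, di_link);
                list_del(&item->di_link);       /* unlink before freeing */
                kfree(item);
        }
}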
int
static int
lstcon_group_nodes_add(lstcon_group_t *grp,
int count, lnet_process_id_t *ids_up,
- unsigned *featp, cfs_list_t *result_up)
+ unsigned *featp, struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_ndlink_t *ndl;
static int
lstcon_group_nodes_remove(lstcon_group_t *grp,
int count, lnet_process_id_t *ids_up,
- cfs_list_t *result_up)
+ struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_ndlink_t *ndl;
return -ENOMEM;
}
- cfs_list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
+ list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
- return rc;
+ return rc;
}
int
lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up,
- unsigned *featp, cfs_list_t *result_up)
+ unsigned *featp, struct list_head *result_up)
{
lstcon_group_t *grp;
int rc;
lstcon_group_drain(grp, args);
- lstcon_group_put(grp);
- /* release empty group */
- if (cfs_list_empty(&grp->grp_ndl_list))
- lstcon_group_put(grp);
+ lstcon_group_put(grp);
+ /* release empty group */
+ if (list_empty(&grp->grp_ndl_list))
+ lstcon_group_put(grp);
- return 0;
+ return 0;
}
int
lstcon_nodes_remove(char *name, int count,
- lnet_process_id_t *ids_up, cfs_list_t *result_up)
+ lnet_process_id_t *ids_up, struct list_head *result_up)
{
lstcon_group_t *grp = NULL;
int rc;
rc = lstcon_group_nodes_remove(grp, count, ids_up, result_up);
- lstcon_group_put(grp);
- /* release empty group */
- if (cfs_list_empty(&grp->grp_ndl_list))
- lstcon_group_put(grp);
+ lstcon_group_put(grp);
+ /* release empty group */
+ if (list_empty(&grp->grp_ndl_list))
+ lstcon_group_put(grp);
- return rc;
+ return rc;
}
int
-lstcon_group_refresh(char *name, cfs_list_t *result_up)
+lstcon_group_refresh(char *name, struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
int
lstcon_group_list(int index, int len, char *name_up)
{
- lstcon_group_t *grp;
+ lstcon_group_t *grp;
- LASSERT (index >= 0);
- LASSERT (name_up != NULL);
+ LASSERT(index >= 0);
+ LASSERT(name_up != NULL);
- cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
- lstcon_group_t, grp_link) {
- if (index-- == 0) {
+ list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
+ if (index-- == 0) {
return copy_to_user(name_up, grp->grp_name, len) ?
- -EFAULT : 0;
- }
- }
+ -EFAULT : 0;
+ }
+ }
- return -ENOENT;
+ return -ENOENT;
}
static int
-lstcon_nodes_getent(cfs_list_t *head, int *index_p,
+lstcon_nodes_getent(struct list_head *head, int *index_p,
int *count_p, lstcon_node_ent_t *dents_up)
{
lstcon_ndlink_t *ndl;
int count = 0;
int index = 0;
- LASSERT (index_p != NULL && count_p != NULL);
- LASSERT (dents_up != NULL);
- LASSERT (*index_p >= 0);
- LASSERT (*count_p > 0);
+ LASSERT(index_p != NULL && count_p != NULL);
+ LASSERT(dents_up != NULL);
+ LASSERT(*index_p >= 0);
+ LASSERT(*count_p > 0);
- cfs_list_for_each_entry_typed(ndl, head, lstcon_ndlink_t, ndl_link) {
- if (index++ < *index_p)
- continue;
+ list_for_each_entry(ndl, head, ndl_link) {
+ if (index++ < *index_p)
+ continue;
if (count >= *count_p)
break;
return -ENOMEM;
}
- memset(gentp, 0, sizeof(lstcon_ndlist_ent_t));
+ memset(gentp, 0, sizeof(lstcon_ndlist_ent_t));
- cfs_list_for_each_entry_typed(ndl, &grp->grp_ndl_list,
- lstcon_ndlink_t, ndl_link)
+ list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
rc = copy_to_user(gents_p, gentp,
- sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
+ sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
- LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
+ LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
- lstcon_group_put(grp);
+ lstcon_group_put(grp);
- return 0;
+ return 0;
}
static int
lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
{
- lstcon_batch_t *bat;
+ lstcon_batch_t *bat;
- cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
- lstcon_batch_t, bat_link) {
+ list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
*batpp = bat;
return 0;
}
LIBCFS_ALLOC(bat->bat_cli_hash,
- sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
if (bat->bat_cli_hash == NULL) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
}
LIBCFS_ALLOC(bat->bat_srv_hash,
- sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
if (bat->bat_srv_hash == NULL) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
bat->bat_ntest = 0;
bat->bat_state = LST_BATCH_IDLE;
- CFS_INIT_LIST_HEAD(&bat->bat_cli_list);
- CFS_INIT_LIST_HEAD(&bat->bat_srv_list);
- CFS_INIT_LIST_HEAD(&bat->bat_test_list);
- CFS_INIT_LIST_HEAD(&bat->bat_trans_list);
+ INIT_LIST_HEAD(&bat->bat_cli_list);
+ INIT_LIST_HEAD(&bat->bat_srv_list);
+ INIT_LIST_HEAD(&bat->bat_test_list);
+ INIT_LIST_HEAD(&bat->bat_trans_list);
for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- CFS_INIT_LIST_HEAD(&bat->bat_cli_hash[i]);
- CFS_INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
+ INIT_LIST_HEAD(&bat->bat_cli_hash[i]);
+ INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
}
- cfs_list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
+ list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
return rc;
}
int
lstcon_batch_list(int index, int len, char *name_up)
{
- lstcon_batch_t *bat;
+ lstcon_batch_t *bat;
- LASSERT (name_up != NULL);
- LASSERT (index >= 0);
+ LASSERT(name_up != NULL);
+ LASSERT(index >= 0);
- cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
- lstcon_batch_t, bat_link) {
- if (index-- == 0) {
+ list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
+ if (index-- == 0) {
return copy_to_user(name_up, bat->bat_name, len) ?
- -EFAULT: 0;
- }
- }
+ -EFAULT : 0;
+ }
+ }
- return -ENOENT;
+ return -ENOENT;
}
int
lstcon_node_ent_t *dents_up)
{
lstcon_test_batch_ent_t *entp;
- cfs_list_t *clilst;
- cfs_list_t *srvlst;
+ struct list_head *clilst;
+ struct list_head *srvlst;
lstcon_test_t *test = NULL;
lstcon_batch_t *bat;
lstcon_ndlink_t *ndl;
return -ENOENT;
}
- if (testidx > 0) {
- /* query test, test index start from 1 */
- cfs_list_for_each_entry_typed(test, &bat->bat_test_list,
- lstcon_test_t, tes_link) {
- if (testidx-- == 1)
- break;
- }
+ if (testidx > 0) {
+ /* query test, test index starts from 1 */
+ list_for_each_entry(test, &bat->bat_test_list, tes_link) {
+ if (testidx-- == 1)
+ break;
+ }
if (testidx > 0) {
CDEBUG(D_NET, "Can't find specified test in batch\n");
entp->u.tbe_test.tse_concur = test->tes_concur;
}
- cfs_list_for_each_entry_typed(ndl, clilst, lstcon_ndlink_t, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle);
+ list_for_each_entry(ndl, clilst, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle);
- cfs_list_for_each_entry_typed(ndl, srvlst, lstcon_ndlink_t, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
+ list_for_each_entry(ndl, srvlst, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
rc = copy_to_user(ent_up, entp,
- sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+ sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
- LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
+ LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
- return rc;
+ return rc;
}
int
static int
lstcon_batch_op(lstcon_batch_t *bat, int transop,
- cfs_list_t *result_up)
+ struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
int rc;
}
int
-lstcon_batch_run(char *name, int timeout, cfs_list_t *result_up)
+lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
{
lstcon_batch_t *bat;
int rc;
}
int
-lstcon_batch_stop(char *name, int force, cfs_list_t *result_up)
+lstcon_batch_stop(char *name, int force, struct list_head *result_up)
{
lstcon_batch_t *bat;
int rc;
lstcon_test_t *test;
int i;
- cfs_list_del(&bat->bat_link);
+ list_del(&bat->bat_link);
- while (!cfs_list_empty(&bat->bat_test_list)) {
- test = cfs_list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
- LASSERT (cfs_list_empty(&test->tes_trans_list));
+ while (!list_empty(&bat->bat_test_list)) {
+ test = list_entry(bat->bat_test_list.next,
+ lstcon_test_t, tes_link);
+ LASSERT(list_empty(&test->tes_trans_list));
- cfs_list_del(&test->tes_link);
+ list_del(&test->tes_link);
- lstcon_group_put(test->tes_src_grp);
- lstcon_group_put(test->tes_dst_grp);
+ lstcon_group_put(test->tes_src_grp);
+ lstcon_group_put(test->tes_dst_grp);
- LIBCFS_FREE(test, offsetof(lstcon_test_t,
- tes_param[test->tes_paramlen]));
- }
+ LIBCFS_FREE(test, offsetof(lstcon_test_t,
+ tes_param[test->tes_paramlen]));
+ }
- LASSERT (cfs_list_empty(&bat->bat_trans_list));
+ LASSERT(list_empty(&bat->bat_trans_list));
- while (!cfs_list_empty(&bat->bat_cli_list)) {
- ndl = cfs_list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
- cfs_list_del_init(&ndl->ndl_link);
+ while (!list_empty(&bat->bat_cli_list)) {
+ ndl = list_entry(bat->bat_cli_list.next,
+ lstcon_ndlink_t, ndl_link);
+ list_del_init(&ndl->ndl_link);
- lstcon_ndlink_release(ndl);
- }
+ lstcon_ndlink_release(ndl);
+ }
- while (!cfs_list_empty(&bat->bat_srv_list)) {
- ndl = cfs_list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
- cfs_list_del_init(&ndl->ndl_link);
+ while (!list_empty(&bat->bat_srv_list)) {
+ ndl = list_entry(bat->bat_srv_list.next,
+ lstcon_ndlink_t, ndl_link);
+ list_del_init(&ndl->ndl_link);
- lstcon_ndlink_release(ndl);
- }
+ lstcon_ndlink_release(ndl);
+ }
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT (cfs_list_empty(&bat->bat_cli_hash[i]));
- LASSERT (cfs_list_empty(&bat->bat_srv_hash[i]));
- }
+ for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+ LASSERT(list_empty(&bat->bat_cli_hash[i]));
+ LASSERT(list_empty(&bat->bat_srv_hash[i]));
+ }
- LIBCFS_FREE(bat->bat_cli_hash,
- sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat->bat_srv_hash,
- sizeof(cfs_list_t) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat->bat_cli_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat->bat_srv_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
}
int
lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
- lstcon_test_t *test;
- lstcon_batch_t *batch;
- lstcon_ndlink_t *ndl;
- cfs_list_t *hash;
- cfs_list_t *head;
+ lstcon_test_t *test;
+ lstcon_batch_t *batch;
+ lstcon_ndlink_t *ndl;
+ struct list_head *hash;
+ struct list_head *head;
- test = (lstcon_test_t *)arg;
- LASSERT (test != NULL);
+ test = (lstcon_test_t *)arg;
+ LASSERT(test != NULL);
- batch = test->tes_batch;
- LASSERT (batch != NULL);
+ batch = test->tes_batch;
+ LASSERT(batch != NULL);
if (test->tes_oneside &&
transop == LST_TRANS_TSBSRVADD)
if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
return -ENOMEM;
- if (cfs_list_empty(&ndl->ndl_link))
- cfs_list_add_tail(&ndl->ndl_link, head);
+ if (list_empty(&ndl->ndl_link))
+ list_add_tail(&ndl->ndl_link, head);
- return 1;
+ return 1;
}
static int
-lstcon_test_nodes_add(lstcon_test_t *test, cfs_list_t *result_up)
+lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
return rc;
}
- cfs_list_for_each_entry_typed(ndl, &(*grp)->grp_ndl_list,
- lstcon_ndlink_t, ndl_link) {
+ list_for_each_entry(ndl, &(*grp)->grp_ndl_list, ndl_link) {
if (ndl->ndl_node->nd_state == LST_NODE_ACTIVE) {
return 0;
}
int concur, int dist, int span,
char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
- cfs_list_t *result_up)
+ struct list_head *result_up)
{
lstcon_test_t *test = NULL;
int rc;
test->tes_cliidx = 0; /* just used for creating RPC */
test->tes_src_grp = src_grp;
test->tes_dst_grp = dst_grp;
- CFS_INIT_LIST_HEAD(&test->tes_trans_list);
+ INIT_LIST_HEAD(&test->tes_trans_list);
if (param != NULL) {
test->tes_paramlen = paramlen;
batch_name);
/* add to test list anyway, so user can check what's going on */
- cfs_list_add_tail(&test->tes_link, &batch->bat_test_list);
+ list_add_tail(&test->tes_link, &batch->bat_test_list);
batch->bat_ntest++;
test->tes_hdr.tsb_index = batch->bat_ntest;
int
lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
{
- lstcon_test_t *test;
+ lstcon_test_t *test;
- cfs_list_for_each_entry_typed(test, &batch->bat_test_list,
- lstcon_test_t, tes_link) {
- if (idx == test->tes_hdr.tsb_index) {
- *testpp = test;
- return 0;
- }
- }
+ list_for_each_entry(test, &batch->bat_test_list, tes_link) {
+ if (idx == test->tes_hdr.tsb_index) {
+ *testpp = test;
+ return 0;
+ }
+ }
- return -ENOENT;
+ return -ENOENT;
}
int
int
lstcon_test_batch_query(char *name, int testidx, int client,
- int timeout, cfs_list_t *result_up)
+ int timeout, struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
- cfs_list_t *translist;
- cfs_list_t *ndlist;
+ struct list_head *translist;
+ struct list_head *ndlist;
lstcon_tsb_hdr_t *hdr;
lstcon_batch_t *batch;
lstcon_test_t *test = NULL;
}
int
-lstcon_ndlist_stat(cfs_list_t *ndlist,
- int timeout, cfs_list_t *result_up)
+lstcon_ndlist_stat(struct list_head *ndlist,
+ int timeout, struct list_head *result_up)
{
- cfs_list_t head;
- lstcon_rpc_trans_t *trans;
- int rc;
+ struct list_head head;
+ lstcon_rpc_trans_t *trans;
+ int rc;
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
rc = lstcon_rpc_trans_ndlist(ndlist, &head,
LST_TRANS_STATQRY, NULL, NULL, &trans);
}
int
-lstcon_group_stat(char *grp_name, int timeout, cfs_list_t *result_up)
+lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
{
lstcon_group_t *grp;
int rc;
int
lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, cfs_list_t *result_up)
+ int timeout, struct list_head *result_up)
{
lstcon_ndlink_t *ndl;
lstcon_group_t *tmp;
}
int
-lstcon_debug_ndlist(cfs_list_t *ndlist,
- cfs_list_t *translist,
- int timeout, cfs_list_t *result_up)
+lstcon_debug_ndlist(struct list_head *ndlist,
+ struct list_head *translist,
+ int timeout, struct list_head *result_up)
{
lstcon_rpc_trans_t *trans;
int rc;
}
int
-lstcon_session_debug(int timeout, cfs_list_t *result_up)
+lstcon_session_debug(int timeout, struct list_head *result_up)
{
return lstcon_debug_ndlist(&console_session.ses_ndl_list,
NULL, timeout, result_up);
int
lstcon_batch_debug(int timeout, char *name,
- int client, cfs_list_t *result_up)
+ int client, struct list_head *result_up)
{
lstcon_batch_t *bat;
int rc;
int
lstcon_group_debug(int timeout, char *name,
- cfs_list_t *result_up)
+ struct list_head *result_up)
{
lstcon_group_t *grp;
int rc;
int
lstcon_nodes_debug(int timeout,
int count, lnet_process_id_t *ids_up,
- cfs_list_t *result_up)
+ struct list_head *result_up)
{
lnet_process_id_t id;
lstcon_ndlink_t *ndl;
}
for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
- LASSERT(cfs_list_empty(&console_session.ses_ndl_hash[i]));
+ LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
lstcon_new_session_id(&console_session.ses_id);
memset(entp, 0, sizeof(*entp));
- cfs_list_for_each_entry_typed(ndl, &console_session.ses_ndl_list,
- lstcon_ndlink_t, ndl_link)
- LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
+ list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
if (copy_to_user(sid_up, &console_session.ses_id,
sizeof(lst_sid_t)) ||
console_session.ses_force = 0;
console_session.ses_feats_updated = 0;
- /* destroy all batches */
- while (!cfs_list_empty(&console_session.ses_bat_list)) {
- bat = cfs_list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
+ /* destroy all batches */
+ while (!list_empty(&console_session.ses_bat_list)) {
+ bat = list_entry(console_session.ses_bat_list.next,
+ lstcon_batch_t, bat_link);
- lstcon_batch_destroy(bat);
- }
+ lstcon_batch_destroy(bat);
+ }
- /* destroy all groups */
- while (!cfs_list_empty(&console_session.ses_grp_list)) {
- grp = cfs_list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
- LASSERT (grp->grp_ref == 1);
+ /* destroy all groups */
+ while (!list_empty(&console_session.ses_grp_list)) {
+ grp = list_entry(console_session.ses_grp_list.next,
+ lstcon_group_t, grp_link);
+ LASSERT(grp->grp_ref == 1);
- lstcon_group_put(grp);
- }
+ lstcon_group_put(grp);
+ }
- /* all nodes should be released */
- LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
+ /* all nodes should be released */
+ LASSERT(list_empty(&console_session.ses_ndl_list));
- console_session.ses_shutdown = 0;
- console_session.ses_expired = 0;
+ console_session.ses_shutdown = 0;
+ console_session.ses_expired = 0;
- return rc;
+ return rc;
}
int
goto out;
}
- cfs_list_add_tail(&grp->grp_link,
- &console_session.ses_grp_list);
- lstcon_group_addref(grp);
- }
+ list_add_tail(&grp->grp_link,
+ &console_session.ses_grp_list);
+ lstcon_group_addref(grp);
+ }
if (grp->grp_ref > 2) {
/* Group in using */
mutex_init(&console_session.ses_mutex);
- CFS_INIT_LIST_HEAD(&console_session.ses_ndl_list);
- CFS_INIT_LIST_HEAD(&console_session.ses_grp_list);
- CFS_INIT_LIST_HEAD(&console_session.ses_bat_list);
- CFS_INIT_LIST_HEAD(&console_session.ses_trans_list);
+ INIT_LIST_HEAD(&console_session.ses_ndl_list);
+ INIT_LIST_HEAD(&console_session.ses_grp_list);
+ INIT_LIST_HEAD(&console_session.ses_bat_list);
+ INIT_LIST_HEAD(&console_session.ses_trans_list);
- LIBCFS_ALLOC(console_session.ses_ndl_hash,
- sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
- if (console_session.ses_ndl_hash == NULL)
- return -ENOMEM;
+ LIBCFS_ALLOC(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ if (console_session.ses_ndl_hash == NULL)
+ return -ENOMEM;
- for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
- CFS_INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
+ for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
+ INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
/* initialize acceptor service table */
lstcon_init_acceptor_service();
- rc = srpc_add_service(&lstcon_acceptor_service);
- LASSERT (rc != -EBUSY);
- if (rc != 0) {
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
- return rc;
- }
+ rc = srpc_add_service(&lstcon_acceptor_service);
+ LASSERT(rc != -EBUSY);
+ if (rc != 0) {
+ LIBCFS_FREE(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ return rc;
+ }
rc = srpc_service_add_buffers(&lstcon_acceptor_service,
lstcon_acceptor_service.sv_wi_total);
}
out:
- srpc_shutdown_service(&lstcon_acceptor_service);
- srpc_remove_service(&lstcon_acceptor_service);
+ srpc_shutdown_service(&lstcon_acceptor_service);
+ srpc_remove_service(&lstcon_acceptor_service);
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
+ LIBCFS_FREE(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
- srpc_wait_service_shutdown(&lstcon_acceptor_service);
+ srpc_wait_service_shutdown(&lstcon_acceptor_service);
- return rc;
+ return rc;
}
int
mutex_unlock(&console_session.ses_mutex);
- LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
- LASSERT (cfs_list_empty(&console_session.ses_grp_list));
- LASSERT (cfs_list_empty(&console_session.ses_bat_list));
- LASSERT (cfs_list_empty(&console_session.ses_trans_list));
+ LASSERT(list_empty(&console_session.ses_ndl_list));
+ LASSERT(list_empty(&console_session.ses_grp_list));
+ LASSERT(list_empty(&console_session.ses_bat_list));
+ LASSERT(list_empty(&console_session.ses_trans_list));
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
- LASSERT (cfs_list_empty(&console_session.ses_ndl_hash[i]));
- }
+ for (i = 0; i < LST_NODE_HASHSIZE; i++)
+ LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(cfs_list_t) * LST_GLOBAL_HASHSIZE);
+ LIBCFS_FREE(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
- srpc_wait_service_shutdown(&lstcon_acceptor_service);
+ srpc_wait_service_shutdown(&lstcon_acceptor_service);
- return 0;
+ return 0;
}
#endif
} lstcon_node_t; /*** node descriptor */
typedef struct {
- cfs_list_t ndl_link; /* chain on list */
- cfs_list_t ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
-} lstcon_ndlink_t; /*** node link descriptor */
+ struct list_head ndl_link; /* chain on list */
+ struct list_head ndl_hlink; /* chain on hash */
+ lstcon_node_t *ndl_node; /* pointer to node */
+} lstcon_ndlink_t; /*** node link descriptor */
typedef struct {
- cfs_list_t grp_link; /* chain on global group list */
- int grp_ref; /* reference count */
- int grp_userland; /* has userland nodes */
- int grp_nnode; /* # of nodes */
- char grp_name[LST_NAME_SIZE]; /* group name */
+ struct list_head grp_link; /* chain on global group list */
+ int grp_ref; /* reference count */
+ int grp_userland; /* has userland nodes */
+ int grp_nnode; /* # of nodes */
+ char grp_name[LST_NAME_SIZE]; /* group name */
- cfs_list_t grp_trans_list; /* transaction list */
- cfs_list_t grp_ndl_list; /* nodes list */
- cfs_list_t grp_ndl_hash[0];/* hash table for nodes */
-} lstcon_group_t; /*** (alias of nodes) group descriptor */
+ struct list_head grp_trans_list; /* transaction list */
+ struct list_head grp_ndl_list; /* nodes list */
+ struct list_head grp_ndl_hash[0];/* hash table for nodes */
+} lstcon_group_t; /*** (alias of nodes) group descriptor */
#define LST_BATCH_IDLE 0xB0 /* idle batch */
#define LST_BATCH_RUNNING 0xB1 /* running batch */
} lstcon_tsb_hdr_t;
typedef struct {
- lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
- cfs_list_t bat_link; /* chain on session's batches list */
- int bat_ntest; /* # of test */
- int bat_state; /* state of the batch */
- int bat_arg; /* parameter for run|stop, timeout for run, force for stop */
- char bat_name[LST_NAME_SIZE]; /* name of batch */
-
- cfs_list_t bat_test_list; /* list head of tests (lstcon_test_t) */
- cfs_list_t bat_trans_list; /* list head of transaction */
- cfs_list_t bat_cli_list; /* list head of client nodes (lstcon_node_t) */
- cfs_list_t *bat_cli_hash; /* hash table of client nodes */
- cfs_list_t bat_srv_list; /* list head of server nodes */
- cfs_list_t *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /*** (tests ) batch descritptor */
+ /* test_batch header */
+ lstcon_tsb_hdr_t bat_hdr;
+ /* chain on session's batches list */
+ struct list_head bat_link;
+ /* # of test */
+ int bat_ntest;
+ /* state of the batch */
+ int bat_state;
+ /* parameter for run|stop, timeout for run, force for stop */
+ int bat_arg;
+ /* name of batch */
+ char bat_name[LST_NAME_SIZE];
+
+ /* list head of tests (lstcon_test_t) */
+ struct list_head bat_test_list;
+ /* list head of transaction */
+ struct list_head bat_trans_list;
+ /* list head of client nodes (lstcon_node_t) */
+ struct list_head bat_cli_list;
+ /* hash table of client nodes */
+ struct list_head *bat_cli_hash;
+ /* list head of server nodes */
+ struct list_head bat_srv_list;
+ /* hash table of server nodes */
+ struct list_head *bat_srv_hash;
+} lstcon_batch_t; /*** (tests) batch descriptor */
typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
- cfs_list_t tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
+ /* test batch header */
+ lstcon_tsb_hdr_t tes_hdr;
+ /* chain on batch's tests list */
+ struct list_head tes_link;
+ /* pointer to batch */
+ lstcon_batch_t *tes_batch;
int tes_type; /* type of the test, i.e: bulk, ping */
int tes_stop_onerr; /* stop on error */
int tes_span; /* nodes span of target group */
int tes_cliidx; /* client index, used for RPC creating */
- cfs_list_t tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
+ struct list_head tes_trans_list; /* transaction list */
+ lstcon_group_t *tes_src_grp; /* group run the test */
+ lstcon_group_t *tes_dst_grp; /* target group */
int tes_paramlen; /* test parameter length */
char tes_param[0]; /* test parameter */
stt_timer_t ses_ping_timer; /* timer for pinger */
lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
- cfs_list_t ses_trans_list; /* global list of transaction */
- cfs_list_t ses_grp_list; /* global list of groups */
- cfs_list_t ses_bat_list; /* global list of batches */
- cfs_list_t ses_ndl_list; /* global list of nodes */
- cfs_list_t *ses_ndl_hash; /* hash table of nodes */
+ struct list_head ses_trans_list; /* global list of transaction */
+ struct list_head ses_grp_list; /* global list of groups */
+ struct list_head ses_bat_list; /* global list of batches */
+ struct list_head ses_ndl_list; /* global list of nodes */
+ struct list_head *ses_ndl_hash; /* hash table of nodes */
- spinlock_t ses_rpc_lock; /* serialize */
- atomic_t ses_rpc_counter;/* # of initialized RPCs */
- cfs_list_t ses_rpc_freelist; /* idle console rpc */
-} lstcon_session_t; /*** session descriptor */
+ spinlock_t ses_rpc_lock; /* serialize */
+ atomic_t ses_rpc_counter;/* # of initialized RPCs */
+ struct list_head ses_rpc_freelist;/* idle console rpc */
+} lstcon_session_t; /*** session descriptor */
extern lstcon_session_t console_session;
return &console_session.ses_trans_stat;
}
-static inline cfs_list_t *
-lstcon_id2hash (lnet_process_id_t id, cfs_list_t *hash)
+static inline struct list_head *
+lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
extern int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
lstcon_ndlist_ent_t *entp, char *name_up, int len);
extern int lstcon_session_end(void);
-extern int lstcon_session_debug(int timeout, cfs_list_t *result_up);
+extern int lstcon_session_debug(int timeout, struct list_head *result_up);
extern int lstcon_session_feats_check(unsigned feats);
extern int lstcon_batch_debug(int timeout, char *name,
- int client, cfs_list_t *result_up);
+ int client, struct list_head *result_up);
extern int lstcon_group_debug(int timeout, char *name,
- cfs_list_t *result_up);
+ struct list_head *result_up);
extern int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
- cfs_list_t *result_up);
+ struct list_head *result_up);
extern int lstcon_group_add(char *name);
extern int lstcon_group_del(char *name);
extern int lstcon_group_clean(char *name, int args);
-extern int lstcon_group_refresh(char *name, cfs_list_t *result_up);
+extern int lstcon_group_refresh(char *name, struct list_head *result_up);
extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
- unsigned *featp, cfs_list_t *result_up);
+ unsigned *featp, struct list_head *result_up);
extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
- cfs_list_t *result_up);
+ struct list_head *result_up);
extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
extern int lstcon_group_list(int idx, int len, char *name_up);
extern int lstcon_batch_add(char *name);
extern int lstcon_batch_run(char *name, int timeout,
- cfs_list_t *result_up);
+ struct list_head *result_up);
extern int lstcon_batch_stop(char *name, int force,
- cfs_list_t *result_up);
+ struct list_head *result_up);
extern int lstcon_test_batch_query(char *name, int testidx,
- int client, int timeout,
- cfs_list_t *result_up);
+ int client, int timeout,
+ struct list_head *result_up);
extern int lstcon_batch_del(char *name);
extern int lstcon_batch_list(int idx, int namelen, char *name_up);
extern int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
int server, int testidx, int *index_p,
int *ndent_p, lstcon_node_ent_t *dents_up);
extern int lstcon_group_stat(char *grp_name, int timeout,
- cfs_list_t *result_up);
+ struct list_head *result_up);
extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, cfs_list_t *result_up);
+ int timeout, struct list_head *result_up);
extern int lstcon_test_add(char *batch_name, int type, int loop,
int concur, int dist, int span,
char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
- cfs_list_t *result_up);
+ struct list_head *result_up);
#endif
#endif
#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
struct smoketest_framework {
- cfs_list_t fw_zombie_rpcs; /* RPCs to be recycled */
- cfs_list_t fw_zombie_sessions; /* stopping sessions */
- cfs_list_t fw_tests; /* registered test cases */
- atomic_t fw_nzombies; /* # zombie sessions */
- spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
- int fw_shuttingdown; /* shutdown in progress */
- srpc_server_rpc_t *fw_active_srpc; /* running RPC */
+ /* RPCs to be recycled */
+ struct list_head fw_zombie_rpcs;
+ /* stopping sessions */
+ struct list_head fw_zombie_sessions;
+ /* registered test cases */
+ struct list_head fw_tests;
+ /* # zombie sessions */
+ atomic_t fw_nzombies;
+ /* serialise */
+ spinlock_t fw_lock;
+ /* _the_ session */
+ sfw_session_t *fw_session;
+ /* shutdown in progress */
+ int fw_shuttingdown;
+ /* running RPC */
+ srpc_server_rpc_t *fw_active_srpc;
} sfw_data;
/* forward ref's */
static inline sfw_test_case_t *
sfw_find_test_case(int id)
{
- sfw_test_case_t *tsc;
+ sfw_test_case_t *tsc;
- LASSERT (id <= SRPC_SERVICE_MAX_ID);
- LASSERT (id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
+ LASSERT(id <= SRPC_SERVICE_MAX_ID);
+ LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
- cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
- sfw_test_case_t, tsc_list) {
- if (tsc->tsc_srv_service->sv_id == id)
- return tsc;
- }
+ list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
+ if (tsc->tsc_srv_service->sv_id == id)
+ return tsc;
+ }
- return NULL;
+ return NULL;
}
static int
tsc->tsc_cli_ops = cliops;
tsc->tsc_srv_service = service;
- cfs_list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
- return 0;
+ list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
+ return 0;
}
void
if (sn == NULL) return;
- LASSERT (!sn->sn_timer_active);
+ LASSERT(!sn->sn_timer_active);
- sfw_data.fw_session = NULL;
+ sfw_data.fw_session = NULL;
atomic_inc(&sfw_data.fw_nzombies);
- cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
+ list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
spin_unlock(&sfw_data.fw_lock);
- cfs_list_for_each_entry_typed(tsc, &sfw_data.fw_tests,
- sfw_test_case_t, tsc_list) {
+ list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
srpc_abort_service(tsc->tsc_srv_service);
}
spin_lock(&sfw_data.fw_lock);
- cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
- sfw_batch_t, bat_list) {
- if (sfw_batch_active(tsb)) {
- nactive++;
- sfw_stop_batch(tsb, 1);
- }
- }
+ list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
+ if (sfw_batch_active(tsb)) {
+ nactive++;
+ sfw_stop_batch(tsb, 1);
+ }
+ }
- if (nactive != 0)
- return; /* wait for active batches to stop */
+ if (nactive != 0)
+ return; /* wait for active batches to stop */
- cfs_list_del_init(&sn->sn_list);
+ list_del_init(&sn->sn_list);
spin_unlock(&sfw_data.fw_lock);
sfw_destroy_session(sn);
stt_timer_t *timer = &sn->sn_timer;
memset(sn, 0, sizeof(sfw_session_t));
- CFS_INIT_LIST_HEAD(&sn->sn_list);
- CFS_INIT_LIST_HEAD(&sn->sn_batches);
+ INIT_LIST_HEAD(&sn->sn_list);
+ INIT_LIST_HEAD(&sn->sn_batches);
atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
atomic_set(&sn->sn_brw_errors, 0);
atomic_set(&sn->sn_ping_errors, 0);
timer->stt_data = sn;
timer->stt_func = sfw_session_expired;
- CFS_INIT_LIST_HEAD(&timer->stt_list);
+ INIT_LIST_HEAD(&timer->stt_list);
}
/* completion handler for incoming framework RPCs */
void
sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
{
- LASSERT (rpc->crpc_bulk.bk_niov == 0);
- LASSERT (cfs_list_empty(&rpc->crpc_list));
- LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT(rpc->crpc_bulk.bk_niov == 0);
+ LASSERT(list_empty(&rpc->crpc_list));
+ LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
- LASSERT (rpc->crpc_bulk.bk_pages == NULL);
+ LASSERT(rpc->crpc_bulk.bk_pages == NULL);
#endif
- CDEBUG (D_NET,
- "Outgoing framework RPC done: "
- "service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state),
- rpc->crpc_aborted, rpc->crpc_status);
+ CDEBUG(D_NET, "Outgoing framework RPC done: "
+ "service %d, peer %s, status %s:%d:%d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(rpc->crpc_wi.swi_state),
+ rpc->crpc_aborted, rpc->crpc_status);
spin_lock(&sfw_data.fw_lock);
/* my callers must finish all RPCs before shutting me down */
LASSERT(!sfw_data.fw_shuttingdown);
- cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+ list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
spin_unlock(&sfw_data.fw_lock);
}
sfw_batch_t *
sfw_find_batch (lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ sfw_session_t *sn = sfw_data.fw_session;
+ sfw_batch_t *bat;
- LASSERT (sn != NULL);
+ LASSERT(sn != NULL);
- cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
- sfw_batch_t, bat_list) {
- if (bat->bat_id.bat_id == bid.bat_id)
- return bat;
- }
+ list_for_each_entry(bat, &sn->sn_batches, bat_list) {
+ if (bat->bat_id.bat_id == bid.bat_id)
+ return bat;
+ }
- return NULL;
+ return NULL;
}
sfw_batch_t *
if (bat == NULL)
return NULL;
- bat->bat_error = 0;
- bat->bat_session = sn;
- bat->bat_id = bid;
+ bat->bat_error = 0;
+ bat->bat_session = sn;
+ bat->bat_id = bid;
atomic_set(&bat->bat_nactive, 0);
- CFS_INIT_LIST_HEAD(&bat->bat_tests);
+ INIT_LIST_HEAD(&bat->bat_tests);
- cfs_list_add_tail(&bat->bat_list, &sn->sn_batches);
- return bat;
+ list_add_tail(&bat->bat_list, &sn->sn_batches);
+ return bat;
}
int
cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
- cnt->active_batches = 0;
- cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
- sfw_batch_t, bat_list) {
+ cnt->active_batches = 0;
+ list_for_each_entry(bat, &sn->sn_batches, bat_list) {
if (atomic_read(&bat->bat_nactive) > 0)
- cnt->active_batches++;
- }
+ cnt->active_batches++;
+ }
- reply->str_status = 0;
- return 0;
+ reply->str_status = 0;
+ return 0;
}
int
void
sfw_test_rpc_fini (srpc_client_rpc_t *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ sfw_test_unit_t *tsu = rpc->crpc_priv;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
- /* Called with hold of tsi->tsi_lock */
- LASSERT (cfs_list_empty(&rpc->crpc_list));
- cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ /* Called with hold of tsi->tsi_lock */
+ LASSERT(list_empty(&rpc->crpc_list));
+ list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
}
static inline int
tsi->tsi_ops->tso_fini(tsi);
- LASSERT (!tsi->tsi_stopping);
- LASSERT (cfs_list_empty(&tsi->tsi_active_rpcs));
- LASSERT (!sfw_test_active(tsi));
+ LASSERT(!tsi->tsi_stopping);
+ LASSERT(list_empty(&tsi->tsi_active_rpcs));
+ LASSERT(!sfw_test_active(tsi));
- while (!cfs_list_empty(&tsi->tsi_units)) {
- tsu = cfs_list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
- cfs_list_del(&tsu->tsu_list);
- LIBCFS_FREE(tsu, sizeof(*tsu));
- }
+ while (!list_empty(&tsi->tsi_units)) {
+ tsu = list_entry(tsi->tsi_units.next,
+ sfw_test_unit_t, tsu_list);
+ list_del(&tsu->tsu_list);
+ LIBCFS_FREE(tsu, sizeof(*tsu));
+ }
- while (!cfs_list_empty(&tsi->tsi_free_rpcs)) {
- rpc = cfs_list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- cfs_list_del(&rpc->crpc_list);
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- }
+ while (!list_empty(&tsi->tsi_free_rpcs)) {
+ rpc = list_entry(tsi->tsi_free_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ list_del(&rpc->crpc_list);
+ LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ }
clean:
- sfw_unload_test(tsi);
- LIBCFS_FREE(tsi, sizeof(*tsi));
- return;
+ sfw_unload_test(tsi);
+ LIBCFS_FREE(tsi, sizeof(*tsi));
+ return;
}
void
sfw_destroy_batch (sfw_batch_t *tsb)
{
- sfw_test_instance_t *tsi;
+ sfw_test_instance_t *tsi;
- LASSERT (!sfw_batch_active(tsb));
- LASSERT (cfs_list_empty(&tsb->bat_list));
+ LASSERT(!sfw_batch_active(tsb));
+ LASSERT(list_empty(&tsb->bat_list));
- while (!cfs_list_empty(&tsb->bat_tests)) {
- tsi = cfs_list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
- cfs_list_del_init(&tsi->tsi_list);
- sfw_destroy_test_instance(tsi);
- }
+ while (!list_empty(&tsb->bat_tests)) {
+ tsi = list_entry(tsb->bat_tests.next,
+ sfw_test_instance_t, tsi_list);
+ list_del_init(&tsi->tsi_list);
+ sfw_destroy_test_instance(tsi);
+ }
- LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
- return;
+ LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
+ return;
}
void
sfw_destroy_session (sfw_session_t *sn)
{
- sfw_batch_t *batch;
+ sfw_batch_t *batch;
- LASSERT (cfs_list_empty(&sn->sn_list));
- LASSERT (sn != sfw_data.fw_session);
+ LASSERT(list_empty(&sn->sn_list));
+ LASSERT(sn != sfw_data.fw_session);
- while (!cfs_list_empty(&sn->sn_batches)) {
- batch = cfs_list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
- cfs_list_del_init(&batch->bat_list);
- sfw_destroy_batch(batch);
- }
+ while (!list_empty(&sn->sn_batches)) {
+ batch = list_entry(sn->sn_batches.next,
+ sfw_batch_t, bat_list);
+ list_del_init(&batch->bat_list);
+ sfw_destroy_batch(batch);
+ }
- LIBCFS_FREE(sn, sizeof(*sn));
+ LIBCFS_FREE(sn, sizeof(*sn));
atomic_dec(&sfw_data.fw_nzombies);
- return;
+ return;
}
void
return;
}
- LBUG ();
- return;
+ LBUG();
+ return;
}
int
memset(tsi, 0, sizeof(*tsi));
spin_lock_init(&tsi->tsi_lock);
atomic_set(&tsi->tsi_nactive, 0);
- CFS_INIT_LIST_HEAD(&tsi->tsi_units);
- CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
- CFS_INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
+ INIT_LIST_HEAD(&tsi->tsi_units);
+ INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
+ INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
tsi->tsi_stopping = 0;
tsi->tsi_batch = tsb;
LASSERT (!sfw_batch_active(tsb));
- if (!tsi->tsi_is_client) {
- /* it's test server, just add it to tsb */
- cfs_list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
- return 0;
- }
+ if (!tsi->tsi_is_client) {
+ /* it's test server, just add it to tsb */
+ list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+ return 0;
+ }
LASSERT (bk != NULL);
#ifndef __KERNEL__
goto error;
}
- tsu->tsu_dest.nid = id.nid;
- tsu->tsu_dest.pid = id.pid;
- tsu->tsu_instance = tsi;
- tsu->tsu_private = NULL;
- cfs_list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
- }
- }
+ tsu->tsu_dest.nid = id.nid;
+ tsu->tsu_dest.pid = id.pid;
+ tsu->tsu_instance = tsi;
+ tsu->tsu_private = NULL;
+ list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
+ }
+ }
- rc = tsi->tsi_ops->tso_init(tsi);
- if (rc == 0) {
- cfs_list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
- return 0;
- }
+ rc = tsi->tsi_ops->tso_init(tsi);
+ if (rc == 0) {
+ list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+ return 0;
+ }
error:
- LASSERT (rc != 0);
- sfw_destroy_test_instance(tsi);
- return rc;
+ LASSERT(rc != 0);
+ sfw_destroy_test_instance(tsi);
+ return rc;
}
static void
return;
}
- LASSERT (!cfs_list_empty(&sn->sn_list)); /* I'm a zombie! */
+ LASSERT(!list_empty(&sn->sn_list)); /* I'm a zombie! */
- cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
- sfw_batch_t, bat_list) {
- if (sfw_batch_active(tsb)) {
+ list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
+ if (sfw_batch_active(tsb)) {
spin_unlock(&sfw_data.fw_lock);
return;
}
}
- cfs_list_del_init(&sn->sn_list);
+ list_del_init(&sn->sn_list);
spin_unlock(&sfw_data.fw_lock);
sfw_destroy_session(sn);
spin_lock(&tsi->tsi_lock);
- LASSERT (sfw_test_active(tsi));
- LASSERT (!cfs_list_empty(&rpc->crpc_list));
+ LASSERT(sfw_test_active(tsi));
+ LASSERT(!list_empty(&rpc->crpc_list));
- cfs_list_del_init(&rpc->crpc_list);
+ list_del_init(&rpc->crpc_list);
/* batch is stopping or loop is done or get error */
if (tsi->tsi_stopping ||
LASSERT (sfw_test_active(tsi));
- if (!cfs_list_empty(&tsi->tsi_free_rpcs)) {
- /* pick request from buffer */
- rpc = cfs_list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- LASSERT (nblk == rpc->crpc_bulk.bk_niov);
- cfs_list_del_init(&rpc->crpc_list);
- }
+ if (!list_empty(&tsi->tsi_free_rpcs)) {
+ /* pick request from buffer */
+ rpc = list_entry(tsi->tsi_free_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ LASSERT(nblk == rpc->crpc_bulk.bk_niov);
+ list_del_init(&rpc->crpc_list);
+ }
spin_unlock(&tsi->tsi_lock);
spin_lock(&tsi->tsi_lock);
if (tsi->tsi_stopping) {
- cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
spin_unlock(&tsi->tsi_lock);
goto test_done;
}
if (tsu->tsu_loop > 0)
tsu->tsu_loop--;
- cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+ list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
spin_unlock(&tsi->tsi_lock);
spin_lock(&rpc->crpc_lock);
return 0;
}
- cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
- sfw_test_instance_t, tsi_list) {
- if (!tsi->tsi_is_client) /* skip server instances */
- continue;
+ list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
+ if (!tsi->tsi_is_client) /* skip server instances */
+ continue;
- LASSERT (!tsi->tsi_stopping);
- LASSERT (!sfw_test_active(tsi));
+ LASSERT(!tsi->tsi_stopping);
+ LASSERT(!sfw_test_active(tsi));
atomic_inc(&tsb->bat_nactive);
- cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
- sfw_test_unit_t, tsu_list) {
+ list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
atomic_inc(&tsi->tsi_nactive);
- tsu->tsu_loop = tsi->tsi_loop;
- wi = &tsu->tsu_worker;
+ tsu->tsu_loop = tsi->tsi_loop;
+ wi = &tsu->tsu_worker;
swi_init_workitem(wi, tsu, sfw_run_test,
lst_sched_test[\
lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
- swi_schedule_workitem(wi);
- }
- }
+ swi_schedule_workitem(wi);
+ }
+ }
- return 0;
+ return 0;
}
int
return 0;
}
- cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
- sfw_test_instance_t, tsi_list) {
+ list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
spin_lock(&tsi->tsi_lock);
if (!tsi->tsi_is_client ||
}
/* abort launched rpcs in the test */
- cfs_list_for_each_entry_typed(rpc, &tsi->tsi_active_rpcs,
- srpc_client_rpc_t, crpc_list) {
+ list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) {
spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, -EINTR);
return 0;
}
- cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
- sfw_test_instance_t, tsi_list) {
+ list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
if (testidx-- > 1)
continue;
LASSERT (!sfw_data.fw_shuttingdown);
LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- if (nbulkiov == 0 && !cfs_list_empty(&sfw_data.fw_zombie_rpcs)) {
- rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
+ if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
+ rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
srpc_client_rpc_t, crpc_list);
- cfs_list_del(&rpc->crpc_list);
+ list_del(&rpc->crpc_list);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
done, sfw_client_rpc_fini, priv);
{
spin_lock(&rpc->crpc_lock);
- LASSERT (!rpc->crpc_closed);
- LASSERT (!rpc->crpc_aborted);
- LASSERT (cfs_list_empty(&rpc->crpc_list));
- LASSERT (!sfw_data.fw_shuttingdown);
+ LASSERT(!rpc->crpc_closed);
+ LASSERT(!rpc->crpc_aborted);
+ LASSERT(list_empty(&rpc->crpc_list));
+ LASSERT(!sfw_data.fw_shuttingdown);
- rpc->crpc_timeout = rpc_timeout;
- srpc_post_rpc(rpc);
+ rpc->crpc_timeout = rpc_timeout;
+ srpc_post_rpc(rpc);
spin_unlock(&rpc->crpc_lock);
return;
sfw_data.fw_active_srpc = NULL;
spin_lock_init(&sfw_data.fw_lock);
atomic_set(&sfw_data.fw_nzombies, 0);
- CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
- CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
- CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
+ INIT_LIST_HEAD(&sfw_data.fw_tests);
+ INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
+ INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
brw_init_test_client();
brw_init_test_service();
rc = sfw_register_test(&ping_test_service, &ping_test_client);
LASSERT (rc == 0);
- error = 0;
- cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
- sfw_test_case_t, tsc_list) {
- sv = tsc->tsc_srv_service;
+ error = 0;
+ list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
+ sv = tsc->tsc_srv_service;
- rc = srpc_add_service(sv);
- LASSERT (rc != -EBUSY);
- if (rc != 0) {
- CWARN ("Failed to add %s service: %d\n",
- sv->sv_name, rc);
- error = rc;
- }
- }
+ rc = srpc_add_service(sv);
+ LASSERT(rc != -EBUSY);
+ if (rc != 0) {
+ CWARN("Failed to add %s service: %d\n",
+ sv->sv_name, rc);
+ error = rc;
+ }
+ }
for (i = 0; ; i++) {
sv = &sfw_services[i];
srpc_remove_service(sv);
}
- cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
- sfw_test_case_t, tsc_list) {
+ list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
sv = tsc->tsc_srv_service;
srpc_shutdown_service(sv);
srpc_remove_service(sv);
}
- while (!cfs_list_empty(&sfw_data.fw_zombie_rpcs)) {
- srpc_client_rpc_t *rpc;
+ while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
+ srpc_client_rpc_t *rpc;
- rpc = cfs_list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
- cfs_list_del(&rpc->crpc_list);
+ rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ list_del(&rpc->crpc_list);
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- }
+ LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ }
for (i = 0; ; i++) {
sv = &sfw_services[i];
srpc_wait_service_shutdown(sv);
}
- while (!cfs_list_empty(&sfw_data.fw_tests)) {
- tsc = cfs_list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
+ while (!list_empty(&sfw_data.fw_tests)) {
+ tsc = list_entry(sfw_data.fw_tests.next,
+ sfw_test_case_t, tsc_list);
- srpc_wait_service_shutdown(tsc->tsc_srv_service);
+ srpc_wait_service_shutdown(tsc->tsc_srv_service);
- cfs_list_del(&tsc->tsc_list);
- LIBCFS_FREE(tsc, sizeof(*tsc));
- }
+ list_del(&tsc->tsc_list);
+ LIBCFS_FREE(tsc, sizeof(*tsc));
+ }
- return;
+ return;
}
struct srpc_service_cd *scd;
struct srpc_server_rpc *rpc;
struct srpc_buffer *buf;
- cfs_list_t *q;
+ struct list_head *q;
int i;
if (svc->sv_cpt_data == NULL)
cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
while (1) {
- if (!cfs_list_empty(&scd->scd_buf_posted))
+ if (!list_empty(&scd->scd_buf_posted))
q = &scd->scd_buf_posted;
- else if (!cfs_list_empty(&scd->scd_buf_blocked))
+ else if (!list_empty(&scd->scd_buf_blocked))
q = &scd->scd_buf_blocked;
else
break;
- while (!cfs_list_empty(q)) {
- buf = cfs_list_entry(q->next,
+ while (!list_empty(q)) {
+ buf = list_entry(q->next,
struct srpc_buffer,
buf_list);
- cfs_list_del(&buf->buf_list);
+ list_del(&buf->buf_list);
LIBCFS_FREE(buf, sizeof(*buf));
}
}
- LASSERT(cfs_list_empty(&scd->scd_rpc_active));
+ LASSERT(list_empty(&scd->scd_rpc_active));
- while (!cfs_list_empty(&scd->scd_rpc_free)) {
- rpc = cfs_list_entry(scd->scd_rpc_free.next,
+ while (!list_empty(&scd->scd_rpc_free)) {
+ rpc = list_entry(scd->scd_rpc_free.next,
struct srpc_server_rpc,
srpc_list);
- cfs_list_del(&rpc->srpc_list);
+ list_del(&rpc->srpc_list);
LIBCFS_FREE(rpc, sizeof(*rpc));
}
}
scd->scd_cpt = i;
scd->scd_svc = svc;
spin_lock_init(&scd->scd_lock);
- CFS_INIT_LIST_HEAD(&scd->scd_rpc_free);
- CFS_INIT_LIST_HEAD(&scd->scd_rpc_active);
- CFS_INIT_LIST_HEAD(&scd->scd_buf_posted);
- CFS_INIT_LIST_HEAD(&scd->scd_buf_blocked);
+ INIT_LIST_HEAD(&scd->scd_rpc_free);
+ INIT_LIST_HEAD(&scd->scd_rpc_active);
+ INIT_LIST_HEAD(&scd->scd_buf_posted);
+ INIT_LIST_HEAD(&scd->scd_buf_blocked);
scd->scd_ev.ev_data = scd;
scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
srpc_service_fini(svc);
return -ENOMEM;
}
- cfs_list_add(&rpc->srpc_list, &scd->scd_rpc_free);
+ list_add(&rpc->srpc_list, &scd->scd_rpc_free);
}
}
int rc;
LNetInvalidateHandle(&buf->buf_mdh);
- cfs_list_add(&buf->buf_list, &scd->scd_buf_posted);
+ list_add(&buf->buf_list, &scd->scd_buf_posted);
scd->scd_buf_nposted++;
spin_unlock(&scd->scd_lock);
if (sv->sv_shuttingdown)
return rc; /* don't allow to change scd_buf_posted */
- cfs_list_del(&buf->buf_list);
+ list_del(&buf->buf_list);
spin_unlock(&scd->scd_lock);
LIBCFS_FREE(buf, sizeof(*buf));
return 0;
}
- if (cfs_list_empty(&scd->scd_rpc_active)) {
+ if (list_empty(&scd->scd_rpc_active)) {
spin_unlock(&scd->scd_lock);
continue;
}
- rpc = cfs_list_entry(scd->scd_rpc_active.next,
+ rpc = list_entry(scd->scd_rpc_active.next,
struct srpc_server_rpc, srpc_list);
CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
"wi %s scheduled %d running %d, "
/* schedule in-flight RPCs to notice the abort, NB:
* racing with incoming RPCs; complete fix should make test
* RPCs carry session ID in its headers */
- cfs_list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
+ list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
rpc->srpc_aborted = 1;
swi_schedule_workitem(&rpc->srpc_wi);
}
spin_lock(&scd->scd_lock);
/* schedule in-flight RPCs to notice the shutdown */
- cfs_list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
+ list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
swi_schedule_workitem(&rpc->srpc_wi);
spin_unlock(&scd->scd_lock);
/* OK to traverse scd_buf_posted without lock, since no one
* touches scd_buf_posted now */
- cfs_list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
+ list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
LNetMDUnlink(buf->buf_mdh);
}
}
rpc->srpc_reqstbuf = NULL;
}
- cfs_list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
+ list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
/*
* No one can schedule me now since:
LASSERT(rpc->srpc_ev.ev_fired);
swi_exit_workitem(&rpc->srpc_wi);
- if (!sv->sv_shuttingdown && !cfs_list_empty(&scd->scd_buf_blocked)) {
- buffer = cfs_list_entry(scd->scd_buf_blocked.next,
+ if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
+ buffer = list_entry(scd->scd_buf_blocked.next,
srpc_buffer_t, buf_list);
- cfs_list_del(&buffer->buf_list);
+ list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, scd, buffer);
- cfs_list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
+ list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
swi_schedule_workitem(&rpc->srpc_wi);
} else {
- cfs_list_add(&rpc->srpc_list, &scd->scd_rpc_free);
+ list_add(&rpc->srpc_list, &scd->scd_rpc_free);
}
spin_unlock(&scd->scd_lock);
}
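In the completion path above, a finished server RPC is either re-armed immediately with a blocked buffer (list_add_tail() onto scd_rpc_active, preserving arrival order) or pushed back onto scd_rpc_free with list_add(), so the most recently returned descriptor is the first one taken again by the list_entry(scd->scd_rpc_free.next, ...) allocation shown further down. A tiny sketch of the two insertions, with illustrative names:

#include <linux/list.h>

struct demo_rpc {
        struct list_head dr_list;
};

/* Head insertion: the free pool behaves as a LIFO stack. */
static void demo_rpc_recycle(struct demo_rpc *rpc, struct list_head *free_pool)
{
        list_add(&rpc->dr_list, free_pool);
}

/* Tail insertion: the active queue keeps FIFO (arrival) order. */
static void demo_rpc_activate(struct demo_rpc *rpc, struct list_head *active)
{
        list_add_tail(&rpc->dr_list, active);
}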
inline void
-srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
+srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
{
- stt_timer_t *timer = &rpc->crpc_timer;
+ stt_timer_t *timer = &rpc->crpc_timer;
- if (rpc->crpc_timeout == 0) return;
+ if (rpc->crpc_timeout == 0)
+ return;
- CFS_INIT_LIST_HEAD(&timer->stt_list);
- timer->stt_data = rpc;
- timer->stt_func = srpc_client_rpc_expired;
- timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
- cfs_time_current_sec());
- stt_add_timer(timer);
- return;
+ INIT_LIST_HEAD(&timer->stt_list);
+ timer->stt_data = rpc;
+ timer->stt_func = srpc_client_rpc_expired;
+ timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
+ cfs_time_current_sec());
+ stt_add_timer(timer);
+ return;
}
/*
swi_schedule_workitem(&scd->scd_buf_wi);
}
- cfs_list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
- msg = &buffer->buf_msg;
- type = srpc_service2request(sv->sv_id);
+ list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
+ msg = &buffer->buf_msg;
+ type = srpc_service2request(sv->sv_id);
if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
(msg->msg_type != type &&
msg->msg_magic = 0;
}
- if (!cfs_list_empty(&scd->scd_rpc_free)) {
- srpc = cfs_list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
- cfs_list_del(&srpc->srpc_list);
+ if (!list_empty(&scd->scd_rpc_free)) {
+ srpc = list_entry(scd->scd_rpc_free.next,
+ struct srpc_server_rpc,
+ srpc_list);
+ list_del(&srpc->srpc_list);
srpc_init_server_rpc(srpc, scd, buffer);
- cfs_list_add_tail(&srpc->srpc_list,
- &scd->scd_rpc_active);
+ list_add_tail(&srpc->srpc_list,
+ &scd->scd_rpc_active);
swi_schedule_workitem(&srpc->srpc_wi);
} else {
- cfs_list_add_tail(&buffer->buf_list,
- &scd->scd_buf_blocked);
+ list_add_tail(&buffer->buf_list,
+ &scd->scd_buf_blocked);
}
spin_unlock(&scd->scd_lock);
/* message buffer descriptor */
typedef struct srpc_buffer {
- cfs_list_t buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
- lnet_handle_md_t buf_mdh;
- lnet_nid_t buf_self;
- lnet_process_id_t buf_peer;
+ struct list_head buf_list; /* chain on srpc_service::*_msgq */
+ srpc_msg_t buf_msg;
+ lnet_handle_md_t buf_mdh;
+ lnet_nid_t buf_self;
+ lnet_process_id_t buf_peer;
} srpc_buffer_t;
struct swi_workitem;
/* server-side state of a RPC */
typedef struct srpc_server_rpc {
/* chain on srpc_service::*_rpcq */
- cfs_list_t srpc_list;
+ struct list_head srpc_list;
struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
- lnet_nid_t srpc_self;
- lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
- lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
-
- unsigned int srpc_aborted; /* being given up */
- int srpc_status;
- void (*srpc_done)(struct srpc_server_rpc *);
+ swi_workitem_t srpc_wi;
+ srpc_event_t srpc_ev; /* bulk/reply event */
+ lnet_nid_t srpc_self;
+ lnet_process_id_t srpc_peer;
+ srpc_msg_t srpc_replymsg;
+ lnet_handle_md_t srpc_replymdh;
+ srpc_buffer_t *srpc_reqstbuf;
+ srpc_bulk_t *srpc_bulk;
+
+ unsigned int srpc_aborted; /* being given up */
+ int srpc_status;
+ void (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;
/* client-side state of a RPC */
typedef struct srpc_client_rpc {
- cfs_list_t crpc_list; /* chain on user's lists */
+ struct list_head crpc_list; /* chain on user's lists */
spinlock_t crpc_lock; /* serialize */
- int crpc_service;
- atomic_t crpc_refcount;
- int crpc_timeout; /* # seconds to wait for reply */
- stt_timer_t crpc_timer;
- swi_workitem_t crpc_wi;
- lnet_process_id_t crpc_dest;
+ int crpc_service;
+ atomic_t crpc_refcount;
+ /* # seconds to wait for reply */
+ int crpc_timeout;
+ stt_timer_t crpc_timer;
+ swi_workitem_t crpc_wi;
+ lnet_process_id_t crpc_dest;
void (*crpc_done)(struct srpc_client_rpc *);
void (*crpc_fini)(struct srpc_client_rpc *);
/** event buffer */
srpc_event_t scd_ev;
/** free RPC descriptors */
- cfs_list_t scd_rpc_free;
+ struct list_head scd_rpc_free;
/** in-flight RPCs */
- cfs_list_t scd_rpc_active;
+ struct list_head scd_rpc_active;
/** workitem for posting buffer */
swi_workitem_t scd_buf_wi;
/** CPT id */
/** increase/decrease some buffers */
int scd_buf_adjust;
/** posted message buffers */
- cfs_list_t scd_buf_posted;
+ struct list_head scd_buf_posted;
/** blocked for RPC descriptor */
- cfs_list_t scd_buf_blocked;
+ struct list_head scd_buf_blocked;
};
/* number of server workitems (mini-thread) for testing service */
} srpc_service_t;
typedef struct {
- cfs_list_t sn_list; /* chain on fw_zombie_sessions */
- lst_sid_t sn_id; /* unique identifier */
- unsigned int sn_timeout; /* # seconds' inactivity to expire */
- int sn_timer_active;
- unsigned int sn_features;
- stt_timer_t sn_timer;
- cfs_list_t sn_batches; /* list of batches */
- char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
- cfs_time_t sn_started;
+ /* chain on fw_zombie_sessions */
+ struct list_head sn_list;
+ lst_sid_t sn_id; /* unique identifier */
+ /* # seconds' inactivity to expire */
+ unsigned int sn_timeout;
+ int sn_timer_active;
+ unsigned int sn_features;
+ stt_timer_t sn_timer;
+ struct list_head sn_batches; /* list of batches */
+ char sn_name[LST_NAME_SIZE];
+ atomic_t sn_refcount;
+ atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
+ cfs_time_t sn_started;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
(sid0).ses_stamp == (sid1).ses_stamp)
typedef struct {
- cfs_list_t bat_list; /* chain on sn_batches */
- lst_bid_t bat_id; /* batch id */
- int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- cfs_list_t bat_tests; /* test instances */
+ struct list_head bat_list; /* chain on sn_batches */
+ lst_bid_t bat_id; /* batch id */
+ int bat_error; /* error code of batch */
+ sfw_session_t *bat_session; /* batch's session */
+ atomic_t bat_nactive; /* # of active tests */
+ struct list_head bat_tests; /* test instances */
} sfw_batch_t;
typedef struct {
} sfw_test_client_ops_t;
typedef struct sfw_test_instance {
- cfs_list_t tsi_list; /* chain on batch */
- int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operations */
+ struct list_head tsi_list; /* chain on batch */
+ int tsi_service; /* test type */
+ sfw_batch_t *tsi_batch; /* batch */
+ sfw_test_client_ops_t *tsi_ops; /* test client operations */
/* public parameter for all test units */
unsigned int tsi_is_client:1; /* is test client */
int tsi_loop; /* loop count */
/* status of test instance */
- spinlock_t tsi_lock; /* serialize */
- unsigned int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test unit */
- cfs_list_t tsi_units; /* test units */
- cfs_list_t tsi_free_rpcs; /* free rpcs */
- cfs_list_t tsi_active_rpcs; /* active rpcs */
+ spinlock_t tsi_lock; /* serialize */
+ unsigned int tsi_stopping:1; /* test is stopping */
+ atomic_t tsi_nactive; /* # of active test unit */
+ struct list_head tsi_units; /* test units */
+ struct list_head tsi_free_rpcs; /* free rpcs */
+ struct list_head tsi_active_rpcs;/* active rpcs */
union {
test_ping_req_t ping; /* ping parameter */
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
typedef struct sfw_test_unit {
- cfs_list_t tsu_list; /* chain on lst_test_instance */
- lnet_process_id_t tsu_dest; /* id of dest node */
- int tsu_loop; /* loop count of the test */
- sfw_test_instance_t *tsu_instance; /* pointer to test instance */
- void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
+ struct list_head tsu_list; /* chain on lst_test_instance */
+ lnet_process_id_t tsu_dest; /* id of dest node */
+ int tsu_loop; /* loop count of the test */
+ sfw_test_instance_t *tsu_instance; /* pointer to test instance */
+ void *tsu_private; /* private data */
+ swi_workitem_t tsu_worker; /* workitem of the test unit */
} sfw_test_unit_t;
typedef struct sfw_test_case {
- cfs_list_t tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
+ struct list_head tsc_list; /* chain on fw_tests */
+ srpc_service_t *tsc_srv_service; /* test service */
+ sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
} sfw_test_case_t;
srpc_client_rpc_t *
}
static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
- int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+ int service, int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
- LASSERT (nbulkiov <= LNET_MAX_IOV);
+ LASSERT(nbulkiov <= LNET_MAX_IOV);
- memset(rpc, 0, offsetof(srpc_client_rpc_t,
- crpc_bulk.bk_iovs[nbulkiov]));
+ memset(rpc, 0, offsetof(srpc_client_rpc_t,
+ crpc_bulk.bk_iovs[nbulkiov]));
- CFS_INIT_LIST_HEAD(&rpc->crpc_list);
+ INIT_LIST_HEAD(&rpc->crpc_list);
swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
spin_lock_init(&rpc->crpc_lock);
spinlock_t stt_lock;
/* start time of the slot processed previously */
cfs_time_t stt_prev_slot;
- cfs_list_t stt_hash[STTIMER_NSLOTS];
+ struct list_head stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
#ifdef __KERNEL__
wait_queue_head_t stt_waitq;
void
stt_add_timer(stt_timer_t *timer)
{
- cfs_list_t *pos;
+ struct list_head *pos;
spin_lock(&stt_data.stt_lock);
#ifdef __KERNEL__
- LASSERT (stt_data.stt_nthreads > 0);
+ LASSERT(stt_data.stt_nthreads > 0);
#endif
- LASSERT (!stt_data.stt_shuttingdown);
- LASSERT (timer->stt_func != NULL);
- LASSERT (cfs_list_empty(&timer->stt_list));
- LASSERT (cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
+ LASSERT(!stt_data.stt_shuttingdown);
+ LASSERT(timer->stt_func != NULL);
+ LASSERT(list_empty(&timer->stt_list));
+ LASSERT(cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
- /* a simple insertion sort */
- cfs_list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
- stt_timer_t *old = cfs_list_entry(pos, stt_timer_t, stt_list);
+ /* a simple insertion sort */
+ list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
+ stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
- if (cfs_time_aftereq(timer->stt_expires, old->stt_expires))
- break;
- }
- cfs_list_add(&timer->stt_list, pos);
+ if (cfs_time_aftereq(timer->stt_expires, old->stt_expires))
+ break;
+ }
+ list_add(&timer->stt_list, pos);
spin_unlock(&stt_data.stt_lock);
}
* another CPU.
*/
int
-stt_del_timer (stt_timer_t *timer)
+stt_del_timer(stt_timer_t *timer)
{
int ret = 0;
spin_lock(&stt_data.stt_lock);
#ifdef __KERNEL__
- LASSERT (stt_data.stt_nthreads > 0);
+ LASSERT(stt_data.stt_nthreads > 0);
#endif
- LASSERT (!stt_data.stt_shuttingdown);
+ LASSERT(!stt_data.stt_shuttingdown);
- if (!cfs_list_empty(&timer->stt_list)) {
- ret = 1;
- cfs_list_del_init(&timer->stt_list);
- }
+ if (!list_empty(&timer->stt_list)) {
+ ret = 1;
+ list_del_init(&timer->stt_list);
+ }
spin_unlock(&stt_data.stt_lock);
return ret;
/* called with stt_data.stt_lock held */
int
-stt_expire_list (cfs_list_t *slot, cfs_time_t now)
+stt_expire_list(struct list_head *slot, cfs_time_t now)
{
- int expired = 0;
- stt_timer_t *timer;
+ int expired = 0;
+ stt_timer_t *timer;
- while (!cfs_list_empty(slot)) {
- timer = cfs_list_entry(slot->next, stt_timer_t, stt_list);
+ while (!list_empty(slot)) {
+ timer = list_entry(slot->next, stt_timer_t, stt_list);
- if (cfs_time_after(timer->stt_expires, now))
- break;
+ if (cfs_time_after(timer->stt_expires, now))
+ break;
- cfs_list_del_init(&timer->stt_list);
+ list_del_init(&timer->stt_list);
spin_unlock(&stt_data.stt_lock);
expired++;
spin_lock_init(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
- CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
+ INIT_LIST_HEAD(&stt_data.stt_hash[i]);
#ifdef __KERNEL__
stt_data.stt_nthreads = 0;
}
void
-stt_shutdown (void)
+stt_shutdown(void)
{
- int i;
+ int i;
spin_lock(&stt_data.stt_lock);
- for (i = 0; i < STTIMER_NSLOTS; i++)
- LASSERT (cfs_list_empty(&stt_data.stt_hash[i]));
+ for (i = 0; i < STTIMER_NSLOTS; i++)
+ LASSERT(list_empty(&stt_data.stt_hash[i]));
- stt_data.stt_shuttingdown = 1;
+ stt_data.stt_shuttingdown = 1;
#ifdef __KERNEL__
wake_up(&stt_data.stt_waitq);
#define __SELFTEST_TIMER_H__
typedef struct {
- cfs_list_t stt_list;
- cfs_time_t stt_expires;
- void (*stt_func) (void *);
- void *stt_data;
+ struct list_head stt_list;
+ cfs_time_t stt_expires;
+ void (*stt_func) (void *);
+ void *stt_data;
} stt_timer_t;
-void stt_add_timer (stt_timer_t *timer);
-int stt_del_timer (stt_timer_t *timer);
-int stt_startup (void);
-void stt_shutdown (void);
+void stt_add_timer(stt_timer_t *timer);
+int stt_del_timer(stt_timer_t *timer);
+int stt_startup(void);
+void stt_shutdown(void);
#endif /* __SELFTEST_TIMER_H__ */
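A short usage sketch of the converted timer API declared above; the callback, the
10-second delay and the demo_* names are illustrative, while the stt_timer_t fields
and the stt_* entry points are the ones declared in this header.

	/* illustrative caller, not part of the patch */
	static void demo_expired(void *data)
	{
		/* runs in the selftest timer thread once the slot expires */
	}

	static stt_timer_t demo_timer;

	static void demo_arm_timer(void)
	{
		INIT_LIST_HEAD(&demo_timer.stt_list);
		demo_timer.stt_data    = NULL;
		demo_timer.stt_func    = demo_expired;
		/* expire roughly 10 seconds from now, same pattern as the
		 * client RPC timer earlier in the patch */
		demo_timer.stt_expires = cfs_time_add(10, cfs_time_current_sec());

		stt_add_timer(&demo_timer);
	}

	static int demo_disarm_timer(void)
	{
		/* returns 1 if the timer was still queued and has been removed,
		 * 0 if it already expired (or was never armed) */
		return stt_del_timer(&demo_timer);
	}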
int decref_flag = 0;
int killall_flag = 0;
void *rx_lnetmsg = NULL;
- CFS_LIST_HEAD (zombie_txs);
+ struct list_head zombie_txs = LIST_HEAD_INIT(zombie_txs);
if (peer == NULL) /* nothing to tear */
return;
}
/* we cannot finalize txs right now (bug #18844) */
- cfs_list_splice_init(&conn->uc_tx_list, &zombie_txs);
+ list_splice_init(&conn->uc_tx_list, &zombie_txs);
peer->up_conns[idx] = NULL;
conn->uc_peer = NULL;
for (i = 0; i < N_CONN_TYPES; i++)
LASSERT (peer->up_conns[i] == NULL);
- cfs_list_del(&peer->up_list);
+ list_del(&peer->up_list);
if (peer->up_errored &&
(peer->up_peerid.pid & LNET_PID_USERFLAG) == 0)
conn->uc_state = UC_RECEIVING_HELLO;
conn->uc_pt_idx = usocklnd_ip2pt_idx(peer_ip);
conn->uc_ni = ni;
- CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
- CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
+ INIT_LIST_HEAD(&conn->uc_tx_list);
+ INIT_LIST_HEAD(&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
conn->uc_peer = peer;
usocklnd_peer_addref(peer);
- CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
- CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
+ INIT_LIST_HEAD(&conn->uc_tx_list);
+ INIT_LIST_HEAD(&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
}
void
-usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist)
+usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist)
{
usock_tx_t *tx;
- while (!cfs_list_empty(txlist)) {
- tx = cfs_list_entry(txlist->next, usock_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
+ while (!list_empty(txlist)) {
+ tx = list_entry(txlist->next, usock_tx_t, tx_list);
+ list_del(&tx->tx_list);
usocklnd_destroy_tx(ni, tx);
}
}
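The drain loop above is the direct conversion of the old cfs_ helpers (pop the head
while the list is non-empty). Assuming the list_for_each_entry_safe iterator is also
available in this build, which is an assumption, an equivalent form would be:

	/* sketch of an equivalent drain, not what the patch does */
	void
	usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist)
	{
		usock_tx_t *tx;
		usock_tx_t *tmp;

		/* the _safe variant is needed because usocklnd_destroy_tx()
		 * frees the entry the cursor is standing on */
		list_for_each_entry_safe(tx, tmp, txlist, tx_list) {
			list_del(&tx->tx_list);
			usocklnd_destroy_tx(ni, tx);
		}
	}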
void
-usocklnd_destroy_zcack_list(cfs_list_t *zcack_list)
+usocklnd_destroy_zcack_list(struct list_head *zcack_list)
{
usock_zc_ack_t *zcack;
- while (!cfs_list_empty(zcack_list)) {
- zcack = cfs_list_entry(zcack_list->next, usock_zc_ack_t,
+ while (!list_empty(zcack_list)) {
+ zcack = list_entry(zcack_list->next, usock_zc_ack_t,
zc_list);
- cfs_list_del(&zcack->zc_list);
+ list_del(&zcack->zc_list);
LIBCFS_FREE (zcack, sizeof(*zcack));
}
lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
}
- if (!cfs_list_empty(&conn->uc_tx_list)) {
+ if (!list_empty(&conn->uc_tx_list)) {
LASSERT (conn->uc_peer != NULL);
usocklnd_destroy_txlist(conn->uc_peer->up_ni, &conn->uc_tx_list);
}
usock_peer_t *
usocklnd_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
- cfs_list_t *peer_list = usocklnd_nid2peerlist(id.nid);
- cfs_list_t *tmp;
+ struct list_head *peer_list = usocklnd_nid2peerlist(id.nid);
+ struct list_head *tmp;
usock_peer_t *peer;
- cfs_list_for_each (tmp, peer_list) {
+ list_for_each(tmp, peer_list) {
- peer = cfs_list_entry (tmp, usock_peer_t, up_list);
+ peer = list_entry(tmp, usock_peer_t, up_list);
if (peer->up_ni != ni)
continue;
/* peer table will take 1 of my refs on peer */
usocklnd_peer_addref(peer);
- cfs_list_add_tail (&peer->up_list,
+ list_add_tail(&peer->up_list,
usocklnd_nid2peerlist(id.nid));
} else {
usocklnd_peer_decref(peer); /* should destroy peer */
usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
{
if (conn->uc_state == UC_READY &&
- cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list) &&
+ list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list) &&
!conn->uc_sending) {
int rc = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST,
POLLOUT);
return rc;
}
- cfs_list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
+ list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
return 0;
}
int *send_immediately)
{
if (conn->uc_state == UC_READY &&
- cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list) &&
+ list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list) &&
!conn->uc_sending) {
conn->uc_sending = 1;
*send_immediately = 1;
}
*send_immediately = 0;
- cfs_list_add_tail(&tx->tx_list, &conn->uc_tx_list);
+ list_add_tail(&tx->tx_list, &conn->uc_tx_list);
}
/* Safely create new conn if needed. Save result in *connp.
* Don't try to link it to peer because the conn
* has already had a chance to proceed at the beginning */
if (peer == NULL) {
- LASSERT(cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list));
+ LASSERT(list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list));
usocklnd_conn_kill(conn);
return 0;
* make us zombie soon and take care of our txs and
* zc_acks */
- cfs_list_t tx_list, zcack_list;
+ struct list_head tx_list, zcack_list;
usock_conn_t *conn2;
int idx = usocklnd_type2idx(conn->uc_type);
- CFS_INIT_LIST_HEAD (&tx_list);
- CFS_INIT_LIST_HEAD (&zcack_list);
+ INIT_LIST_HEAD(&tx_list);
+ INIT_LIST_HEAD(&zcack_list);
/* Block usocklnd_send() to check peer->up_conns[idx]
* and to enqueue more txs */
conn2->uc_peer = peer;
/* unlink txs and zcack from the conn */
- cfs_list_add(&tx_list, &conn->uc_tx_list);
- cfs_list_del_init(&conn->uc_tx_list);
- cfs_list_add(&zcack_list, &conn->uc_zcack_list);
- cfs_list_del_init(&conn->uc_zcack_list);
+ list_add(&tx_list, &conn->uc_tx_list);
+ list_del_init(&conn->uc_tx_list);
+ list_add(&zcack_list, &conn->uc_zcack_list);
+ list_del_init(&conn->uc_zcack_list);
/* link them to conn2 */
- cfs_list_add(&conn2->uc_tx_list, &tx_list);
- cfs_list_del_init(&tx_list);
- cfs_list_add(&conn2->uc_zcack_list, &zcack_list);
- cfs_list_del_init(&zcack_list);
+ list_add(&conn2->uc_tx_list, &tx_list);
+ list_del_init(&tx_list);
+ list_add(&conn2->uc_zcack_list, &zcack_list);
+ list_del_init(&zcack_list);
/* make conn zombie */
conn->uc_peer = NULL;
* received hello, but maybe we have something to
* send? */
LASSERT (conn->uc_sending == 0);
- if ( !cfs_list_empty(&conn->uc_tx_list) ||
- !cfs_list_empty(&conn->uc_zcack_list) ) {
+ if (!list_empty(&conn->uc_tx_list) ||
+ !list_empty(&conn->uc_zcack_list)) {
conn->uc_tx_deadline =
cfs_time_shift(usock_tuns.ut_timeout);
LASSERT (peer != NULL);
ni = peer->up_ni;
- if (cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list)) {
+ if (list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list)) {
LASSERT(usock_tuns.ut_fair_limit > 1);
pthread_mutex_unlock(&conn->uc_lock);
return 0;
rc = usocklnd_send_tx(conn, tx);
if (rc == 0) { /* partial send or connection closed */
pthread_mutex_lock(&conn->uc_lock);
- cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
+ list_add(&tx->tx_list, &conn->uc_tx_list);
conn->uc_sending = 0;
pthread_mutex_unlock(&conn->uc_lock);
break;
pthread_mutex_lock(&conn->uc_lock);
conn->uc_sending = 0;
if (conn->uc_state != UC_DEAD &&
- cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list)) {
+ list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list)) {
conn->uc_tx_flag = 0;
ret = usocklnd_add_pollrequest(conn,
POLL_TX_SET_REQUEST, 0);
* brand new noop tx for zc_ack from zcack_list. Return NULL
* if an error happened */
usock_tx_t *
-usocklnd_try_piggyback(cfs_list_t *tx_list_p,
- cfs_list_t *zcack_list_p)
+usocklnd_try_piggyback(struct list_head *tx_list_p,
+ struct list_head *zcack_list_p)
{
usock_tx_t *tx;
usock_zc_ack_t *zc_ack;
/* assign tx and zc_ack */
- if (cfs_list_empty(tx_list_p))
+ if (list_empty(tx_list_p))
tx = NULL;
else {
- tx = cfs_list_entry(tx_list_p->next, usock_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
+ tx = list_entry(tx_list_p->next, usock_tx_t, tx_list);
+ list_del(&tx->tx_list);
/* already piggybacked or partially sent */
if (tx->tx_msg.ksm_zc_cookies[1] != 0 ||
return tx;
}
- if (cfs_list_empty(zcack_list_p)) {
+ if (list_empty(zcack_list_p)) {
/* nothing to piggyback */
return tx;
} else {
- zc_ack = cfs_list_entry(zcack_list_p->next,
+ zc_ack = list_entry(zcack_list_p->next,
usock_zc_ack_t, zc_list);
- cfs_list_del(&zc_ack->zc_list);
+ list_del(&zc_ack->zc_list);
}
if (tx != NULL)
{
usock_conn_t *conn2;
usock_peer_t *peer;
- cfs_list_t tx_list;
- cfs_list_t zcack_list;
+ struct list_head tx_list;
+ struct list_head zcack_list;
int idx;
int rc = 0;
/* all code below is race resolution, because normally
* passive conn is linked to peer just after receiving hello */
- CFS_INIT_LIST_HEAD (&tx_list);
- CFS_INIT_LIST_HEAD (&zcack_list);
+ INIT_LIST_HEAD(&tx_list);
+ INIT_LIST_HEAD(&zcack_list);
/* conn is passive and isn't linked to any peer,
so its tx and zc_ack lists have to be empty */
- LASSERT (cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list) &&
+ LASSERT(list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list) &&
conn->uc_sending == 0);
rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
* We're sure that nobody but us can access the conn;
* nevertheless we take the mutex (if we're wrong, a
* deadlock is easier to spot than a corrupted list) */
- cfs_list_add(&tx_list, &conn2->uc_tx_list);
- cfs_list_del_init(&conn2->uc_tx_list);
- cfs_list_add(&zcack_list, &conn2->uc_zcack_list);
- cfs_list_del_init(&conn2->uc_zcack_list);
+ list_add(&tx_list, &conn2->uc_tx_list);
+ list_del_init(&conn2->uc_tx_list);
+ list_add(&zcack_list, &conn2->uc_zcack_list);
+ list_del_init(&conn2->uc_zcack_list);
pthread_mutex_lock(&conn->uc_lock);
- cfs_list_add_tail(&conn->uc_tx_list, &tx_list);
- cfs_list_del_init(&tx_list);
- cfs_list_add_tail(&conn->uc_zcack_list, &zcack_list);
- cfs_list_del_init(&zcack_list);
+ list_add_tail(&conn->uc_tx_list, &tx_list);
+ list_del_init(&tx_list);
+ list_add_tail(&conn->uc_zcack_list, &zcack_list);
+ list_del_init(&zcack_list);
conn->uc_peer = peer;
pthread_mutex_unlock(&conn->uc_lock);
/* we're ready to receive incoming packets and maybe
already have something to transmit */
LASSERT (conn->uc_sending == 0);
- if ( cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list) ) {
+ if (list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list)) {
conn->uc_tx_flag = 0;
rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
POLLIN);
void
usocklnd_process_stale_list(usock_pollthread_t *pt_data)
{
- while (!cfs_list_empty(&pt_data->upt_stale_list)) {
+ while (!list_empty(&pt_data->upt_stale_list)) {
usock_conn_t *conn;
- conn = cfs_list_entry(pt_data->upt_stale_list.next,
+ conn = list_entry(pt_data->upt_stale_list.next,
usock_conn_t, uc_stale_list);
- cfs_list_del(&conn->uc_stale_list);
+ list_del(&conn->uc_stale_list);
usocklnd_tear_peer_conn(conn);
usocklnd_conn_decref(conn); /* -1 for idx2conn[idx] or pr */
/* Process all enqueued poll requests */
pthread_mutex_lock(&pt_data->upt_pollrequests_lock);
- while (!cfs_list_empty(&pt_data->upt_pollrequests)) {
+ while (!list_empty(&pt_data->upt_pollrequests)) {
usock_pollrequest_t *pr;
- pr = cfs_list_entry(pt_data->upt_pollrequests.next,
+ pr = list_entry(pt_data->upt_pollrequests.next,
usock_pollrequest_t, upr_list);
- cfs_list_del(&pr->upr_list);
+ list_del(&pr->upr_list);
rc = usocklnd_process_pollrequest(pr, pt_data);
if (rc)
break;
/* Block new poll requests to be enqueued */
pt_data->upt_errno = rc;
- while (!cfs_list_empty(&pt_data->upt_pollrequests)) {
+ while (!list_empty(&pt_data->upt_pollrequests)) {
usock_pollrequest_t *pr;
- pr = cfs_list_entry(pt_data->upt_pollrequests.next,
+ pr = list_entry(pt_data->upt_pollrequests.next,
usock_pollrequest_t, upr_list);
- cfs_list_del(&pr->upr_list);
+ list_del(&pr->upr_list);
if (pr->upr_type == POLL_ADD_REQUEST) {
libcfs_sock_release(pr->upr_conn->uc_sock);
- cfs_list_add_tail(&pr->upr_conn->uc_stale_list,
+ list_add_tail(&pr->upr_conn->uc_stale_list,
&pt_data->upt_stale_list);
} else {
usocklnd_conn_decref(pr->upr_conn);
return rc;
}
- cfs_list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
+ list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
pthread_mutex_unlock(&pt->upt_pollrequests_lock);
return 0;
}
return; /* conn will be killed in poll thread anyway */
}
- cfs_list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
+ list_add_tail(&pr->upr_list, &pt->upt_pollrequests);
pthread_mutex_unlock(&pt->upt_pollrequests_lock);
conn->uc_preq = NULL;
}
libcfs_sock_release(conn->uc_sock);
- cfs_list_add_tail(&conn->uc_stale_list,
+ list_add_tail(&conn->uc_stale_list,
&pt_data->upt_stale_list);
break;
case POLL_RX_SET_REQUEST:
pt->upt_idx2conn[0] = NULL;
pt->upt_errno = 0;
- CFS_INIT_LIST_HEAD (&pt->upt_pollrequests);
- CFS_INIT_LIST_HEAD (&pt->upt_stale_list);
+ INIT_LIST_HEAD(&pt->upt_pollrequests);
+ INIT_LIST_HEAD(&pt->upt_stale_list);
pthread_mutex_init(&pt->upt_pollrequests_lock, NULL);
init_completion(&pt->upt_completion);
}
/* Initialize peer hash list */
for (i = 0; i < UD_PEER_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&usock_data.ud_peers[i]);
+ INIT_LIST_HEAD(&usock_data.ud_peers[i]);
pthread_rwlock_init(&usock_data.ud_peers_lock, NULL);
void
usocklnd_del_all_peers(lnet_ni_t *ni)
{
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
usock_peer_t *peer;
int i;
pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
for (i = 0; i < UD_PEER_HASH_SIZE; i++) {
- cfs_list_for_each_safe (ptmp, pnxt, &usock_data.ud_peers[i]) {
- peer = cfs_list_entry (ptmp, usock_peer_t, up_list);
+ list_for_each_safe(ptmp, pnxt, &usock_data.ud_peers[i]) {
+ peer = list_entry(ptmp, usock_peer_t, up_list);
if (peer->up_ni != ni)
continue;
pthread_mutex_unlock(&peer->up_lock);
/* peer hash list is still protected by the caller */
- cfs_list_del(&peer->up_list);
+ list_del(&peer->up_list);
usocklnd_peer_decref(peer); /* peer isn't in hash list anymore */
}
#include <lnet/socklnd.h>
typedef struct {
- cfs_list_t tx_list; /* neccessary to form tx list */
+ struct list_head tx_list; /* necessary to form tx list */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
ksock_msg_t tx_msg; /* buffer for wire header of ksock msg */
int tx_resid; /* # of residual bytes */
struct usock_preq_s *uc_preq; /* preallocated request */
__u32 uc_peer_ip; /* IP address of the peer */
__u16 uc_peer_port; /* port of the peer */
- cfs_list_t uc_stale_list; /* orphaned connections */
+ struct list_head uc_stale_list; /* orphaned connections */
/* Receive state */
int uc_rx_state; /* message or hello state */
ksock_msg_t uc_rx_msg; /* message buffer */
/* Send state */
- cfs_list_t uc_tx_list; /* pending txs */
- cfs_list_t uc_zcack_list; /* pending zc_acks */
+ struct list_head uc_tx_list; /* pending txs */
+ struct list_head uc_zcack_list; /* pending zc_acks */
cfs_time_t uc_tx_deadline; /* when to time out */
int uc_tx_flag; /* deadline valid? */
int uc_sending; /* send op is in progress */
#define N_CONN_TYPES 3 /* CONTROL, BULK_IN and BULK_OUT */
typedef struct usock_peer_s {
- cfs_list_t up_list; /* neccessary to form peer list */
+ /* necessary to form peer list */
+ struct list_head up_list;
lnet_process_id_t up_peerid; /* id of remote peer */
usock_conn_t *up_conns[N_CONN_TYPES]; /* conns that connect us
* with the peer */
__u64 up_incarnation; /* peer's incarnation */
int up_incrn_is_set;/* 0 if peer's incarnation
* hasn't been set so far */
- mt_atomic_t up_refcount; /* # of users */
+ mt_atomic_t up_refcount; /* # of users */
pthread_mutex_t up_lock; /* serialize */
int up_errored; /* a flag for lnet_notify() */
cfs_time_t up_last_alive; /* when the peer was last alive */
* by fd */
int upt_nfd2idx; /* # of allocated elements
* of upt_fd2idx[] */
- cfs_list_t upt_stale_list; /* list of orphaned conns */
- cfs_list_t upt_pollrequests; /* list of poll requests */
+ struct list_head upt_stale_list; /* list of orphaned conns */
+ struct list_head upt_pollrequests; /* list of poll requests */
pthread_mutex_t upt_pollrequests_lock; /* serialize */
int upt_errno; /* non-zero if errored */
- struct completion upt_completion; /* wait/signal facility for
+ struct completion upt_completion; /* wait/signal facility for
* synchronizing shutdown */
} usock_pollthread_t;
usock_pollthread_t *ud_pollthreads; /* their state */
int ud_shutdown; /* shutdown flag */
int ud_nets_count; /* # of instances */
- cfs_list_t ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
+ struct list_head ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
pthread_rwlock_t ud_peers_lock; /* serialize */
} usock_data_t;
short upr_value; /* bitmask of POLLIN and POLLOUT bits */
usock_conn_t * upr_conn; /* a conn for the sake of which
* action will be performed */
- cfs_list_t upr_list; /* neccessary to form list */
+ struct list_head upr_list; /* necessary to form list */
} usock_pollrequest_t;
/* Allowable poll request types are: */
#define POLL_SET_REQUEST 5
typedef struct {
- cfs_list_t zc_list; /* neccessary to form zc_ack list */
+ struct list_head zc_list; /* necessary to form zc_ack list */
__u64 zc_cookie; /* zero-copy cookie */
} usock_zc_ack_t;
return ip % usock_data.ud_npollthreads;
}
-static inline cfs_list_t *
+static inline struct list_head *
usocklnd_nid2peerlist(lnet_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % UD_PEER_HASH_SIZE;
int usocklnd_activeconn_hellorecv(usock_conn_t *conn);
int usocklnd_passiveconn_hellorecv(usock_conn_t *conn);
int usocklnd_write_handler(usock_conn_t *conn);
-usock_tx_t * usocklnd_try_piggyback(cfs_list_t *tx_list_p,
- cfs_list_t *zcack_list_p);
+usock_tx_t *usocklnd_try_piggyback(struct list_head *tx_list_p,
+ struct list_head *zcack_list_p);
int usocklnd_activeconn_hellosent(usock_conn_t *conn);
int usocklnd_passiveconn_hellosent(usock_conn_t *conn);
int usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx);
usock_tx_t *usocklnd_create_cr_hello_tx(lnet_ni_t *ni,
int type, lnet_nid_t peer_nid);
void usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx);
-void usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist);
-void usocklnd_destroy_zcack_list(cfs_list_t *zcack_list);
+void usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist);
+void usocklnd_destroy_zcack_list(struct list_head *zcack_list);
void usocklnd_destroy_peer (usock_peer_t *peer);
int usocklnd_get_conn_type(lnet_msg_t *lntmsg);
int usocklnd_type2idx(int type);
rc = usocklnd_send_tx(conn, tx);
if (rc == 0) { /* partial send or connection closed */
pthread_mutex_lock(&conn->uc_lock);
- cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
+ list_add(&tx->tx_list, &conn->uc_tx_list);
conn->uc_sending = 0;
pthread_mutex_unlock(&conn->uc_lock);
partial_send = 1;
/* schedule write handler */
if (partial_send ||
(conn->uc_state == UC_READY &&
- (!cfs_list_empty(&conn->uc_tx_list) ||
- !cfs_list_empty(&conn->uc_zcack_list)))) {
+ (!list_empty(&conn->uc_tx_list) ||
+ !list_empty(&conn->uc_zcack_list)))) {
conn->uc_tx_deadline =
cfs_time_shift(usock_tuns.ut_timeout);
conn->uc_tx_flag = 1;
}
void
-lst_free_rpcent(cfs_list_t *head)
+lst_free_rpcent(struct list_head *head)
{
- lstcon_rpc_ent_t *ent;
+ lstcon_rpc_ent_t *ent;
- while (!cfs_list_empty(head)) {
- ent = cfs_list_entry(head->next, lstcon_rpc_ent_t, rpe_link);
+ while (!list_empty(head)) {
+ ent = list_entry(head->next, lstcon_rpc_ent_t, rpe_link);
- cfs_list_del(&ent->rpe_link);
- free(ent);
- }
+ list_del(&ent->rpe_link);
+ free(ent);
+ }
}
void
-lst_reset_rpcent(cfs_list_t *head)
+lst_reset_rpcent(struct list_head *head)
{
- lstcon_rpc_ent_t *ent;
+ lstcon_rpc_ent_t *ent;
- cfs_list_for_each_entry_typed(ent, head, lstcon_rpc_ent_t, rpe_link) {
- ent->rpe_sid = LST_INVALID_SID;
- ent->rpe_peer.nid = LNET_NID_ANY;
- ent->rpe_peer.pid = LNET_PID_ANY;
- ent->rpe_rpc_errno = ent->rpe_fwk_errno = 0;
- }
+ list_for_each_entry(ent, head, rpe_link) {
+ ent->rpe_sid = LST_INVALID_SID;
+ ent->rpe_peer.nid = LNET_NID_ANY;
+ ent->rpe_peer.pid = LNET_PID_ANY;
+ ent->rpe_rpc_errno = ent->rpe_fwk_errno = 0;
+ }
}
int
-lst_alloc_rpcent(cfs_list_t *head, int count, int offset)
+lst_alloc_rpcent(struct list_head *head, int count, int offset)
{
lstcon_rpc_ent_t *ent;
int i;
memset(ent, 0, offsetof(lstcon_rpc_ent_t, rpe_payload[offset]));
- ent->rpe_sid = LST_INVALID_SID;
- ent->rpe_peer.nid = LNET_NID_ANY;
- ent->rpe_peer.pid = LNET_PID_ANY;
- cfs_list_add(&ent->rpe_link, head);
- }
+ ent->rpe_sid = LST_INVALID_SID;
+ ent->rpe_peer.nid = LNET_NID_ANY;
+ ent->rpe_peer.pid = LNET_PID_ANY;
+ list_add(&ent->rpe_link, head);
+ }
- return 0;
+ return 0;
}
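Together, lst_alloc_rpcent(), lst_reset_rpcent() and lst_free_rpcent() implement a
simple allocate / consume / free pattern around a caller-owned list head, which is
how the jt_lst_* commands below drive them. A minimal caller sketch (the count of 16
and the error handling are illustrative only):

	/* illustrative only; mirrors how jt_lst_ping() uses these helpers */
	static int demo_collect_results(void)
	{
		struct list_head head;
		lstcon_rpc_ent_t *ent;
		int rc;

		INIT_LIST_HEAD(&head);

		rc = lst_alloc_rpcent(&head, 16, LST_NAME_SIZE);
		if (rc != 0)
			return rc;

		/* ... an lst ioctl fills the entries chained on "head" ... */

		list_for_each_entry(ent, &head, rpe_link) {
			if (ent->rpe_rpc_errno != 0 || ent->rpe_fwk_errno != 0)
				fprintf(stderr, "error on %s\n",
					libcfs_id2str(ent->rpe_peer));
		}

		lst_free_rpcent(&head);
		return 0;
	}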
void
-lst_print_transerr(cfs_list_t *head, char *optstr)
+lst_print_transerr(struct list_head *head, char *optstr)
{
- lstcon_rpc_ent_t *ent;
+ lstcon_rpc_ent_t *ent;
- cfs_list_for_each_entry_typed(ent, head, lstcon_rpc_ent_t, rpe_link) {
- if (ent->rpe_rpc_errno == 0 && ent->rpe_fwk_errno == 0)
- continue;
+ list_for_each_entry(ent, head, rpe_link) {
+ if (ent->rpe_rpc_errno == 0 && ent->rpe_fwk_errno == 0)
+ continue;
if (ent->rpe_rpc_errno != 0) {
fprintf(stderr, "%s RPC failed on %s: %s\n",
int *idx, int *count, lstcon_node_ent_t *dents);
int lst_query_batch_ioctl(char *batch, int test, int server,
- int timeout, cfs_list_t *head);
+ int timeout, struct list_head *head);
int
lst_ioctl(unsigned int opc, void *buf, int len)
int
lst_ping_ioctl(char *str, int type, int timeout,
- int count, lnet_process_id_t *ids, cfs_list_t *head)
+ int count, lnet_process_id_t *ids, struct list_head *head)
{
lstio_debug_args_t args = {0};
int
jt_lst_ping(int argc, char **argv)
{
- cfs_list_t head;
- lnet_process_id_t *ids = NULL;
- lstcon_rpc_ent_t *ent = NULL;
+ struct list_head head;
+ lnet_process_id_t *ids = NULL;
+ lstcon_rpc_ent_t *ent = NULL;
char *str = NULL;
int optidx = 0;
int server = 0;
return -1;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
rc = lst_alloc_rpcent(&head, count, LST_NAME_SIZE);
if (rc != 0) {
goto out;
}
- /* ignore RPC errors and framwork errors */
- cfs_list_for_each_entry_typed(ent, &head, lstcon_rpc_ent_t, rpe_link) {
- fprintf(stdout, "\t%s: %s [session: %s id: %s]\n",
- libcfs_id2str(ent->rpe_peer),
- lst_node_state2str(ent->rpe_state),
- (ent->rpe_state == LST_NODE_ACTIVE ||
- ent->rpe_state == LST_NODE_BUSY)?
- (ent->rpe_rpc_errno == 0 ?
- &ent->rpe_payload[0] : "Unknown") :
- "<NULL>", libcfs_nid2str(ent->rpe_sid.ses_nid));
- }
+ /* ignore RPC errors and framework errors */
+ list_for_each_entry(ent, &head, rpe_link) {
+ fprintf(stdout, "\t%s: %s [session: %s id: %s]\n",
+ libcfs_id2str(ent->rpe_peer),
+ lst_node_state2str(ent->rpe_state),
+ (ent->rpe_state == LST_NODE_ACTIVE ||
+ ent->rpe_state == LST_NODE_BUSY) ?
+ (ent->rpe_rpc_errno == 0 ?
+ &ent->rpe_payload[0] : "Unknown") :
+ "<NULL>", libcfs_nid2str(ent->rpe_sid.ses_nid));
+ }
out:
lst_free_rpcent(&head);
int
lst_add_nodes_ioctl (char *name, int count, lnet_process_id_t *ids,
- unsigned *featp, cfs_list_t *resultp)
+ unsigned *featp, struct list_head *resultp)
{
lstio_group_nodes_args_t args = {0};
int
jt_lst_add_group(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
lnet_process_id_t *ids;
char *name;
unsigned feats = session_features;
return -1;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
for (i = 2; i < argc; i++) {
/* parse address list */
int
lst_update_group_ioctl(int opc, char *name, int clean, int count,
- lnet_process_id_t *ids, cfs_list_t *resultp)
+ lnet_process_id_t *ids, struct list_head *resultp)
{
lstio_group_update_args_t args = {0};
int
jt_lst_update_group(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
lnet_process_id_t *ids = NULL;
char *str = NULL;
char *grp = NULL;
grp = argv[optind];
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
if (opc == LST_GROUP_RMND || opc == LST_GROUP_REFRESH) {
rc = lst_get_node_count(opc == LST_GROUP_RMND ? LST_OPC_NODES :
int
lst_stat_ioctl (char *name, int count, lnet_process_id_t *idsp,
- int timeout, cfs_list_t *resultp)
+ int timeout, struct list_head *resultp)
{
lstio_stat_args_t args = {0};
}
typedef struct {
- cfs_list_t srp_link;
+ struct list_head srp_link;
int srp_count;
char *srp_name;
lnet_process_id_t *srp_ids;
- cfs_list_t srp_result[2];
+ struct list_head srp_result[2];
} lst_stat_req_param_t;
static void
return -ENOMEM;
memset(srp, 0, sizeof(*srp));
- CFS_INIT_LIST_HEAD(&srp->srp_result[0]);
- CFS_INIT_LIST_HEAD(&srp->srp_result[1]);
+ INIT_LIST_HEAD(&srp->srp_result[0]);
+ INIT_LIST_HEAD(&srp->srp_result[1]);
rc = lst_get_node_count(LST_OPC_GROUP, name,
&srp->srp_count, NULL);
}
void
-lst_print_stat(char *name, cfs_list_t *resultp,
+lst_print_stat(char *name, struct list_head *resultp,
int idx, int lnet, int bwrt, int rdwr, int type)
{
- cfs_list_t tmp[2];
+ struct list_head tmp[2];
lstcon_rpc_ent_t *new;
lstcon_rpc_ent_t *old;
sfw_counters_t *sfwk_new;
float delta;
int errcount = 0;
- CFS_INIT_LIST_HEAD(&tmp[0]);
- CFS_INIT_LIST_HEAD(&tmp[1]);
+ INIT_LIST_HEAD(&tmp[0]);
+ INIT_LIST_HEAD(&tmp[1]);
memset(&lnet_stat_result, 0, sizeof(lnet_stat_result));
- while (!cfs_list_empty(&resultp[idx])) {
- if (cfs_list_empty(&resultp[1 - idx])) {
+ while (!list_empty(&resultp[idx])) {
+ if (list_empty(&resultp[1 - idx])) {
fprintf(stderr, "Group is changed, re-run stat\n");
break;
}
- new = cfs_list_entry(resultp[idx].next, lstcon_rpc_ent_t,
+ new = list_entry(resultp[idx].next, lstcon_rpc_ent_t,
rpe_link);
- old = cfs_list_entry(resultp[1 - idx].next, lstcon_rpc_ent_t,
+ old = list_entry(resultp[1 - idx].next, lstcon_rpc_ent_t,
rpe_link);
/* first time get stats result, can't calculate diff */
break;
}
- cfs_list_del(&new->rpe_link);
- cfs_list_add_tail(&new->rpe_link, &tmp[idx]);
+ list_del(&new->rpe_link);
+ list_add_tail(&new->rpe_link, &tmp[idx]);
- cfs_list_del(&old->rpe_link);
- cfs_list_add_tail(&old->rpe_link, &tmp[1 - idx]);
+ list_del(&old->rpe_link);
+ list_add_tail(&old->rpe_link, &tmp[1 - idx]);
if (new->rpe_rpc_errno != 0 || new->rpe_fwk_errno != 0 ||
old->rpe_rpc_errno != 0 || old->rpe_fwk_errno != 0) {
lst_cal_lnet_stat(delta, lnet_new, lnet_old);
}
- cfs_list_splice(&tmp[idx], &resultp[idx]);
- cfs_list_splice(&tmp[1 - idx], &resultp[1 - idx]);
+ list_splice(&tmp[idx], &resultp[idx]);
+ list_splice(&tmp[1 - idx], &resultp[1 - idx]);
if (errcount > 0)
fprintf(stdout, "Failed to stat on %d nodes\n", errcount);
int
jt_lst_stat(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
lst_stat_req_param_t *srp;
time_t last = 0;
int optidx = 0;
if (count != -1)
count++;
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
while (optind < argc) {
rc = lst_stat_req_param_alloc(argv[optind++], &srp, 1);
if (rc != 0)
goto out;
- cfs_list_add_tail(&srp->srp_link, &head);
+ list_add_tail(&srp->srp_link, &head);
}
do {
sleep(delay - now + last);
time(&now);
}
- last = now;
-
- cfs_list_for_each_entry_typed(srp, &head, lst_stat_req_param_t,
- srp_link) {
+ last = now;
+ list_for_each_entry(srp, &head, srp_link) {
rc = lst_stat_ioctl(srp->srp_name,
srp->srp_count, srp->srp_ids,
timeout, &srp->srp_result[idx]);
} while (count == -1 || count > 0);
out:
- while (!cfs_list_empty(&head)) {
- srp = cfs_list_entry(head.next, lst_stat_req_param_t, srp_link);
+ while (!list_empty(&head)) {
+ srp = list_entry(head.next, lst_stat_req_param_t, srp_link);
- cfs_list_del(&srp->srp_link);
+ list_del(&srp->srp_link);
lst_stat_req_param_free(srp);
}
int
jt_lst_show_error(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
lst_stat_req_param_t *srp;
lstcon_rpc_ent_t *ent;
sfw_counters_t *sfwk;
return -1;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
while (optind < argc) {
rc = lst_stat_req_param_alloc(argv[optind++], &srp, 0);
if (rc != 0)
goto out;
- cfs_list_add_tail(&srp->srp_link, &head);
+ list_add_tail(&srp->srp_link, &head);
}
- cfs_list_for_each_entry_typed(srp, &head, lst_stat_req_param_t,
- srp_link) {
+ list_for_each_entry(srp, &head, srp_link) {
rc = lst_stat_ioctl(srp->srp_name, srp->srp_count,
srp->srp_ids, 10, &srp->srp_result[0]);
ecount = 0;
- cfs_list_for_each_entry_typed(ent, &srp->srp_result[0],
- lstcon_rpc_ent_t, rpe_link) {
+ list_for_each_entry(ent, &srp->srp_result[0], rpe_link) {
if (ent->rpe_rpc_errno != 0) {
ecount ++;
fprintf(stderr, "RPC failure, can't show error on %s\n",
fprintf(stdout, "Total %d error nodes in %s\n", ecount, srp->srp_name);
}
out:
- while (!cfs_list_empty(&head)) {
- srp = cfs_list_entry(head.next, lst_stat_req_param_t, srp_link);
+ while (!list_empty(&head)) {
+ srp = list_entry(head.next, lst_stat_req_param_t, srp_link);
- cfs_list_del(&srp->srp_link);
+ list_del(&srp->srp_link);
lst_stat_req_param_free(srp);
}
}
int
-lst_start_batch_ioctl (char *name, int timeout, cfs_list_t *resultp)
+lst_start_batch_ioctl(char *name, int timeout, struct list_head *resultp)
{
lstio_batch_run_args_t args = {0};
int
jt_lst_start_batch(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
char *batch;
int optidx = 0;
int timeout = 0;
return -1;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
rc = lst_alloc_rpcent(&head, count, 0);
if (rc != 0) {
}
int
-lst_stop_batch_ioctl(char *name, int force, cfs_list_t *resultp)
+lst_stop_batch_ioctl(char *name, int force, struct list_head *resultp)
{
lstio_batch_stop_args_t args = {0};
int
jt_lst_stop_batch(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
char *batch;
int force = 0;
int optidx;
return -1;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
rc = lst_alloc_rpcent(&head, count, 0);
if (rc != 0) {
int
lst_query_batch_ioctl(char *batch, int test, int server,
- int timeout, cfs_list_t *head)
+ int timeout, struct list_head *head)
{
lstio_batch_query_args_t args = {0};
}
void
-lst_print_tsb_verbose(cfs_list_t *head,
+lst_print_tsb_verbose(struct list_head *head,
int active, int idle, int error)
{
lstcon_rpc_ent_t *ent;
- cfs_list_for_each_entry_typed(ent, head, lstcon_rpc_ent_t, rpe_link) {
+ list_for_each_entry(ent, head, rpe_link) {
if (ent->rpe_priv[0] == 0 && active)
continue;
jt_lst_query_batch(int argc, char **argv)
{
lstcon_test_batch_ent_t ent;
- cfs_list_t head;
+ struct list_head head;
char *batch = NULL;
time_t last = 0;
int optidx = 0;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
if (verbose) {
rc = lst_info_batch_ioctl(batch, test, server,
int
lst_add_test_ioctl(char *batch, int type, int loop, int concur,
int dist, int span, char *sgrp, char *dgrp,
- void *param, int plen, int *retp, cfs_list_t *resultp)
+ void *param, int plen, int *retp, struct list_head *resultp)
{
lstio_test_args_t args = {0};
int
jt_lst_add_test(int argc, char **argv)
{
- cfs_list_t head;
+ struct list_head head;
char *batch = NULL;
char *test = NULL;
char *dstr = NULL;
return -1;
}
- CFS_INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&head);
rc = lst_get_node_count(LST_OPC_GROUP, from, &fcount, NULL);
if (rc != 0) {