When declaring a local list head, instead of
struct list_head list;
INIT_LIST_HEAD(&list);
use
LIST_HEAD(list);
which does both steps.
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: Ia1f1f1abf1b8a9f50e3033976990010b1d2100db
Reviewed-on: https://review.whamcloud.com/36954
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Chris Horn <hornc@cray.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
{
struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
struct list_head *ctmp, *ctmpN;
{
struct list_head *peers = &kgnilnd_data.kgn_peers[idx];
struct list_head *ctmp, *ctmpN;
- struct list_head geriatrics;
- struct list_head souls;
-
- INIT_LIST_HEAD(&geriatrics);
- INIT_LIST_HEAD(&souls);
+ LIST_HEAD(geriatrics);
+ LIST_HEAD(souls);
write_lock(&kgnilnd_data.kgn_peer_conn_lock);
write_lock(&kgnilnd_data.kgn_peer_conn_lock);
int
kgnilnd_cancel_net_dgrams(kgn_net_t *net)
{
int
kgnilnd_cancel_net_dgrams(kgn_net_t *net)
{
- kgn_dgram_t *dg, *dgN;
- struct list_head zombies;
- int i;
+ kgn_dgram_t *dg, *dgN;
+ LIST_HEAD(zombies);
+ int i;
ENTRY;
/* we want to cancel any outstanding dgrams - we don't want to rely
ENTRY;
/* we want to cancel any outstanding dgrams - we don't want to rely
"in reset %d\n", net->gnn_shutdown,
kgnilnd_data.kgn_in_reset);
"in reset %d\n", net->gnn_shutdown,
kgnilnd_data.kgn_in_reset);
- INIT_LIST_HEAD(&zombies);
-
spin_lock(&net->gnn_dev->gnd_dgram_lock);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
spin_lock(&net->gnn_dev->gnd_dgram_lock);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
{
kgn_dgram_t *dg, *dgN;
kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
{
kgn_dgram_t *dg, *dgN;
- struct list_head zombies;
ENTRY;
/* Time to kill the outstanding WC's
ENTRY;
/* Time to kill the outstanding WC's
"in reset %d\n", kgnilnd_data.kgn_wc_kill,
kgnilnd_data.kgn_in_reset);
"in reset %d\n", kgnilnd_data.kgn_wc_kill,
kgnilnd_data.kgn_in_reset);
- INIT_LIST_HEAD(&zombies);
spin_lock(&dev->gnd_dgram_lock);
do {
spin_lock(&dev->gnd_dgram_lock);
do {
bool
kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
{
bool
kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- char *reason = NULL;
- struct list_head txs;
- unsigned long flags;
-
- INIT_LIST_HEAD(&txs);
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ LIST_HEAD(txs);
+ unsigned long flags;
write_lock_irqsave(glock, flags);
if (peer_ni->ibp_reconnecting == 0) {
write_lock_irqsave(glock, flags);
if (peer_ni->ibp_reconnecting == 0) {
{
struct kib_peer_ni *peer_ni = conn->ibc_peer;
struct kib_tx *tx;
{
struct kib_peer_ni *peer_ni = conn->ibc_peer;
struct kib_tx *tx;
unsigned long flags;
int active;
unsigned long flags;
int active;
}
/* grab pending txs while I have the lock */
}
/* grab pending txs while I have the lock */
list_splice_init(&peer_ni->ibp_tx_queue, &txs);
if (!kiblnd_peer_active(peer_ni) || /* peer_ni has been deleted */
list_splice_init(&peer_ni->ibp_tx_queue, &txs);
if (!kiblnd_peer_active(peer_ni) || /* peer_ni has been deleted */
wait_queue_entry_t wait;
struct ksock_conn *conn;
struct ksock_sched *sched;
wait_queue_entry_t wait;
struct ksock_conn *conn;
struct ksock_sched *sched;
- struct list_head enomem_conns;
+ LIST_HEAD(enomem_conns);
int nenomem_conns;
time64_t timeout;
int i;
int nenomem_conns;
time64_t timeout;
int i;
- INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
lnet_shutdown_lndnets(void)
{
struct lnet_net *net;
lnet_shutdown_lndnets(void)
{
struct lnet_net *net;
- struct list_head resend;
struct lnet_msg *msg, *tmp;
struct lnet_msg *msg, *tmp;
- INIT_LIST_HEAD(&resend);
-
/* NB called holding the global mutex */
/* All quiet on the API front */
/* NB called holding the global mutex */
/* All quiet on the API front */
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
struct lnet_ni *ni;
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
struct lnet_ni *ni;
- struct lnet_net *net_l = NULL;
- struct list_head local_ni_list;
- int rc;
- int ni_count = 0;
- __u32 lnd_type;
+ struct lnet_net *net_l = NULL;
+ LIST_HEAD(local_ni_list);
+ int rc;
+ int ni_count = 0;
+ __u32 lnd_type;
const struct lnet_lnd *lnd;
const struct lnet_lnd *lnd;
net->net_tunables.lct_peer_timeout;
net->net_tunables.lct_peer_timeout;
net->net_tunables.lct_max_tx_credits;
net->net_tunables.lct_max_tx_credits;
net->net_tunables.lct_peer_rtr_credits;
net->net_tunables.lct_peer_rtr_credits;
- INIT_LIST_HEAD(&local_ni_list);
-
/*
* make sure that this net is unique. If it isn't then
* we are adding interfaces to an already existing network, and
/*
* make sure that this net is unique. If it isn't then
* we are adding interfaces to an already existing network, and
int ni_count;
struct lnet_ping_buffer *pbuf;
struct lnet_handle_md ping_mdh;
int ni_count;
struct lnet_ping_buffer *pbuf;
struct lnet_handle_md ping_mdh;
- struct list_head net_head;
- INIT_LIST_HEAD(&net_head);
-
mutex_lock(&the_lnet.ln_api_mutex);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
mutex_lock(&the_lnet.ln_api_mutex);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
struct lnet_net *net;
char *nets;
int rc;
struct lnet_net *net;
char *nets;
int rc;
- struct list_head net_head;
-
- INIT_LIST_HEAD(&net_head);
rc = lnet_parse_ip2nets(&nets, ip2nets);
if (rc < 0)
rc = lnet_parse_ip2nets(&nets, ip2nets);
if (rc < 0)
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
- struct lnet_net *net;
- struct list_head net_head;
- int rc;
+ struct lnet_net *net;
+ LIST_HEAD(net_head);
+ int rc;
struct lnet_ioctl_config_lnd_tunables tun;
char *nets = conf->cfg_config_u.cfg_net.net_intf;
struct lnet_ioctl_config_lnd_tunables tun;
char *nets = conf->cfg_config_u.cfg_net.net_intf;
- INIT_LIST_HEAD(&net_head);
-
/* Create a net/ni structures for the network string */
rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
if (rc <= 0)
/* Create a net/ni structures for the network string */
rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
if (rc <= 0)
static int
lnet_str2tbs_sep(struct list_head *tbs, char *str)
{
static int
lnet_str2tbs_sep(struct list_head *tbs, char *str)
{
- struct list_head pending;
- char *sep;
- int nob;
- int i;
- struct lnet_text_buf *ltb;
-
- INIT_LIST_HEAD(&pending);
+ LIST_HEAD(pending);
+ char *sep;
+ int nob;
+ int i;
+ struct lnet_text_buf *ltb;
/* Split 'str' into separate commands */
for (;;) {
/* Split 'str' into separate commands */
for (;;) {
lnet_str2tbs_expand(struct list_head *tbs, char *str)
{
char num[16];
lnet_str2tbs_expand(struct list_head *tbs, char *str)
{
char num[16];
- struct list_head pending;
char *sep;
char *sep2;
char *parsed;
char *sep;
char *sep2;
char *parsed;
- INIT_LIST_HEAD(&pending);
-
sep = strchr(str, '[');
if (sep == NULL) /* nothing to expand */
return 0;
sep = strchr(str, '[');
if (sep == NULL) /* nothing to expand */
return 0;
lnet_parse_route (char *str, int *im_a_router)
{
/* static scratch buffer OK (single threaded) */
lnet_parse_route (char *str, int *im_a_router)
{
/* static scratch buffer OK (single threaded) */
- static char cmd[LNET_SINGLE_TEXTBUF_NOB];
+ static char cmd[LNET_SINGLE_TEXTBUF_NOB];
- struct list_head nets;
- struct list_head gateways;
+ LIST_HEAD(nets);
+ LIST_HEAD(gateways);
struct list_head *tmp1;
struct list_head *tmp2;
__u32 net;
struct list_head *tmp1;
struct list_head *tmp2;
__u32 net;
int got_hops = 0;
unsigned int priority = 0;
int got_hops = 0;
unsigned int priority = 0;
- INIT_LIST_HEAD(&gateways);
- INIT_LIST_HEAD(&nets);
-
/* save a copy of the string for error messages */
strncpy(cmd, str, sizeof(cmd));
cmd[sizeof(cmd) - 1] = '\0';
/* save a copy of the string for error messages */
strncpy(cmd, str, sizeof(cmd));
cmd[sizeof(cmd) - 1] = '\0';
int
lnet_parse_routes (char *routes, int *im_a_router)
{
int
lnet_parse_routes (char *routes, int *im_a_router)
{
- struct list_head tbs;
- int rc = 0;
+ LIST_HEAD(tbs);
+ int rc = 0;
- INIT_LIST_HEAD(&tbs);
-
if (lnet_str2tbs_sep(&tbs, routes) < 0) {
CERROR("Error parsing routes\n");
rc = -EINVAL;
if (lnet_str2tbs_sep(&tbs, routes) < 0) {
CERROR("Error parsing routes\n");
rc = -EINVAL;
static char networks[LNET_SINGLE_TEXTBUF_NOB];
static char source[LNET_SINGLE_TEXTBUF_NOB];
static char networks[LNET_SINGLE_TEXTBUF_NOB];
static char source[LNET_SINGLE_TEXTBUF_NOB];
- struct list_head raw_entries;
- struct list_head matched_nets;
- struct list_head current_nets;
+ LIST_HEAD(raw_entries);
+ LIST_HEAD(matched_nets);
+ LIST_HEAD(current_nets);
struct list_head *t;
struct list_head *t2;
struct lnet_text_buf *tb;
struct list_head *t;
struct list_head *t2;
struct lnet_text_buf *tb;
- INIT_LIST_HEAD(&raw_entries);
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
LASSERT(lnet_tbnob == 0);
return -EINVAL;
}
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
LASSERT(lnet_tbnob == 0);
return -EINVAL;
}
- INIT_LIST_HEAD(&matched_nets);
- INIT_LIST_HEAD(&current_nets);
networks[0] = 0;
count = 0;
len = 0;
networks[0] = 0;
count = 0;
len = 0;
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
- /* removing entries */
- INIT_LIST_HEAD(&cull);
-
lnet_net_lock(0);
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
lnet_net_lock(0);
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
struct lnet_test_peer *tp;
struct list_head *el;
struct list_head *next;
- struct list_head cull;
- int fail = 0;
-
- INIT_LIST_HEAD(&cull);
+ LIST_HEAD(cull);
+ int fail = 0;
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
- struct list_head drop;
- INIT_LIST_HEAD(&drop);
list_splice_init(&lp->lp_rtrq, &drop);
spin_unlock(&lp->lp_lock);
spin_unlock(&rxpeerni->lpni_lock);
list_splice_init(&lp->lp_rtrq, &drop);
spin_unlock(&lp->lp_lock);
spin_unlock(&rxpeerni->lpni_lock);
lnet_finalize_expired_responses(void)
{
struct lnet_libmd *md;
lnet_finalize_expired_responses(void)
{
struct lnet_libmd *md;
- struct list_head local_queue;
struct lnet_rsp_tracker *rspt, *tmp;
ktime_t now;
int i;
struct lnet_rsp_tracker *rspt, *tmp;
ktime_t now;
int i;
return;
cfs_cpt_for_each(i, lnet_cpt_table()) {
return;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- INIT_LIST_HEAD(&local_queue);
+ LIST_HEAD(local_queue);
lnet_net_lock(i);
if (!the_lnet.ln_mt_rstq[i]) {
lnet_net_lock(i);
if (!the_lnet.ln_mt_rstq[i]) {
lnet_recover_local_nis(void)
{
struct lnet_mt_event_info *ev_info;
lnet_recover_local_nis(void)
{
struct lnet_mt_event_info *ev_info;
- struct list_head processed_list;
- struct list_head local_queue;
+ LIST_HEAD(processed_list);
+ LIST_HEAD(local_queue);
struct lnet_handle_md mdh;
struct lnet_ni *tmp;
struct lnet_ni *ni;
struct lnet_handle_md mdh;
struct lnet_ni *tmp;
struct lnet_ni *ni;
- INIT_LIST_HEAD(&local_queue);
- INIT_LIST_HEAD(&processed_list);
-
/*
* splice the recovery queue on a local queue. We will iterate
* through the local queue and update it as needed. Once we're
/*
* splice the recovery queue on a local queue. We will iterate
* through the local queue and update it as needed. Once we're
lnet_clean_resendqs(void)
{
struct lnet_msg *msg, *tmp;
lnet_clean_resendqs(void)
{
struct lnet_msg *msg, *tmp;
- INIT_LIST_HEAD(&msgs);
-
cfs_cpt_for_each(i, lnet_cpt_table()) {
lnet_net_lock(i);
list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
cfs_cpt_for_each(i, lnet_cpt_table()) {
lnet_net_lock(i);
list_splice_init(the_lnet.ln_mt_resendqs[i], &msgs);
lnet_recover_peer_nis(void)
{
struct lnet_mt_event_info *ev_info;
lnet_recover_peer_nis(void)
{
struct lnet_mt_event_info *ev_info;
- struct list_head processed_list;
- struct list_head local_queue;
+ LIST_HEAD(processed_list);
+ LIST_HEAD(local_queue);
struct lnet_handle_md mdh;
struct lnet_peer_ni *lpni;
struct lnet_peer_ni *tmp;
struct lnet_handle_md mdh;
struct lnet_peer_ni *lpni;
struct lnet_peer_ni *tmp;
- INIT_LIST_HEAD(&local_queue);
- INIT_LIST_HEAD(&processed_list);
-
/*
* Always use cpt 0 for locking across all interactions with
* ln_mt_peerNIRecovq
/*
* Always use cpt 0 for locking across all interactions with
* ln_mt_peerNIRecovq
{
struct lnet_drop_rule *rule;
struct lnet_drop_rule *tmp;
{
struct lnet_drop_rule *rule;
struct lnet_drop_rule *tmp;
- struct list_head zombies;
- int n = 0;
+ LIST_HEAD(zombies);
+ int n = 0;
- INIT_LIST_HEAD(&zombies);
-
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
if (rule->dr_attr.fa_src != src && src != 0)
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
if (rule->dr_attr.fa_src != src && src != 0)
void
lnet_delay_rule_check(void)
{
void
lnet_delay_rule_check(void)
{
- struct lnet_delay_rule *rule;
- struct list_head msgs;
+ struct lnet_delay_rule *rule;
+ LIST_HEAD(msgs);
while (1) {
if (list_empty(&delay_dd.dd_sched_rules))
break;
while (1) {
if (list_empty(&delay_dd.dd_sched_rules))
break;
lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
{
struct lnet_delay_rule *rule;
lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
{
struct lnet_delay_rule *rule;
- struct lnet_delay_rule *tmp;
- struct list_head rule_list;
- struct list_head msg_list;
- int n = 0;
- bool cleanup;
+ struct lnet_delay_rule *tmp;
+ LIST_HEAD(rule_list);
+ LIST_HEAD(msg_list);
+ int n = 0;
+ bool cleanup;
- INIT_LIST_HEAD(&rule_list);
- INIT_LIST_HEAD(&msg_list);
-
if (shutdown)
src = dst = 0;
if (shutdown)
src = dst = 0;
{
struct lnet_msg *msg, *tmp;
int rc = 0;
{
struct lnet_msg *msg, *tmp;
int rc = 0;
- struct list_head pending_msgs;
-
- INIT_LIST_HEAD(&pending_msgs);
+ LIST_HEAD(pending_msgs);
CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
libcfs_nid2str(lp->lp_primary_nid));
CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
libcfs_nid2str(lp->lp_primary_nid));
static void lnet_resend_msgs(void)
{
struct lnet_msg *msg, *tmp;
static void lnet_resend_msgs(void)
{
struct lnet_msg *msg, *tmp;
- struct list_head resend;
- INIT_LIST_HEAD(&resend);
-
spin_lock(&the_lnet.ln_msg_resend_lock);
list_splice(&the_lnet.ln_msg_resend, &resend);
spin_unlock(&the_lnet.ln_msg_resend_lock);
spin_lock(&the_lnet.ln_msg_resend_lock);
list_splice(&the_lnet.ln_msg_resend, &resend);
spin_unlock(&the_lnet.ln_msg_resend_lock);
int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
- struct list_head rnet_zombies;
+ LIST_HEAD(rnet_zombies);
struct lnet_remotenet *rnet;
struct lnet_remotenet *tmp;
struct list_head *rn_list;
struct lnet_peer_ni *lpni;
struct lnet_route *route;
struct lnet_remotenet *rnet;
struct lnet_remotenet *tmp;
struct list_head *rn_list;
struct lnet_peer_ni *lpni;
struct lnet_route *route;
- struct list_head zombies;
struct lnet_peer *lp = NULL;
int i = 0;
struct lnet_peer *lp = NULL;
int i = 0;
- INIT_LIST_HEAD(&rnet_zombies);
- INIT_LIST_HEAD(&zombies);
-
CDEBUG(D_NET, "Del route: net %s : gw %s\n",
libcfs_net2str(net), libcfs_nid2str(gw_nid));
CDEBUG(D_NET, "Del route: net %s : gw %s\n",
libcfs_net2str(net), libcfs_nid2str(gw_nid));
{
int npages = rbp->rbp_npages;
struct lnet_rtrbuf *rb;
{
int npages = rbp->rbp_npages;
struct lnet_rtrbuf *rb;
if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
return;
if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
return;
- INIT_LIST_HEAD(&tmp);
-
lnet_net_lock(cpt);
list_splice_init(&rbp->rbp_msgs, &tmp);
lnet_drop_routed_msgs_locked(&tmp, cpt);
lnet_net_lock(cpt);
list_splice_init(&rbp->rbp_msgs, &tmp);
lnet_drop_routed_msgs_locked(&tmp, cpt);
static int
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
static int
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
- struct list_head rb_list;
struct lnet_rtrbuf *rb;
int num_rb;
int num_buffers = 0;
struct lnet_rtrbuf *rb;
int num_rb;
int num_buffers = 0;
rbp->rbp_req_nbuffers = nbufs;
lnet_net_unlock(cpt);
rbp->rbp_req_nbuffers = nbufs;
lnet_net_unlock(cpt);
- INIT_LIST_HEAD(&rb_list);
-
/* allocate the buffers on a local list first. If all buffers are
* allocated successfully then join this list to the rbp buffer
* list. If not then free all allocated buffers. */
/* allocate the buffers on a local list first. If all buffers are
* allocated successfully then join this list to the rbp buffer
* list. If not then free all allocated buffers. */
lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head __user *result_up)
{
lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_rpc_trans *trans;
- int rc;
-
- INIT_LIST_HEAD(&head);
rc = lstcon_rpc_trans_ndlist(ndlist, &head,
LST_TRANS_STATQRY, NULL, NULL, &trans);
rc = lstcon_rpc_trans_ndlist(ndlist, &head,
LST_TRANS_STATQRY, NULL, NULL, &trans);