From 45a64335d0e326a411fe6a68cce77e618924f59f Mon Sep 17 00:00:00 2001 From: Mr NeilBrown Date: Wed, 13 Nov 2019 14:03:12 +1100 Subject: [PATCH] LU-9679 various: use list_splice and list_splice_init The construct list_add(to, from); list_del(from); is equivalent to list_splice(from, to); providing 'to' has been initialized. Similarly with list_del_init and list_splice_init. There is no need to check if list_empty(from) first. Also looping over a list moving individual entries to another list can more easily be done with list_splice. These changes improve code clarity. Signed-off-by: Mr NeilBrown Change-Id: I710eb3bbd83c75e6c8f00b8d0a4c256ad28f9082 Reviewed-on: https://review.whamcloud.com/37457 Tested-by: jenkins Tested-by: Maloo Reviewed-by: Arshad Hussain Reviewed-by: James Simmons Reviewed-by: Oleg Drokin --- lnet/klnds/socklnd/socklnd.c | 8 +++----- lnet/klnds/socklnd/socklnd_cb.c | 17 +++++------------ lnet/klnds/socklnd/socklnd_proto.c | 7 +++---- lnet/lnet/api-ni.c | 16 ++++++---------- lnet/selftest/conrpc.c | 5 ++--- lustre/ldlm/ldlm_lockd.c | 3 +-- 6 files changed, 20 insertions(+), 36 deletions(-) diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c index 96c16fd..4ed0458 100644 --- a/lnet/klnds/socklnd/socklnd.c +++ b/lnet/klnds/socklnd/socklnd.c @@ -1340,8 +1340,7 @@ failed_2: if (!peer_ni->ksnp_closing && list_empty(&peer_ni->ksnp_conns) && list_empty(&peer_ni->ksnp_routes)) { - list_add(&zombies, &peer_ni->ksnp_tx_queue); - list_del_init(&peer_ni->ksnp_tx_queue); + list_splice_init(&peer_ni->ksnp_tx_queue, &zombies); ksocknal_unlink_peer_locked(peer_ni); } @@ -2141,11 +2140,10 @@ ksocknal_free_buffers (void) spin_lock(&ksocknal_data.ksnd_tx_lock); if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { - struct list_head zlist; + LIST_HEAD(zlist); struct ksock_tx *tx; - list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); - list_del_init(&ksocknal_data.ksnd_idle_noop_txs); + list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, 
&zlist); spin_unlock(&ksocknal_data.ksnd_tx_lock); while (!list_empty(&zlist)) { diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index f73039e..8675858 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -1545,11 +1545,7 @@ int ksocknal_scheduler(void *arg) if (!list_empty(&sched->kss_tx_conns)) { LIST_HEAD(zlist); - if (!list_empty(&sched->kss_zombie_noop_txs)) { - list_add(&zlist, - &sched->kss_zombie_noop_txs); - list_del_init(&sched->kss_zombie_noop_txs); - } + list_splice_init(&sched->kss_zombie_noop_txs, &zlist); conn = list_entry(sched->kss_tx_conns.next, struct ksock_conn, ksnc_tx_list); @@ -2643,14 +2639,11 @@ int ksocknal_reaper(void *arg) ksocknal_destroy_conn(conn); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); - continue; - } + continue; + } - if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) { - list_add(&enomem_conns, - &ksocknal_data.ksnd_enomem_conns); - list_del_init(&ksocknal_data.ksnd_enomem_conns); - } + list_splice_init(&ksocknal_data.ksnd_enomem_conns, + &enomem_conns); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); diff --git a/lnet/klnds/socklnd/socklnd_proto.c b/lnet/klnds/socklnd/socklnd_proto.c index 33a92a5..14bdb45 100644 --- a/lnet/klnds/socklnd/socklnd_proto.c +++ b/lnet/klnds/socklnd/socklnd_proto.c @@ -146,11 +146,10 @@ ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg) tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_next_tx_carrier(conn); - /* use new_tx to replace the noop zc-ack packet */ - list_add(&tx_msg->tx_list, &tx->tx_list); - list_del(&tx->tx_list); + /* use new_tx to replace the noop zc-ack packet */ + list_splice(&tx->tx_list, &tx_msg->tx_list); - return tx; + return tx; } static int diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c index d39e6b9..fbf3711 100644 --- a/lnet/lnet/api-ni.c +++ b/lnet/lnet/api-ni.c @@ -2152,16 +2152,12 @@ lnet_shutdown_lndnets(void) lnet_net_lock(LNET_LOCK_EX); 
the_lnet.ln_state = LNET_STATE_STOPPING; - while (!list_empty(&the_lnet.ln_nets)) { - /* - * move the nets to the zombie list to avoid them being - * picked up for new work. LONET is also included in the - * Nets that will be moved to the zombie list - */ - net = list_entry(the_lnet.ln_nets.next, - struct lnet_net, net_list); - list_move(&net->net_list, &the_lnet.ln_net_zombie); - } + /* + * move the nets to the zombie list to avoid them being + * picked up for new work. LONET is also included in the + * Nets that will be moved to the zombie list + */ + list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie); /* Drop the cached loopback Net. */ if (the_lnet.ln_loni != NULL) { diff --git a/lnet/selftest/conrpc.c b/lnet/selftest/conrpc.c index 8efacc0..39d19f0 100644 --- a/lnet/selftest/conrpc.c +++ b/lnet/selftest/conrpc.c @@ -1328,7 +1328,7 @@ lstcon_rpc_cleanup_wait(void) struct lstcon_rpc_trans *trans; struct lstcon_rpc *crpc; struct list_head *pacer; - struct list_head zlist; + LIST_HEAD(zlist); /* Called with hold of global mutex */ @@ -1362,8 +1362,7 @@ lstcon_rpc_cleanup_wait(void) "waiting for %d console RPCs to being recycled\n", atomic_read(&console_session.ses_rpc_counter)); - list_add(&zlist, &console_session.ses_rpc_freelist); - list_del_init(&console_session.ses_rpc_freelist); + list_splice_init(&console_session.ses_rpc_freelist, &zlist); spin_unlock(&console_session.ses_rpc_lock); diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c index 178976b..de04b6d 100644 --- a/lustre/ldlm/ldlm_lockd.c +++ b/lustre/ldlm/ldlm_lockd.c @@ -2146,8 +2146,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, if (ld != NULL) blwi->blwi_ld = *ld; if (count) { - list_add(&blwi->blwi_head, cancels); - list_del_init(cancels); + list_splice_init(cancels, &blwi->blwi_head); blwi->blwi_count = count; } else { blwi->blwi_lock = lock; -- 1.8.3.1