if (!peer_ni->ksnp_closing &&
list_empty(&peer_ni->ksnp_conns) &&
list_empty(&peer_ni->ksnp_routes)) {
- list_add(&zombies, &peer_ni->ksnp_tx_queue);
- list_del_init(&peer_ni->ksnp_tx_queue);
+ list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
ksocknal_unlink_peer_locked(peer_ni);
}
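Review note: every hunk in this series applies the same transformation. The removed pair, list_add(&new_head, &old_head) followed by list_del_init(&old_head), bulk-moves a queue by threading the new head into the old ring and then unhooking the old head; list_splice_init() is the primitive named for exactly that move. A minimal sketch of the idiom, with hypothetical names:

#include <linux/list.h>

/*
 * Hypothetical helper: move every entry queued on @src onto @dst and
 * leave @src empty and reusable.
 *
 * Before:  src -> a -> b -> c     dst (empty)
 * After:   src (empty)            dst -> a -> b -> c
 */
static void drain_to(struct list_head *src, struct list_head *dst)
{
	/* one call replaces the list_add()/list_del_init() two-step */
	list_splice_init(src, dst);
}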
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- struct list_head zlist;
+ LIST_HEAD(zlist);
struct ksock_tx *tx;
- list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+ list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
while (!list_empty(&zlist)) {
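Two things are happening in this hunk. First, the old `struct list_head zlist;` was an uninitialized head that only became valid because list_add() overwrote both of its pointers; list_splice_init() reads the destination (head->next is dereferenced), so the LIST_HEAD(zlist) initializer is required, not cosmetic. Second, the splice hands the whole queue off under the lock so the entries can be drained afterwards without it. A sketch of that pattern, assuming ksocknal_free_tx() as the teardown helper:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch: take everything queued on @pending while holding @lock,
 * then tear the entries down with the lock dropped.
 */
static void reap_idle_txs(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(zlist);	/* on-stack head, properly initialized */
	struct ksock_tx *tx;

	spin_lock(lock);
	list_splice_init(pending, &zlist);	/* O(1) hand-off */
	spin_unlock(lock);

	/* zlist is private to this frame; no lock needed to drain it */
	while (!list_empty(&zlist)) {
		tx = list_entry(zlist.next, struct ksock_tx, tx_list);
		list_del(&tx->tx_list);
		ksocknal_free_tx(tx);	/* assumed teardown helper */
	}
}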
if (!list_empty(&sched->kss_tx_conns)) {
LIST_HEAD(zlist);
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist,
- &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
- }
+ list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
conn = list_entry(sched->kss_tx_conns.next,
struct ksock_conn, ksnc_tx_list);
ksocknal_destroy_conn(conn);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
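The list_empty() guard around the move can be dropped here (and in the next hunk) because the check is built into the helper; splicing an empty source is a no-op. Paraphrasing the definition in include/linux/list.h:

static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);	/* source left empty but valid */
	}
}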
- if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
- }
+ list_splice_init(&ksocknal_data.ksnd_enomem_conns,
+ &enomem_conns);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
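Same guard removal as above, and the same lock-hand-off shape: the splice runs unconditionally under the reaper lock, and the now-private enomem_conns list is walked lock-free. When each entry is unlinked mid-walk, the _safe iterator is the right drain idiom; a sketch with ksocknal_conn_decref() assumed as the release helper:

#include <linux/list.h>

static void drain_enomem_conns(struct list_head *conns)
{
	struct ksock_conn *conn, *tmp;

	/* _safe variant: @conn is unlinked (and may be freed) mid-walk */
	list_for_each_entry_safe(conn, tmp, conns, ksnc_tx_list) {
		list_del(&conn->ksnc_tx_list);
		ksocknal_conn_decref(conn);	/* assumed release helper */
	}
}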
tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
ksocknal_next_tx_carrier(conn);
/* use new_tx to replace the noop zc-ack packet */
- list_add(&tx_msg->tx_list, &tx->tx_list);
- list_del(&tx->tx_list);
+ list_replace(&tx->tx_list, &tx_msg->tx_list);
return tx;
}
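One correction in this hunk rather than a straight conversion: tx->tx_list is a node on the connection's queue, not a list head, so the splice helpers do not apply. "Splicing the list headed at tx" relinks every other entry in the ring and only lands in the right place if the new node happens to be self-initialized. list_replace() is the primitive that matches the removed list_add()/list_del() pair exactly. Paraphrasing include/linux/list.h:

/* @new takes @old's position in the ring; @old's pointers are left
 * dangling (list_replace_init() additionally resets @old).
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}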
lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_state = LNET_STATE_STOPPING;
- while (!list_empty(&the_lnet.ln_nets)) {
- /*
- * move the nets to the zombie list to avoid them being
- * picked up for new work. LONET is also included in the
- * Nets that will be moved to the zombie list
- */
- net = list_entry(the_lnet.ln_nets.next,
- struct lnet_net, net_list);
- list_move(&net->net_list, &the_lnet.ln_net_zombie);
- }
+ /*
+ * Move the nets to the zombie list to keep them from being
+ * picked up for new work. LONET is also included in the
+ * nets that are moved to the zombie list.
+ */
+ list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
/* Drop the cached loopback Net. */
if (the_lnet.ln_loni != NULL) {
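Beyond brevity, the splice changes both the cost and the ordering here: the removed loop re-links one net per iteration (four pointer writes each, O(n) total) and, because list_move() pushes at the destination's head, it reverses the entry order; list_splice_init() moves the whole chain with a constant number of pointer updates and preserves order. Neither ordering matters for a zombie list, but the difference is worth knowing. Side by side, with hypothetical function names:

#include <linux/list.h>

/* Removed form: O(n), and reverses order (each entry is pushed at
 * the head of @dst).
 */
static void move_nets_slow(struct list_head *src, struct list_head *dst)
{
	struct lnet_net *net;

	while (!list_empty(src)) {
		net = list_entry(src->next, struct lnet_net, net_list);
		list_move(&net->net_list, dst);
	}
}

/* Replacement: O(1), order preserved */
static void move_nets_fast(struct list_head *src, struct list_head *dst)
{
	list_splice_init(src, dst);
}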
struct lstcon_rpc_trans *trans;
struct lstcon_rpc *crpc;
struct list_head *pacer;
- struct list_head zlist;
+ LIST_HEAD(zlist);
/* Called with the global mutex held */
"waiting for %d console RPCs to be recycled\n",
atomic_read(&console_session.ses_rpc_counter));
- list_add(&zlist, &console_session.ses_rpc_freelist);
- list_del_init(&console_session.ses_rpc_freelist);
+ list_splice_init(&console_session.ses_rpc_freelist, &zlist);
spin_unlock(&console_session.ses_rpc_lock);
if (ld != NULL)
blwi->blwi_ld = *ld;
if (count) {
- list_add(&blwi->blwi_head, cancels);
- list_del_init(cancels);
+ list_splice_init(cancels, &blwi->blwi_head);
blwi->blwi_count = count;
} else {
blwi->blwi_lock = lock;
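Same conversion with a caller-owned source: the pending cancels are handed off wholesale into blwi->blwi_head, and the re-init leaves `cancels` empty for the caller. Two prerequisites the splice relies on, sketched below with hypothetical names: the destination head must already be initialized (the splice dereferences head->next), and entries land at the front; list_splice_tail_init() is the appending variant (equivalent when the destination is empty).

#include <linux/list.h>

struct blwi_like {			/* stand-in for the real blwi */
	struct list_head head;
	int count;
};

static void blwi_take_cancels(struct blwi_like *blwi,
			      struct list_head *cancels, int count)
{
	INIT_LIST_HEAD(&blwi->head);		/* destination must be valid */
	list_splice_init(cancels, &blwi->head);	/* entries land at the front */
	blwi->count = count;
}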