From 34b57a6f8fcd1bc57c0ba92e299bd39f3baa6cb5 Mon Sep 17 00:00:00 2001 From: "Mr. NeilBrown" Date: Thu, 22 Apr 2021 14:27:38 -0400 Subject: [PATCH] LU-12678 lnet: use list_first_entry() in lnet/klnds subdirectory. Convert list_entry(foo->next .....) to list_first_entry(foo, ....) in 'lnet/klnds In several cases the call is combined with a list_empty() test and list_first_entry_or_null() is used Test-Parameters: trivial testlist=sanity-lnet Signed-off-by: Mr. NeilBrown Change-Id: I3b2b33c3c9284c02e44610614d64a1f84be300a4 Reviewed-on: https://review.whamcloud.com/43419 Reviewed-by: Andreas Dilger Tested-by: jenkins Tested-by: Maloo Reviewed-by: Arshad Hussain Reviewed-by: Oleg Drokin --- lnet/klnds/o2iblnd/o2iblnd.c | 24 ++++++----- lnet/klnds/o2iblnd/o2iblnd_cb.c | 78 +++++++++++++++++----------------- lnet/klnds/socklnd/socklnd.c | 9 ++-- lnet/klnds/socklnd/socklnd_cb.c | 87 ++++++++++++++++++-------------------- lnet/klnds/socklnd/socklnd_proto.c | 4 +- 5 files changed, 99 insertions(+), 103 deletions(-) diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c index 0641a6f..ae84f06 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.c +++ b/lnet/klnds/o2iblnd/o2iblnd.c @@ -1629,16 +1629,16 @@ out_fpo: static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies) { + struct kib_fmr_pool *fpo; + if (fps->fps_net == NULL) /* intialized? 
*/ return; spin_lock(&fps->fps_lock); - while (!list_empty(&fps->fps_pool_list)) { - struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next, - struct kib_fmr_pool, - fpo_list); - + while ((fpo = list_first_entry_or_null(&fps->fps_pool_list, + struct kib_fmr_pool, + fpo_list)) != NULL) { fpo->fpo_failed = 1; if (fpo->fpo_map_count == 0) list_move(&fpo->fpo_list, zombies); @@ -2002,8 +2002,9 @@ kiblnd_destroy_pool_list(struct list_head *head) { struct kib_pool *pool; - while (!list_empty(head)) { - pool = list_entry(head->next, struct kib_pool, po_list); + while ((pool = list_first_entry_or_null(head, + struct kib_pool, + po_list)) != NULL) { list_del(&pool->po_list); LASSERT(pool->po_owner != NULL); @@ -2014,14 +2015,15 @@ kiblnd_destroy_pool_list(struct list_head *head) static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) { + struct kib_pool *po; + if (ps->ps_net == NULL) /* intialized? */ return; spin_lock(&ps->ps_lock); - while (!list_empty(&ps->ps_pool_list)) { - struct kib_pool *po = list_entry(ps->ps_pool_list.next, - struct kib_pool, po_list); - + while ((po = list_first_entry_or_null(&ps->ps_pool_list, + struct kib_pool, + po_list)) != NULL) { po->po_failed = 1; if (po->po_allocated == 0) list_move(&po->po_list, zombies); diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index 5e04b72..4e79dfd 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -99,9 +99,9 @@ kiblnd_txlist_done(struct list_head *txlist, int status, { struct kib_tx *tx; - while (!list_empty(txlist)) { - tx = list_entry(txlist->next, struct kib_tx, tx_list); - + while ((tx = list_first_entry_or_null(txlist, + struct kib_tx, + tx_list)) != NULL) { list_del(&tx->tx_list); /* complete now */ tx->tx_waiting = 0; @@ -989,9 +989,8 @@ kiblnd_check_sends_locked(struct kib_conn *conn) LASSERT (conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && - 
!list_empty(&conn->ibc_tx_queue_rsrvd)) { - tx = list_entry(conn->ibc_tx_queue_rsrvd.next, - struct kib_tx, tx_list); + (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd, + struct kib_tx, tx_list)) != NULL) { list_move_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; } @@ -1013,17 +1012,17 @@ kiblnd_check_sends_locked(struct kib_conn *conn) if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; - tx = list_entry(conn->ibc_tx_queue_nocred.next, - struct kib_tx, tx_list); + tx = list_first_entry(&conn->ibc_tx_queue_nocred, + struct kib_tx, tx_list); } else if (!list_empty(&conn->ibc_tx_noops)) { LASSERT (!IBLND_OOB_CAPABLE(ver)); credit = 1; - tx = list_entry(conn->ibc_tx_noops.next, - struct kib_tx, tx_list); + tx = list_first_entry(&conn->ibc_tx_noops, + struct kib_tx, tx_list); } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; - tx = list_entry(conn->ibc_tx_queue.next, - struct kib_tx, tx_list); + tx = list_first_entry(&conn->ibc_tx_queue, + struct kib_tx, tx_list); } else break; @@ -2075,9 +2074,9 @@ kiblnd_handle_early_rxs(struct kib_conn *conn) LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (!list_empty(&conn->ibc_early_rxs)) { - rx = list_entry(conn->ibc_early_rxs.next, - struct kib_rx, rx_list); + while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs, + struct kib_rx, + rx_list)) != NULL) { list_del(&rx->rx_list); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2361,8 +2360,8 @@ kiblnd_connreq_done(struct kib_conn *conn, int status) * scheduled. We won't be using round robin on this first batch. */ spin_lock(&conn->ibc_lock); - while (!list_empty(&txs)) { - tx = list_entry(txs.next, struct kib_tx, tx_list); + while ((tx = list_first_entry_or_null(&txs, struct kib_tx, + tx_list)) != NULL) { list_del(&tx->tx_list); kiblnd_queue_tx_locked(tx, conn); @@ -3422,9 +3421,9 @@ kiblnd_check_conns (int idx) * connection. 
We can only be sure RDMA activity * has ceased once the QP has been modified. */ - while (!list_empty(&closes)) { - conn = list_entry(closes.next, - struct kib_conn, ibc_connd_list); + while ((conn = list_first_entry_or_null(&closes, + struct kib_conn, + ibc_connd_list)) != NULL) { list_del(&conn->ibc_connd_list); kiblnd_close_conn(conn, -ETIMEDOUT); kiblnd_conn_decref(conn); @@ -3434,9 +3433,9 @@ kiblnd_check_conns (int idx) * NOOP, but there were no non-blocking tx descs * free to do it last time... */ - while (!list_empty(&checksends)) { - conn = list_entry(checksends.next, - struct kib_conn, ibc_connd_list); + while ((conn = list_first_entry_or_null(&checksends, + struct kib_conn, + ibc_connd_list)) != NULL) { list_del(&conn->ibc_connd_list); spin_lock(&conn->ibc_lock); @@ -3494,11 +3493,11 @@ kiblnd_connd (void *arg) dropped_lock = false; - if (!list_empty(&kiblnd_data.kib_connd_zombies)) { + conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies, + struct kib_conn, ibc_list); + if (conn) { struct kib_peer_ni *peer_ni = NULL; - conn = list_entry(kiblnd_data.kib_connd_zombies.next, - struct kib_conn, ibc_list); list_del(&conn->ibc_list); if (conn->ibc_reconnect) { peer_ni = conn->ibc_peer; @@ -3525,10 +3524,11 @@ kiblnd_connd (void *arg) &kiblnd_data.kib_reconn_wait); } - if (!list_empty(&kiblnd_data.kib_connd_conns)) { + conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns, + struct kib_conn, ibc_list); + if (conn) { int wait; - conn = list_entry(kiblnd_data.kib_connd_conns.next, - struct kib_conn, ibc_list); + list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); @@ -3554,11 +3554,11 @@ kiblnd_connd (void *arg) &kiblnd_data.kib_reconn_list); } - if (list_empty(&kiblnd_data.kib_reconn_list)) + conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list, + struct kib_conn, ibc_list); + if (!conn) break; - conn = list_entry(kiblnd_data.kib_reconn_list.next, - struct kib_conn, ibc_list); list_del(&conn->ibc_list); 
spin_unlock_irqrestore(lock, flags); @@ -3571,9 +3571,10 @@ kiblnd_connd (void *arg) spin_lock_irqsave(lock, flags); } - if (!list_empty(&kiblnd_data.kib_connd_waits)) { - conn = list_entry(kiblnd_data.kib_connd_waits.next, - struct kib_conn, ibc_list); + conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits, + struct kib_conn, + ibc_list); + if (conn) { list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); @@ -3788,9 +3789,10 @@ kiblnd_scheduler(void *arg) did_something = false; - if (!list_empty(&sched->ibs_conns)) { - conn = list_entry(sched->ibs_conns.next, - struct kib_conn, ibc_sched_list); + conn = list_first_entry_or_null(&sched->ibs_conns, + struct kib_conn, + ibc_sched_list); + if (conn) { /* take over kib_sched_conns' ref on conn... */ LASSERT(conn->ibc_scheduled); list_del(&conn->ibc_sched_list); diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c index cdc15e9..35d12d9 100644 --- a/lnet/klnds/socklnd/socklnd.c +++ b/lnet/klnds/socklnd/socklnd.c @@ -1336,9 +1336,8 @@ ksocknal_finalize_zcreq(struct ksock_conn *conn) spin_unlock(&peer_ni->ksnp_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list); - + while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx, + tx_zc_list)) != NULL) { list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } @@ -1822,8 +1821,8 @@ ksocknal_free_buffers (void) list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist); spin_unlock(&ksocknal_data.ksnd_tx_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, struct ksock_tx, tx_list); + while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx, + tx_list)) != NULL) { list_del(&tx->tx_list); LIBCFS_FREE(tx, tx->tx_desc_size); } diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index a0c79d2..c0d626a 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -39,9 +39,9 @@ ksocknal_alloc_tx(int type, int size) /* 
searching for a noop tx in free list */ spin_lock(&ksocknal_data.ksnd_tx_lock); - if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { - tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next, - struct ksock_tx, tx_list); + tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs, + struct ksock_tx, tx_list); + if (tx) { LASSERT(tx->tx_desc_size == size); list_del(&tx->tx_list); } @@ -423,9 +423,8 @@ ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error) { struct ksock_tx *tx; - while (!list_empty(txlist)) { - tx = list_entry(txlist->next, struct ksock_tx, tx_list); - + while ((tx = list_first_entry_or_null(txlist, struct ksock_tx, + tx_list)) != NULL) { if (error && tx->tx_lnetmsg != NULL) { CNETERR("Deleting packet type %d len %d %s->%s\n", le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type), @@ -1466,10 +1465,10 @@ int ksocknal_scheduler(void *arg) bool did_something = false; /* Ensure I progress everything semi-fairly */ - - if (!list_empty(&sched->kss_rx_conns)) { - conn = list_entry(sched->kss_rx_conns.next, - struct ksock_conn, ksnc_rx_list); + conn = list_first_entry_or_null(&sched->kss_rx_conns, + struct ksock_conn, + ksnc_rx_list); + if (conn) { list_del(&conn->ksnc_rx_list); LASSERT(conn->ksnc_rx_scheduled); @@ -1517,16 +1516,17 @@ int ksocknal_scheduler(void *arg) list_splice_init(&sched->kss_zombie_noop_txs, &zlist); - conn = list_entry(sched->kss_tx_conns.next, - struct ksock_conn, ksnc_tx_list); + conn = list_first_entry(&sched->kss_tx_conns, + struct ksock_conn, + ksnc_tx_list); list_del(&conn->ksnc_tx_list); LASSERT(conn->ksnc_tx_scheduled); LASSERT(conn->ksnc_tx_ready); LASSERT(!list_empty(&conn->ksnc_tx_queue)); - tx = list_entry(conn->ksnc_tx_queue.next, - struct ksock_tx, tx_list); + tx = list_first_entry(&conn->ksnc_tx_queue, + struct ksock_tx, tx_list); if (conn->ksnc_tx_carrier == tx) ksocknal_next_tx_carrier(conn); @@ -2043,11 +2043,10 @@ ksocknal_connect(struct ksock_conn_cb *conn_cb) /* ksnp_tx_queue is queued on a 
conn on successful * connection for V1.x and V2.x */ - if (!list_empty(&peer_ni->ksnp_conns)) { - conn = list_entry(peer_ni->ksnp_conns.next, - struct ksock_conn, ksnc_list); + conn = list_first_entry_or_null(&peer_ni->ksnp_conns, + struct ksock_conn, ksnc_list); + if (conn) LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); - } /* take all the blocked packets while I've got the lock and * complete below... @@ -2225,11 +2224,10 @@ ksocknal_connd(void *arg) dropped_lock = true; } - if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) { + cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs, + struct ksock_connreq, ksncr_list); + if (cr) { /* Connection accepted by the listener */ - cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next, - struct ksock_connreq, ksncr_list); - list_del(&cr->ksncr_list); spin_unlock_bh(connd_lock); dropped_lock = true; @@ -2384,10 +2382,9 @@ ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni) write_lock_bh(&ksocknal_data.ksnd_global_lock); - while (!list_empty(&peer_ni->ksnp_tx_queue)) { - tx = list_entry(peer_ni->ksnp_tx_queue.next, - struct ksock_tx, tx_list); - + while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue, + struct ksock_tx, + tx_list)) != NULL) { if (ktime_get_seconds() < tx->tx_deadline) break; @@ -2507,20 +2504,16 @@ ksocknal_check_peer_timeouts(int idx) /* we can't process stale txs right here because we're * holding only shared lock */ - if (!list_empty(&peer_ni->ksnp_tx_queue)) { - struct ksock_tx *tx; - - tx = list_entry(peer_ni->ksnp_tx_queue.next, - struct ksock_tx, tx_list); - if (ktime_get_seconds() >= tx->tx_deadline) { - ksocknal_peer_addref(peer_ni); - read_unlock(&ksocknal_data.ksnd_global_lock); + tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue, + struct ksock_tx, tx_list); + if (tx && ktime_get_seconds() >= tx->tx_deadline) { + ksocknal_peer_addref(peer_ni); + read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_flush_stale_txs(peer_ni); + 
ksocknal_flush_stale_txs(peer_ni); - ksocknal_peer_decref(peer_ni); - goto again; - } + ksocknal_peer_decref(peer_ni); + goto again; } if (list_empty(&peer_ni->ksnp_zc_req_list)) @@ -2584,9 +2577,9 @@ int ksocknal_reaper(void *arg) spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); while (!ksocknal_data.ksnd_shuttingdown) { - if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { - conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next, - struct ksock_conn, ksnc_list); + conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns, + struct ksock_conn, ksnc_list); + if (conn) { list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2598,9 +2591,9 @@ int ksocknal_reaper(void *arg) continue; } - if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { - conn = list_entry(ksocknal_data.ksnd_zombie_conns.next, - struct ksock_conn, ksnc_list); + conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns, + struct ksock_conn, ksnc_list); + if (conn) { list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2618,9 +2611,9 @@ int ksocknal_reaper(void *arg) /* reschedule all the connections that stalled with ENOMEM... 
*/ nenomem_conns = 0; - while (!list_empty(&enomem_conns)) { - conn = list_entry(enomem_conns.next, - struct ksock_conn, ksnc_tx_list); + while ((conn = list_first_entry_or_null(&enomem_conns, + struct ksock_conn, + ksnc_tx_list)) != NULL) { list_del(&conn->ksnc_tx_list); sched = conn->ksnc_scheduler; diff --git a/lnet/klnds/socklnd/socklnd_proto.c b/lnet/klnds/socklnd/socklnd_proto.c index e24f0b4..49267c0 100644 --- a/lnet/klnds/socklnd/socklnd_proto.c +++ b/lnet/klnds/socklnd/socklnd_proto.c @@ -437,8 +437,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2) spin_unlock(&peer_ni->ksnp_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list); + while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx, + tx_zc_list)) != NULL) { list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } -- 1.8.3.1