Several loops use list_for_each() and then call list_entry()
on each iteration. This boilerplate can be replaced with
list_for_each_entry(). For illustration, a before/after
sketch follows the review trailers below.
Test-Parameters: trivial testlist=sanity-lnet
Signed-off-by: James Simmons <jsimmons@infradead.org>
Change-Id: Ib7968466c4fce5173b20cbaf6c878975ba522d43
Reviewed-on: https://review.whamcloud.com/43591
Reviewed-by: Chris Horn <chris.horn@hpe.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Arshad Hussain <arshad.hussain@aeoncomputing.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
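For illustration only (this sketch is not part of the patch), the general shape of the conversion is shown below with a hypothetical struct item that sits on a list through a member named link; sum_before()/sum_after() are made-up helpers:

#include <linux/list.h>

struct item {
	int value;
	struct list_head link;		/* linkage onto some list head */
};

/* Before: open-coded cursor plus a list_entry() call on every pass. */
static int sum_before(struct list_head *head)
{
	struct list_head *tmp;
	struct item *it;
	int sum = 0;

	list_for_each(tmp, head) {
		it = list_entry(tmp, struct item, link);
		sum += it->value;
	}
	return sum;
}

/* After: list_for_each_entry() performs the container_of() step itself. */
static int sum_after(struct list_head *head)
{
	struct item *it;
	int sum = 0;

	list_for_each_entry(it, head, link)
		sum += it->value;
	return sum;
}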
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
- kgn_conn_t *conn;
- struct list_head *ctmp, *cnxt;
+ kgn_conn_t *conn, *cnxt;
int loopback;
int count = 0;
loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
- list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
- conn = list_entry(ctmp, kgn_conn_t, gnc_list);
-
+ list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
continue;
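Where the loop body may unlink the current entry, as kgnilnd_close_stale_conns_locked() can, the _safe variant is used: a second cursor (cnxt/pnxt in the hunks) holds the next entry before the body runs. A minimal sketch, reusing the hypothetical struct item from the earlier example (kfree() comes from <linux/slab.h>):

/* 'n' is the lookahead cursor, so 'it' may be unlinked and freed safely. */
static void drop_negative_items(struct list_head *head)
{
	struct item *it, *n;

	list_for_each_entry_safe(it, n, head, link) {
		if (it->value < 0) {
			list_del(&it->link);
			kfree(it);
		}
	}
}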
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
kgn_conn_t *conn;
int loopback;
ENTRY;
loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
- list_for_each(tmp, &peer->gnp_conns) {
- conn = list_entry(tmp, kgn_conn_t, gnc_list);
+ list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
CDEBUG(D_NET, "checking conn 0x%p for peer %s"
" lo %d new %llu existing %llu"
" new peer %llu existing peer %llu"
*/
for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
- list_for_each_entry(net , &kgnilnd_data.kgn_nets[i], gnn_list) {
+ list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
/* if gnn_shutdown set for any net shutdown is in progress just return */
if (net->gnn_shutdown) {
up_read(&kgnilnd_data.kgn_net_rw_sem);
lnet_nid_t *id, __u32 *nic_addr,
int *refcount, int *connecting)
{
- struct list_head *ptmp;
kgn_peer_t *peer;
int i;
int rc = -ENOENT;
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
-
- list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
- peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
+ list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
if (index-- > 0)
continue;
{
LIST_HEAD (souls);
LIST_HEAD (zombies);
- struct list_head *ptmp, *pnxt;
- kgn_peer_t *peer;
+ kgn_peer_t *peer, *pnxt;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
- peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
+ list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
+ gnp_list) {
LASSERTF(peer->gnp_net != NULL,
"peer %p (%s) with NULL net\n",
peer, libcfs_nid2str(peer->gnp_nid));
kgnilnd_get_conn_by_idx(int index)
{
kgn_peer_t *peer;
- struct list_head *ptmp;
- struct list_head *ctmp;
int i;
for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
read_lock(&kgnilnd_data.kgn_peer_conn_lock);
- list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
-
- peer = list_entry(ptmp, kgn_peer_t, gnp_list);
-
- list_for_each(ctmp, &peer->gnp_conns) {
- conn = list_entry(ctmp, kgn_conn_t, gnc_list);
-
+ list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
+ list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
continue;
int
kgnilnd_conn_seq_seek(kgn_conn_seq_iter_t *gseq, loff_t off)
{
- struct list_head *list, *tmp;
+ struct list_head *list;
+ kgn_conn_t *conn;
loff_t here = 0;
int rc = 0;
- list_for_each(tmp, list) {
+ list_for_each_entry(conn, list, gnc_hashlist) {
- kgn_conn_t *conn;
- conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);
gseq->gconn_conn = conn;
rc = 0;
goto out;
int
kgnilnd_peer_seq_seek(kgn_peer_seq_iter_t *gseq, loff_t off)
{
- struct list_head *list, *tmp;
+ struct list_head *list;
+ kgn_peer_t *peer;
loff_t here = 0;
int rc = 0;
- list_for_each(tmp, list) {
+ list_for_each_entry(peer, list, gnp_list) {
- kgn_peer_t *peer;
- peer = list_entry(tmp, kgn_peer_t, gnp_list);
gseq->gpeer_peer = peer;
rc = 0;
goto out;
{
struct kib_peer_ni *peer_ni;
struct kib_conn *conn;
- struct list_head *ctmp;
int i;
unsigned long flags;
if (peer_ni->ibp_ni != ni)
continue;
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns,
+ ibc_list) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
+ struct kib_tx *tx;
+
+ list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
struct kib_tx *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, struct kib_tx, tx_list);
+ list_for_each_entry(tx, txs, tx_list) {
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
} else {
struct kib_peer_ni *peer_ni;
struct kib_conn *conn;
struct kib_tx *tx, *tx_tmp;
- struct list_head *ctmp;
unsigned long flags;
/* NB. We expect to have a look at all the peers and not find any
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
spin_lock(&conn->ibc_lock);
ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
struct ksock_conn_cb *conn_cb)
{
struct ksock_conn *conn;
struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
/* peer_ni's route list takes over my ref on 'route' */
peer_ni->ksnp_conn_cb = conn_cb;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
(struct sockaddr *)&conn_cb->ksnr_addr))
continue;
{
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
- struct list_head *ctmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
if (peer_ni->ksnp_ni != ni)
continue;
- list_for_each(ctmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns,
+ ksnc_list) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, struct ksock_conn,
- ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
return conn;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
struct lnet_process_id peerid;
u64 incarnation;
struct ksock_conn *conn;
struct ksock_conn *conn2;
* loopback connection */
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_myaddr)) {
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
if (!rpc_cmp_addr(
(struct sockaddr *)&conn2->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_peeraddr) ||
struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
struct ksock_conn_cb *conn_cb;
struct ksock_conn *conn2;
LASSERT(peer_ni->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
LASSERT((conn_cb->ksnr_connected &
BIT(conn->ksnc_type)) != 0);
- conn2 = NULL;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
if (conn2->ksnc_conn_cb == conn_cb &&
conn2->ksnc_type == conn->ksnc_type)
- break;
-
- conn2 = NULL;
+ goto conn2_found;
}
- if (conn2 == NULL)
- conn_cb->ksnr_connected &= ~BIT(conn->ksnc_type);
-
+ conn_cb->ksnr_connected &= ~BIT(conn->ksnc_type);
+conn2_found:
conn->ksnc_conn_cb = NULL;
/* drop conn's ref on conn_cb */
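The hunk above is the one place the conversion is not purely mechanical: after list_for_each_entry() completes, the cursor is never NULL, so the old conn2-as-NULL-sentinel test cannot be kept; the "not found" action is instead skipped with a goto to the conn2_found label. A sketch of the two shapes with the hypothetical struct item again; mark_missing() is a placeholder for the "not found" action:

static void mark_missing(int key) { /* placeholder action */ }

/* Old shape: NULL cursor after the loop means "no match". */
static void old_shape(struct list_head *head, int key)
{
	struct list_head *tmp;
	struct item *found = NULL;

	list_for_each(tmp, head) {
		found = list_entry(tmp, struct item, link);
		if (found->value == key)
			break;
		found = NULL;
	}
	if (found == NULL)
		mark_missing(key);
}

/* New shape: jump over the "no match" action as soon as a match is seen. */
static void new_shape(struct list_head *head, int key)
{
	struct item *it;

	list_for_each_entry(it, head, link) {
		if (it->value == key)
			goto found;
	}
	mark_missing(key);	/* reached only when the loop finds nothing */
found:
	return;
}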
spin_lock(&peer_ni->ksnp_lock);
- list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
+ list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
+ tx_zc_list) {
if (tx->tx_conn != conn)
continue;
struct ksock_conn *conn;
for (index = 0; ; index++) {
- list_for_each(tmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
- conn = list_entry(tmp, struct ksock_conn,
- ksnc_list);
ksocknal_conn_addref(conn);
break;
}
read_unlock(&ksocknal_data.ksnd_global_lock);
break;
ksocknal_lib_push_conn (conn);
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
{
+ struct ksock_conn *c;
struct ksock_conn *conn;
struct ksock_conn *typed = NULL;
struct ksock_conn *fallback = NULL;
int tnob = 0;
int fnob = 0;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
- ksnc_list);
+ list_for_each_entry(c, &peer_ni->ksnp_conns, ksnc_list) {
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
{
/* We're called with a shared lock on ksnd_global_lock */
struct ksock_conn *conn;
- struct list_head *ctmp;
- list_for_each(ctmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
- conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
-
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
/* no more packets queued */
conn->ksnc_tx_carrier = NULL;
} else {
- conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
- struct ksock_tx, tx_list);
+ conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
tx->tx_msg.ksm_type);
}
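On the tx carrier hunk above, list_next_entry() is shorthand: in <linux/list.h> it is defined in terms of list_entry() on pos->member.next with the type taken from pos, so the two assignments below compute the same pointer (next is a local added only for this illustration):

struct ksock_tx *next;

next = list_entry(tx->tx_list.next, struct ksock_tx, tx_list);
next = list_next_entry(tx, tx_list);	/* identical result */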
spin_lock(&peer_ni->ksnp_lock);
- list_for_each_entry_safe(tx, tmp,
- &peer_ni->ksnp_zc_req_list, tx_zc_list) {
+ list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
+ tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {