From: Amir Shehata
Date: Fri, 1 Apr 2016 19:28:58 +0000 (-0700)
Subject: LU-7734 lnet: rename LND peer to peer_ni
X-Git-Tag: 2.9.53~47^2~24
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=d8792a7dab933def57f6069296234ad48ea0da09

LU-7734 lnet: rename LND peer to peer_ni

Rename LND peers to peer_ni to reflect that these constructs
represent an actual connection between a local NI and a remote
peer NI.

Signed-off-by: Amir Shehata
Change-Id: I1c25a12eae61d8822a8c4ada2e077a5b2011ba22
Reviewed-on: http://review.whamcloud.com/19307
Reviewed-by: Doug Oucharek
Tested-by: Doug Oucharek
---
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index c07d4d18..17f0596 100644
--- a/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/lnet/klnds/o2iblnd/o2iblnd.c
@@ -255,7 +255,7 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob)
 msg->ibm_cksum = msg_cksum;
 if (flip) {
- /* leave magic unflipped as a clue to peer endianness */
+ /* leave magic unflipped as a clue to peer_ni endianness */
 msg->ibm_version = version;
 CLASSERT (sizeof(msg->ibm_type) == 1);
 CLASSERT (sizeof(msg->ibm_credits) == 1);
@@ -313,9 +313,9 @@
 }
 int
-kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+kiblnd_create_peer(lnet_ni_t *ni, kib_peer_ni_t **peerp, lnet_nid_t nid)
 {
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
 kib_net_t *net = ni->ni_data;
 int cpt = lnet_cpt_of_nid(nid, ni);
 unsigned long flags;
@@ -323,23 +323,23 @@
 LASSERT(net != NULL);
 LASSERT(nid != LNET_NID_ANY);
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
- CERROR("Cannot allocate peer\n");
+ LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
+ if (peer_ni == NULL) {
+ CERROR("Cannot allocate peer_ni\n");
 return -ENOMEM;
 }
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
- peer->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ peer_ni->ibp_ni = ni;
+ peer_ni->ibp_nid = nid;
+ peer_ni->ibp_error = 0;
+ peer_ni->ibp_last_alive = 0;
+ peer_ni->ibp_max_frags = kiblnd_cfg_rdma_frags(peer_ni->ibp_ni);
+ peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
+ atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ INIT_LIST_HEAD(&peer_ni->ibp_list); /* not in the peer_ni table yet */
+ INIT_LIST_HEAD(&peer_ni->ibp_conns);
+ INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);
 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -351,43 +351,43 @@
 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- *peerp = peer;
+ *peerp = peer_ni;
 return 0;
 }
 void
-kiblnd_destroy_peer (kib_peer_t *peer)
+kiblnd_destroy_peer (kib_peer_ni_t *peer_ni)
 {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
 LASSERT(net != NULL);
- LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT(!kiblnd_peer_active(peer));
- LASSERT(kiblnd_peer_idle(peer));
- LASSERT(list_empty(&peer->ibp_tx_queue));
+ LASSERT (atomic_read(&peer_ni->ibp_refcount) == 0);
+ LASSERT(!kiblnd_peer_active(peer_ni));
+
LASSERT(kiblnd_peer_idle(peer_ni)); + LASSERT(list_empty(&peer_ni->ibp_tx_queue)); - LIBCFS_FREE(peer, sizeof(*peer)); + LIBCFS_FREE(peer_ni, sizeof(*peer_ni)); - /* NB a peer's connections keep a reference on their peer until + /* NB a peer_ni's connections keep a reference on their peer_ni until * they are destroyed, so we can be assured that _all_ state to do - * with this peer has been cleaned up when its refcount drops to + * with this peer_ni has been cleaned up when its refcount drops to * zero. */ atomic_dec(&net->ibn_npeers); } -kib_peer_t * +kib_peer_ni_t * kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid) { /* the caller is responsible for accounting the additional reference * that this creates */ struct list_head *peer_list = kiblnd_nid2peerlist(nid); struct list_head *tmp; - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(tmp, kib_peer_ni_t, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); /* * Match a peer if its NID and the NID of the local NI it @@ -395,35 +395,35 @@ kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid) * the peer, which will result in a new lnd peer being * created. */ - if (peer->ibp_nid != nid || - peer->ibp_ni->ni_nid != ni->ni_nid) + if (peer_ni->ibp_nid != nid || + peer_ni->ibp_ni->ni_nid != ni->ni_nid) continue; - CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n", - peer, libcfs_nid2str(nid), - atomic_read(&peer->ibp_refcount), - peer->ibp_version); - return peer; + CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n", + peer_ni, libcfs_nid2str(nid), + atomic_read(&peer_ni->ibp_refcount), + peer_ni->ibp_version); + return peer_ni; } return NULL; } void -kiblnd_unlink_peer_locked (kib_peer_t *peer) +kiblnd_unlink_peer_locked (kib_peer_ni_t *peer_ni) { - LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(list_empty(&peer_ni->ibp_conns)); - LASSERT (kiblnd_peer_active(peer)); - list_del_init(&peer->ibp_list); + LASSERT (kiblnd_peer_active(peer_ni)); + list_del_init(&peer_ni->ibp_list); /* lose peerlist's ref */ - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); } static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, lnet_nid_t *nidp, int *count) { - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; struct list_head *ptmp; int i; unsigned long flags; @@ -434,17 +434,17 @@ kiblnd_get_peer_info(lnet_ni_t *ni, int index, list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; if (index-- > 0) continue; - *nidp = peer->ibp_nid; - *count = atomic_read(&peer->ibp_refcount); + *nidp = peer_ni->ibp_nid; + *count = atomic_read(&peer_ni->ibp_refcount); read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -457,23 +457,23 @@ kiblnd_get_peer_info(lnet_ni_t *ni, int index, } static void -kiblnd_del_peer_locked (kib_peer_t *peer) +kiblnd_del_peer_locked (kib_peer_ni_t *peer_ni) { struct list_head *ctmp; struct list_head *cnxt; kib_conn_t *conn; - if (list_empty(&peer->ibp_conns)) { - kiblnd_unlink_peer_locked(peer); + if (list_empty(&peer_ni->ibp_conns)) { + kiblnd_unlink_peer_locked(peer_ni); } else { - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { + list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); 
kiblnd_close_conn_locked(conn, 0); } - /* NB closing peer's last conn unlinked it. */ + /* NB closing peer_ni's last conn unlinked it. */ } - /* NB peer now unlinked; might even be freed if the peer table had the + /* NB peer_ni now unlinked; might even be freed if the peer_ni table had the * last ref on it. */ } @@ -483,7 +483,7 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid) struct list_head zombies = LIST_HEAD_INIT(zombies); struct list_head *ptmp; struct list_head *pnxt; - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; int lo; int hi; int i; @@ -501,23 +501,23 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; - if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid)) + if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid)) continue; - if (!list_empty(&peer->ibp_tx_queue)) { - LASSERT(list_empty(&peer->ibp_conns)); + if (!list_empty(&peer_ni->ibp_tx_queue)) { + LASSERT(list_empty(&peer_ni->ibp_conns)); - list_splice_init(&peer->ibp_tx_queue, + list_splice_init(&peer_ni->ibp_tx_queue, &zombies); } - kiblnd_del_peer_locked(peer); + kiblnd_del_peer_locked(peer_ni); rc = 0; /* matched something */ } } @@ -532,7 +532,7 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid) static kib_conn_t * kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) { - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; struct list_head *ptmp; kib_conn_t *conn; struct list_head *ctmp; @@ -544,13 +544,13 @@ kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; - list_for_each(ctmp, &peer->ibp_conns) { + list_for_each(ctmp, &peer_ni->ibp_conns) { if (index-- > 0) continue; @@ -699,18 +699,18 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) } kib_conn_t * -kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, +kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid, int state, int version) { /* CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's - * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself - * is destroyed. On failure, the caller's ref on 'peer' remains and + * ref on 'peer_ni'. It also "owns" 'cmid' and destroys it when it itself + * is destroyed. On failure, the caller's ref on 'peer_ni' remains and * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has * its ref on 'cmid'). 
*/ rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_net_t *net = peer->ibp_ni->ni_data; + kib_net_t *net = peer_ni->ibp_ni->ni_data; kib_dev_t *dev; struct ib_qp_init_attr *init_qp_attr; struct kib_sched_info *sched; @@ -729,7 +729,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, dev = net->ibn_dev; - cpt = lnet_cpt_of_nid(peer->ibp_nid, peer->ibp_ni); + cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni); sched = kiblnd_data.kib_scheds[cpt]; LASSERT(sched->ibs_nthreads > 0); @@ -738,24 +738,24 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, sizeof(*init_qp_attr)); if (init_qp_attr == NULL) { CERROR("Can't allocate qp_attr for %s\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); goto failed_0; } LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); if (conn == NULL) { CERROR("Can't allocate connection for %s\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); goto failed_1; } conn->ibc_state = IBLND_CONN_INIT; conn->ibc_version = version; - conn->ibc_peer = peer; /* I take the caller's ref */ + conn->ibc_peer = peer_ni; /* I take the caller's ref */ cmid->context = conn; /* for future CM callbacks */ conn->ibc_cmid = cmid; - conn->ibc_max_frags = peer->ibp_max_frags; - conn->ibc_queue_depth = peer->ibp_queue_depth; + conn->ibc_max_frags = peer_ni->ibp_max_frags; + conn->ibc_queue_depth = peer_ni->ibp_queue_depth; INIT_LIST_HEAD(&conn->ibc_early_rxs); INIT_LIST_HEAD(&conn->ibc_tx_noops); @@ -928,7 +928,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; - kib_peer_t *peer = conn->ibc_peer; + kib_peer_ni_t *peer_ni = conn->ibc_peer; int rc; LASSERT (!in_interrupt()); @@ -982,9 +982,9 @@ kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) /* See CAVEAT EMPTOR above in kiblnd_create_conn */ if (conn->ibc_state != IBLND_CONN_INIT) { - kib_net_t *net = peer->ibp_ni->ni_data; + kib_net_t *net = peer_ni->ibp_ni->ni_data; - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); rdma_destroy_id(cmid); atomic_dec(&net->ibn_nconns); } @@ -994,19 +994,19 @@ kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) } int -kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) +kiblnd_close_peer_conns_locked(kib_peer_ni_t *peer_ni, int why) { kib_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { + list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); CDEBUG(D_NET, "Closing conn -> %s, " "version: %x, reason: %d\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), conn->ibc_version, why); kiblnd_close_conn_locked(conn, why); @@ -1017,7 +1017,7 @@ kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) } int -kiblnd_close_stale_conns_locked(kib_peer_t *peer, +kiblnd_close_stale_conns_locked(kib_peer_ni_t *peer_ni, int version, __u64 incarnation) { kib_conn_t *conn; @@ -1025,7 +1025,7 @@ kiblnd_close_stale_conns_locked(kib_peer_t *peer, struct list_head *cnxt; int count = 0; - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { + list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); if (conn->ibc_version == version && @@ -1034,7 +1034,7 @@ kiblnd_close_stale_conns_locked(kib_peer_t *peer, CDEBUG(D_NET, "Closing stale conn -> %s version: %x, " "incarnation:%#llx(%x, %#llx)\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), 
conn->ibc_version, conn->ibc_incarnation, version, incarnation); @@ -1048,7 +1048,7 @@ kiblnd_close_stale_conns_locked(kib_peer_t *peer, static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) { - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; struct list_head *ptmp; struct list_head *pnxt; int lo; @@ -1069,16 +1069,16 @@ kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; - if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid)) + if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid)) continue; - count += kiblnd_close_peer_conns_locked(peer, 0); + count += kiblnd_close_peer_conns_locked(peer_ni, 0); } } @@ -1151,27 +1151,27 @@ kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) cfs_time_t last_alive = 0; cfs_time_t now = cfs_time_current(); rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; unsigned long flags; read_lock_irqsave(glock, flags); - peer = kiblnd_find_peer_locked(ni, nid); - if (peer != NULL) - last_alive = peer->ibp_last_alive; + peer_ni = kiblnd_find_peer_locked(ni, nid); + if (peer_ni != NULL) + last_alive = peer_ni->ibp_last_alive; read_unlock_irqrestore(glock, flags); if (last_alive != 0) *when = last_alive; - /* peer is not persistent in hash, trigger peer creation + /* peer_ni is not persistent in hash, trigger peer_ni creation * and connection establishment with a NULL tx */ - if (peer == NULL) + if (peer_ni == NULL) kiblnd_launch_tx(ni, NULL, nid); - CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", - libcfs_nid2str(nid), peer, + CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago\n", + libcfs_nid2str(nid), peer_ni, last_alive ? cfs_duration_sec(now - last_alive) : -1); return; } @@ -2933,7 +2933,7 @@ kiblnd_shutdown (lnet_ni_t *ni) /* nuke all existing peers within this net */ kiblnd_del_peer(ni, LNET_NID_ANY); - /* Wait for all peer state to clean up */ + /* Wait for all peer_ni state to clean up */ i = 2; while (atomic_read(&net->ibn_npeers) != 0) { i++; diff --git a/lnet/klnds/o2iblnd/o2iblnd.h b/lnet/klnds/o2iblnd/o2iblnd.h index feb8ef3..25382c2 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.h +++ b/lnet/klnds/o2iblnd/o2iblnd.h @@ -78,7 +78,7 @@ #include #include -#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ +#define IBLND_PEER_HASH_SIZE 101 /* # peer_ni lists */ /* # scheduler loops before reschedule */ #define IBLND_RESCHED 100 @@ -110,8 +110,8 @@ extern kib_tunables_t kiblnd_tunables; #define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ #define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ -#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ -#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */ +#define IBLND_CREDITS_DEFAULT 8 /* default # of peer_ni credits */ +#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */ /* when eagerly to return credits */ #define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? 
\ @@ -398,7 +398,7 @@ typedef struct /* schedulers sleep here */ wait_queue_head_t kib_failover_waitq; atomic_t kib_nthreads; /* # live threads */ - /* stabilize net/dev/peer/conn ops */ + /* stabilize net/dev/peer_ni/conn ops */ rwlock_t kib_global_lock; /* hash table of all my known peers */ struct list_head *kib_peers; @@ -535,7 +535,7 @@ typedef struct { __u16 ibr_version; /* sender's version */ __u8 ibr_why; /* reject reason */ __u8 ibr_padding; /* padding */ - __u64 ibr_incarnation; /* incarnation of peer */ + __u64 ibr_incarnation; /* incarnation of peer_ni */ kib_connparams_t ibr_cp; /* connection parameters */ } WIRE_ATTR kib_rej_t; @@ -544,12 +544,12 @@ typedef struct { #define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ #define IBLND_REJECT_FATAL 3 /* Anything else */ -#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ -#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ +#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer_ni */ +#define IBLND_REJECT_CONN_STALE 5 /* stale peer_ni */ -/* peer's rdma frags doesn't match mine */ +/* peer_ni's rdma frags doesn't match mine */ #define IBLND_REJECT_RDMA_FRAGS 6 -/* peer's msg queue size doesn't match mine */ +/* peer_ni's msg queue size doesn't match mine */ #define IBLND_REJECT_MSG_QUEUE_SIZE 7 /***********************************************************************/ @@ -578,7 +578,7 @@ typedef struct kib_rx /* receive message */ #define IBLND_POSTRX_DONT_POST 0 /* don't post */ #define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */ -#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */ +#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer_ni back 1 credit */ #define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */ typedef struct kib_tx /* transmit message */ @@ -593,7 +593,7 @@ typedef struct kib_tx /* transmit message */ short tx_sending; /* queued for sending */ short tx_queued; - /* waiting for peer */ + /* waiting for peer_ni */ short tx_waiting; /* LNET completion status */ int tx_status; @@ -639,11 +639,11 @@ typedef struct kib_conn { /* scheduler information */ struct kib_sched_info *ibc_sched; - /* owning peer */ + /* owning peer_ni */ struct kib_peer *ibc_peer; /* HCA bound on */ kib_hca_dev_t *ibc_hdev; - /* stash on peer's conn list */ + /* stash on peer_ni's conn list */ struct list_head ibc_list; /* schedule for attention */ struct list_head ibc_sched_list; @@ -720,7 +720,7 @@ typedef struct kib_conn typedef struct kib_peer { - /* stash on global peer list */ + /* stash on global peer_ni list */ struct list_head ibp_list; /* who's on the other end(s) */ lnet_nid_t ibp_nid; @@ -730,31 +730,31 @@ typedef struct kib_peer struct list_head ibp_conns; /* msgs waiting for a conn */ struct list_head ibp_tx_queue; - /* incarnation of peer */ + /* incarnation of peer_ni */ __u64 ibp_incarnation; /* when (in jiffies) I was last alive */ cfs_time_t ibp_last_alive; /* # users */ atomic_t ibp_refcount; - /* version of peer */ + /* version of peer_ni */ __u16 ibp_version; /* current passive connection attempts */ unsigned short ibp_accepting; /* current active connection attempts */ unsigned short ibp_connecting; - /* reconnect this peer later */ + /* reconnect this peer_ni later */ unsigned short ibp_reconnecting:1; /* counter of how many times we triggered a conn race */ unsigned char ibp_races; /* # consecutive reconnection attempts to this peer */ unsigned int ibp_reconnected; - /* errno on closing this peer */ + /* errno on closing this 
peer_ni */ int ibp_error; /* max map_on_demand */ __u16 ibp_max_frags; /* max_peer_credits */ __u16 ibp_queue_depth; -} kib_peer_t; +} kib_peer_ni_t; #ifndef HAVE_IB_INC_RKEY /** @@ -868,36 +868,36 @@ do { \ } \ } while (0) -#define kiblnd_peer_addref(peer) \ +#define kiblnd_peer_addref(peer_ni) \ do { \ - CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \ - (peer), libcfs_nid2str((peer)->ibp_nid), \ - atomic_read (&(peer)->ibp_refcount)); \ - atomic_inc(&(peer)->ibp_refcount); \ + CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)++\n", \ + (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid), \ + atomic_read (&(peer_ni)->ibp_refcount)); \ + atomic_inc(&(peer_ni)->ibp_refcount); \ } while (0) -#define kiblnd_peer_decref(peer) \ +#define kiblnd_peer_decref(peer_ni) \ do { \ - CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \ - (peer), libcfs_nid2str((peer)->ibp_nid), \ - atomic_read (&(peer)->ibp_refcount)); \ - LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \ - if (atomic_dec_and_test(&(peer)->ibp_refcount)) \ - kiblnd_destroy_peer(peer); \ + CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)--\n", \ + (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid), \ + atomic_read (&(peer_ni)->ibp_refcount)); \ + LASSERT_ATOMIC_POS(&(peer_ni)->ibp_refcount); \ + if (atomic_dec_and_test(&(peer_ni)->ibp_refcount)) \ + kiblnd_destroy_peer(peer_ni); \ } while (0) static inline bool -kiblnd_peer_connecting(kib_peer_t *peer) +kiblnd_peer_connecting(kib_peer_ni_t *peer_ni) { - return peer->ibp_connecting != 0 || - peer->ibp_reconnecting != 0 || - peer->ibp_accepting != 0; + return peer_ni->ibp_connecting != 0 || + peer_ni->ibp_reconnecting != 0 || + peer_ni->ibp_accepting != 0; } static inline bool -kiblnd_peer_idle(kib_peer_t *peer) +kiblnd_peer_idle(kib_peer_ni_t *peer_ni) { - return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns); + return !kiblnd_peer_connecting(peer_ni) && list_empty(&peer_ni->ibp_conns); } static inline struct list_head * @@ -910,19 +910,19 @@ kiblnd_nid2peerlist (lnet_nid_t nid) } static inline int -kiblnd_peer_active (kib_peer_t *peer) +kiblnd_peer_active (kib_peer_ni_t *peer_ni) { - /* Am I in the peer hash table? */ - return !list_empty(&peer->ibp_list); + /* Am I in the peer_ni hash table? 
*/ + return !list_empty(&peer_ni->ibp_list); } static inline kib_conn_t * -kiblnd_get_conn_locked (kib_peer_t *peer) +kiblnd_get_conn_locked (kib_peer_ni_t *peer_ni) { - LASSERT(!list_empty(&peer->ibp_conns)); + LASSERT(!list_empty(&peer_ni->ibp_conns)); /* just return the first connection */ - return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list); + return list_entry(peer_ni->ibp_conns.next, kib_conn_t, ibc_list); } static inline int @@ -1179,17 +1179,17 @@ int kiblnd_cm_callback(struct rdma_cm_id *cmid, int kiblnd_translate_mtu(int value); int kiblnd_dev_failover(kib_dev_t *dev); -int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid); -void kiblnd_destroy_peer (kib_peer_t *peer); -bool kiblnd_reconnect_peer(kib_peer_t *peer); +int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_ni_t **peerp, lnet_nid_t nid); +void kiblnd_destroy_peer (kib_peer_ni_t *peer); +bool kiblnd_reconnect_peer(kib_peer_ni_t *peer); void kiblnd_destroy_dev (kib_dev_t *dev); -void kiblnd_unlink_peer_locked (kib_peer_t *peer); -kib_peer_t *kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid); -int kiblnd_close_stale_conns_locked (kib_peer_t *peer, +void kiblnd_unlink_peer_locked (kib_peer_ni_t *peer_ni); +kib_peer_ni_t *kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid); +int kiblnd_close_stale_conns_locked (kib_peer_ni_t *peer_ni, int version, __u64 incarnation); -int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why); +int kiblnd_close_peer_conns_locked (kib_peer_ni_t *peer_ni, int why); -kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, +kib_conn_t *kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid, int state, int version); void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn); void kiblnd_close_conn (kib_conn_t *conn, int error); diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index 41b9cdb..3901be7 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -38,8 +38,8 @@ #define MAX_CONN_RACES_BEFORE_ABORT 20 -static void kiblnd_peer_alive(kib_peer_t *peer); -static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); +static void kiblnd_peer_alive(kib_peer_ni_t *peer_ni); +static void kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error); static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob); static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, @@ -61,7 +61,7 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) LASSERT (!in_interrupt()); LASSERT (!tx->tx_queued); /* mustn't be queued for sending */ LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */ - LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */ + LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer_ni response */ LASSERT (tx->tx_pool != NULL); kiblnd_unmap_tx(ni, tx); @@ -416,7 +416,7 @@ kiblnd_handle_rx (kib_rx_t *rx) LASSERT (tx->tx_waiting); /* CAVEAT EMPTOR: I could be racing with tx_complete, but... - * (a) I can overwrite tx_msg since my peer has received it! + * (a) I can overwrite tx_msg since my peer_ni has received it! * (b) tx_waiting set tells tx_complete() it's not done. 
*/ tx->tx_nwrq = 0; /* overwrite PUT_REQ */ @@ -580,7 +580,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob) return rc; } - /* If rd is not tx_rd, it's going to get sent to a peer, who will need + /* If rd is not tx_rd, it's going to get sent to a peer_ni, who will need * the rkey */ rd->rd_key = tx->fmr.fmr_key; rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; @@ -616,7 +616,7 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags) __u32 nob; int i; - /* If rd is not tx_rd, it's going to get sent to a peer and I'm the + /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the * RDMA sink */ tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; tx->tx_nfrags = nfrags; @@ -753,12 +753,12 @@ static int kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) __must_hold(&conn->ibc_lock) { - kib_msg_t *msg = tx->tx_msg; - kib_peer_t *peer = conn->ibc_peer; - struct lnet_ni *ni = peer->ibp_ni; - int ver = conn->ibc_version; - int rc; - int done; + kib_msg_t *msg = tx->tx_msg; + kib_peer_ni_t *peer_ni = conn->ibc_peer; + struct lnet_ni *ni = peer_ni->ibp_ni; + int ver = conn->ibc_version; + int rc; + int done; LASSERT(tx->tx_queued); /* We rely on this for QP sizing */ @@ -775,13 +775,13 @@ __must_hold(&conn->ibc_lock) kiblnd_concurrent_sends(ver, ni)) { /* tx completions outstanding... */ CDEBUG(D_NET, "%s: posted enough\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); return -EAGAIN; } if (credit != 0 && conn->ibc_credits == 0) { /* no credits */ CDEBUG(D_NET, "%s: no credits\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); return -EAGAIN; } @@ -789,7 +789,7 @@ __must_hold(&conn->ibc_lock) conn->ibc_credits == 1 && /* last credit reserved */ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ CDEBUG(D_NET, "%s: not using last credit\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); return -EAGAIN; } @@ -805,16 +805,16 @@ __must_hold(&conn->ibc_lock) * kiblnd_check_sends_locked will queue NOOP again when * posted NOOPs complete */ spin_unlock(&conn->ibc_lock); - kiblnd_tx_done(peer->ibp_ni, tx); + kiblnd_tx_done(peer_ni->ibp_ni, tx); spin_lock(&conn->ibc_lock); CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), conn->ibc_noops_posted); return 0; } - kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits, - peer->ibp_nid, conn->ibc_incarnation); + kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits, + peer_ni->ibp_nid, conn->ibc_incarnation); conn->ibc_credits -= credit; conn->ibc_outstanding_credits = 0; @@ -854,7 +854,7 @@ __must_hold(&conn->ibc_lock) } LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), - "bad wr_id %#llx, opc %d, flags %d, peer: %s\n", + "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n", bad->wr_id, bad->opcode, bad->send_flags, libcfs_nid2str(conn->ibc_peer->ibp_nid)); @@ -887,15 +887,15 @@ __must_hold(&conn->ibc_lock) if (conn->ibc_state == IBLND_CONN_ESTABLISHED) CERROR("Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer->ibp_nid)); + rc, libcfs_nid2str(peer_ni->ibp_nid)); else CDEBUG(D_NET, "Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer->ibp_nid)); + rc, libcfs_nid2str(peer_ni->ibp_nid)); kiblnd_close_conn(conn, rc); if (done) - kiblnd_tx_done(peer->ibp_ni, tx); + kiblnd_tx_done(peer_ni->ibp_ni, tx); spin_lock(&conn->ibc_lock); @@ -1000,12 +1000,12 @@ 
kiblnd_tx_complete (kib_tx_t *tx, int status) conn->ibc_noops_posted--; if (failed) { - tx->tx_waiting = 0; /* don't wait for peer */ + tx->tx_waiting = 0; /* don't wait for peer_ni */ tx->tx_status = -EIO; } idle = (tx->tx_sending == 0) && /* This is the final callback */ - !tx->tx_waiting && /* Not waiting for peer */ + !tx->tx_waiting && /* Not waiting for peer_ni */ !tx->tx_queued; /* Not re-queued (PUT_DONE) */ if (idle) list_del(&tx->tx_list); @@ -1084,7 +1084,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, } if (tx->tx_nwrq >= conn->ibc_max_frags) { - CERROR("RDMA has too many fragments for peer %s (%d), " + CERROR("RDMA has too many fragments for peer_ni %s (%d), " "src idx/frags: %d/%d dst idx/frags: %d/%d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), conn->ibc_max_frags, @@ -1242,25 +1242,25 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, } static void -kiblnd_connect_peer (kib_peer_t *peer) +kiblnd_connect_peer (kib_peer_ni_t *peer_ni) { struct rdma_cm_id *cmid; kib_dev_t *dev; - kib_net_t *net = peer->ibp_ni->ni_data; + kib_net_t *net = peer_ni->ibp_ni->ni_data; struct sockaddr_in srcaddr; struct sockaddr_in dstaddr; int rc; LASSERT (net != NULL); - LASSERT (peer->ibp_connecting > 0); - LASSERT(!peer->ibp_reconnecting); + LASSERT (peer_ni->ibp_connecting > 0); + LASSERT(!peer_ni->ibp_reconnecting); - cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, + cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cmid)) { CERROR("Can't create CMID for %s: %ld\n", - libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid)); + libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid)); rc = PTR_ERR(cmid); goto failed; } @@ -1273,9 +1273,9 @@ kiblnd_connect_peer (kib_peer_t *peer) memset(&dstaddr, 0, sizeof(dstaddr)); dstaddr.sin_family = AF_INET; dstaddr.sin_port = htons(*kiblnd_tunables.kib_service); - dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid)); + dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid)); - kiblnd_peer_addref(peer); /* cmid's ref */ + kiblnd_peer_addref(peer_ni); /* cmid's ref */ if (*kiblnd_tunables.kib_use_priv_port) { rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr, @@ -1289,28 +1289,28 @@ kiblnd_connect_peer (kib_peer_t *peer) if (rc != 0) { /* Can't initiate address resolution: */ CERROR("Can't resolve addr for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); + libcfs_nid2str(peer_ni->ibp_nid), rc); goto failed2; } LASSERT (cmid->device != NULL); CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", - libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, + libcfs_nid2str(peer_ni->ibp_nid), dev->ibd_ifname, &dev->ibd_ifip, cmid->device->name); return; failed2: - kiblnd_peer_connect_failed(peer, 1, rc); - kiblnd_peer_decref(peer); /* cmid's ref */ + kiblnd_peer_connect_failed(peer_ni, 1, rc); + kiblnd_peer_decref(peer_ni); /* cmid's ref */ rdma_destroy_id(cmid); return; failed: - kiblnd_peer_connect_failed(peer, 1, rc); + kiblnd_peer_connect_failed(peer_ni, 1, rc); } bool -kiblnd_reconnect_peer(kib_peer_t *peer) +kiblnd_reconnect_peer(kib_peer_ni_t *peer_ni) { rwlock_t *glock = &kiblnd_data.kib_global_lock; char *reason = NULL; @@ -1320,12 +1320,12 @@ kiblnd_reconnect_peer(kib_peer_t *peer) INIT_LIST_HEAD(&txs); write_lock_irqsave(glock, flags); - if (peer->ibp_reconnecting == 0) { - if (peer->ibp_accepting) + if (peer_ni->ibp_reconnecting == 0) { + if (peer_ni->ibp_accepting) reason = "accepting"; - else if (peer->ibp_connecting) + else if (peer_ni->ibp_connecting) reason = 
"connecting"; - else if (!list_empty(&peer->ibp_conns)) + else if (!list_empty(&peer_ni->ibp_conns)) reason = "connected"; else /* connected then closed */ reason = "closed"; @@ -1333,37 +1333,38 @@ kiblnd_reconnect_peer(kib_peer_t *peer) goto no_reconnect; } - LASSERT(!peer->ibp_accepting && !peer->ibp_connecting && - list_empty(&peer->ibp_conns)); - peer->ibp_reconnecting = 0; + LASSERT(!peer_ni->ibp_accepting && !peer_ni->ibp_connecting && + list_empty(&peer_ni->ibp_conns)); + peer_ni->ibp_reconnecting = 0; - if (!kiblnd_peer_active(peer)) { - list_splice_init(&peer->ibp_tx_queue, &txs); + if (!kiblnd_peer_active(peer_ni)) { + list_splice_init(&peer_ni->ibp_tx_queue, &txs); reason = "unlinked"; goto no_reconnect; } - peer->ibp_connecting++; - peer->ibp_reconnected++; + peer_ni->ibp_connecting++; + peer_ni->ibp_reconnected++; + write_unlock_irqrestore(glock, flags); - kiblnd_connect_peer(peer); + kiblnd_connect_peer(peer_ni); return true; no_reconnect: write_unlock_irqrestore(glock, flags); CWARN("Abort reconnection of %s: %s\n", - libcfs_nid2str(peer->ibp_nid), reason); - kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED); + libcfs_nid2str(peer_ni->ibp_nid), reason); + kiblnd_txlist_done(peer_ni->ibp_ni, &txs, -ECONNABORTED); return false; } void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) { - kib_peer_t *peer; - kib_peer_t *peer2; + kib_peer_ni_t *peer_ni; + kib_peer_ni_t *peer2; kib_conn_t *conn; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; unsigned long flags; @@ -1375,14 +1376,14 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ - /* First time, just use a read lock since I expect to find my peer + /* First time, just use a read lock since I expect to find my peer_ni * connected */ read_lock_irqsave(g_lock, flags); - peer = kiblnd_find_peer_locked(ni, nid); - if (peer != NULL && !list_empty(&peer->ibp_conns)) { - /* Found a peer with an established connection */ - conn = kiblnd_get_conn_locked(peer); + peer_ni = kiblnd_find_peer_locked(ni, nid); + if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) { + /* Found a peer_ni with an established connection */ + conn = kiblnd_get_conn_locked(peer_ni); kiblnd_conn_addref(conn); /* 1 ref for me... */ read_unlock_irqrestore(g_lock, flags); @@ -1397,17 +1398,17 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) /* Re-try with a write lock */ write_lock(g_lock); - peer = kiblnd_find_peer_locked(ni, nid); - if (peer != NULL) { - if (list_empty(&peer->ibp_conns)) { - /* found a peer, but it's still connecting... */ - LASSERT(kiblnd_peer_connecting(peer)); + peer_ni = kiblnd_find_peer_locked(ni, nid); + if (peer_ni != NULL) { + if (list_empty(&peer_ni->ibp_conns)) { + /* found a peer_ni, but it's still connecting... */ + LASSERT(kiblnd_peer_connecting(peer_ni)); if (tx != NULL) list_add_tail(&tx->tx_list, - &peer->ibp_tx_queue); + &peer_ni->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { - conn = kiblnd_get_conn_locked(peer); + conn = kiblnd_get_conn_locked(peer_ni); kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ write_unlock_irqrestore(g_lock, flags); @@ -1421,10 +1422,10 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - /* Allocate a peer ready to add to the peer table and retry */ - rc = kiblnd_create_peer(ni, &peer, nid); + /* Allocate a peer_ni ready to add to the peer_ni table and retry */ + rc = kiblnd_create_peer(ni, &peer_ni, nid); if (rc != 0) { - CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); + CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid)); if (tx != NULL) { tx->tx_status = -EHOSTUNREACH; tx->tx_waiting = 0; @@ -1438,7 +1439,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) peer2 = kiblnd_find_peer_locked(ni, nid); if (peer2 != NULL) { if (list_empty(&peer2->ibp_conns)) { - /* found a peer, but it's still connecting... */ + /* found a peer_ni, but it's still connecting... */ LASSERT(kiblnd_peer_connecting(peer2)); if (tx != NULL) list_add_tail(&tx->tx_list, @@ -1455,27 +1456,27 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) kiblnd_conn_decref(conn); /* ...to here */ } - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); return; } - /* Brand new peer */ - LASSERT (peer->ibp_connecting == 0); - peer->ibp_connecting = 1; + /* Brand new peer_ni */ + LASSERT (peer_ni->ibp_connecting == 0); + peer_ni->ibp_connecting = 1; /* always called with a ref on ni, which prevents ni being shutdown */ LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); if (tx != NULL) - list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); + list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue); - kiblnd_peer_addref(peer); - list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + kiblnd_peer_addref(peer_ni); + list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid)); write_unlock_irqrestore(g_lock, flags); - kiblnd_connect_peer(peer); - kiblnd_peer_decref(peer); + kiblnd_connect_peer(peer_ni); + kiblnd_peer_decref(peer_ni); } int @@ -1787,7 +1788,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, CERROR("Can't setup PUT sink for %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_tx_done(ni, tx); - /* tell peer it's over */ + /* tell peer_ni it's over */ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc, rxmsg->ibm_u.putreq.ibprm_cookie); break; @@ -1844,15 +1845,15 @@ kiblnd_thread_fini (void) } static void -kiblnd_peer_alive (kib_peer_t *peer) +kiblnd_peer_alive (kib_peer_ni_t *peer_ni) { /* This is racy, but everyone's only writing cfs_time_current() */ - peer->ibp_last_alive = cfs_time_current(); + peer_ni->ibp_last_alive = cfs_time_current(); smp_mb(); } static void -kiblnd_peer_notify (kib_peer_t *peer) +kiblnd_peer_notify (kib_peer_ni_t *peer_ni) { int error = 0; cfs_time_t last_alive = 0; @@ -1860,18 +1861,18 @@ kiblnd_peer_notify (kib_peer_t *peer) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (kiblnd_peer_idle(peer) && peer->ibp_error != 0) { - error = peer->ibp_error; - peer->ibp_error = 0; + if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) { + error = peer_ni->ibp_error; + peer_ni->ibp_error = 0; - last_alive = peer->ibp_last_alive; + last_alive = peer_ni->ibp_last_alive; } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); if (error != 0) - lnet_notify(peer->ibp_ni, - peer->ibp_nid, 0, last_alive); + lnet_notify(peer_ni->ibp_ni, + peer_ni->ibp_nid, 0, last_alive); } void @@ -1883,7 +1884,7 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) * connection to be finished off by the connd. 
Otherwise the connd is * already dealing with it (either to set it up or tear it down). * Caller holds kib_global_lock exclusively in irq context */ - kib_peer_t *peer = conn->ibc_peer; + kib_peer_ni_t *peer_ni = conn->ibc_peer; kib_dev_t *dev; unsigned long flags; @@ -1902,10 +1903,10 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) list_empty(&conn->ibc_tx_queue_nocred) && list_empty(&conn->ibc_active_txs)) { CDEBUG(D_NET, "closing conn to %s\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); } else { CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", - libcfs_nid2str(peer->ibp_nid), error, + libcfs_nid2str(peer_ni->ibp_nid), error, list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", list_empty(&conn->ibc_tx_queue_rsrvd) ? @@ -1915,16 +1916,16 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); } - dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev; + dev = ((kib_net_t *)peer_ni->ibp_ni->ni_data)->ibn_dev; list_del(&conn->ibc_list); /* connd (see below) takes over ibc_list's ref */ - if (list_empty(&peer->ibp_conns) && /* no more conns */ - kiblnd_peer_active(peer)) { /* still in peer table */ - kiblnd_unlink_peer_locked(peer); + if (list_empty(&peer_ni->ibp_conns) && /* no more conns */ + kiblnd_peer_active(peer_ni)) { /* still in peer_ni table */ + kiblnd_unlink_peer_locked(peer_ni); /* set/clear error on last conn */ - peer->ibp_error = conn->ibc_comms_error; + peer_ni->ibp_error = conn->ibc_comms_error; } kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING); @@ -2041,7 +2042,7 @@ kiblnd_finalise_conn (kib_conn_t *conn) } static void -kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) +kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error) { struct list_head zombies = LIST_HEAD_INIT(zombies); unsigned long flags; @@ -2052,52 +2053,52 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (active) { - LASSERT (peer->ibp_connecting > 0); - peer->ibp_connecting--; + LASSERT(peer_ni->ibp_connecting > 0); + peer_ni->ibp_connecting--; } else { - LASSERT (peer->ibp_accepting > 0); - peer->ibp_accepting--; + LASSERT (peer_ni->ibp_accepting > 0); + peer_ni->ibp_accepting--; } - if (kiblnd_peer_connecting(peer)) { + if (kiblnd_peer_connecting(peer_ni)) { /* another connection attempt under way... 
*/ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); return; } - peer->ibp_reconnected = 0; - if (list_empty(&peer->ibp_conns)) { - /* Take peer's blocked transmits to complete with error */ - list_add(&zombies, &peer->ibp_tx_queue); - list_del_init(&peer->ibp_tx_queue); + peer_ni->ibp_reconnected = 0; + if (list_empty(&peer_ni->ibp_conns)) { + /* Take peer_ni's blocked transmits to complete with error */ + list_add(&zombies, &peer_ni->ibp_tx_queue); + list_del_init(&peer_ni->ibp_tx_queue); - if (kiblnd_peer_active(peer)) - kiblnd_unlink_peer_locked(peer); + if (kiblnd_peer_active(peer_ni)) + kiblnd_unlink_peer_locked(peer_ni); - peer->ibp_error = error; + peer_ni->ibp_error = error; } else { /* Can't have blocked transmits if there are connections */ - LASSERT(list_empty(&peer->ibp_tx_queue)); + LASSERT(list_empty(&peer_ni->ibp_tx_queue)); } write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - kiblnd_peer_notify(peer); + kiblnd_peer_notify(peer_ni); if (list_empty(&zombies)) return; CNETERR("Deleting messages for %s: connection failed\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); - kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); + kiblnd_txlist_done(peer_ni->ibp_ni, &zombies, -EHOSTUNREACH); } static void kiblnd_connreq_done(kib_conn_t *conn, int status) { - kib_peer_t *peer = conn->ibc_peer; + kib_peer_ni_t *peer_ni = conn->ibc_peer; kib_tx_t *tx; struct list_head txs; unsigned long flags; @@ -2106,21 +2107,21 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n", - libcfs_nid2str(peer->ibp_nid), active, + libcfs_nid2str(peer_ni->ibp_nid), active, conn->ibc_version, status); LASSERT (!in_interrupt()); LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && - peer->ibp_connecting > 0) || + peer_ni->ibp_connecting > 0) || (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT && - peer->ibp_accepting > 0)); + peer_ni->ibp_accepting > 0)); LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); conn->ibc_connvars = NULL; if (status != 0) { /* failed to establish connection */ - kiblnd_peer_connect_failed(peer, active, status); + kiblnd_peer_connect_failed(peer_ni, active, status); kiblnd_finalise_conn(conn); return; } @@ -2130,38 +2131,38 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) conn->ibc_last_send = jiffies; kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); - kiblnd_peer_alive(peer); + kiblnd_peer_alive(peer_ni); - /* Add conn to peer's list and nuke any dangling conns from a different - * peer instance... */ + /* Add conn to peer_ni's list and nuke any dangling conns from a different + * peer_ni instance... 
*/ kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ - list_add(&conn->ibc_list, &peer->ibp_conns); - peer->ibp_reconnected = 0; + list_add(&conn->ibc_list, &peer_ni->ibp_conns); + peer_ni->ibp_reconnected = 0; if (active) - peer->ibp_connecting--; + peer_ni->ibp_connecting--; else - peer->ibp_accepting--; + peer_ni->ibp_accepting--; - if (peer->ibp_version == 0) { - peer->ibp_version = conn->ibc_version; - peer->ibp_incarnation = conn->ibc_incarnation; + if (peer_ni->ibp_version == 0) { + peer_ni->ibp_version = conn->ibc_version; + peer_ni->ibp_incarnation = conn->ibc_incarnation; } - if (peer->ibp_version != conn->ibc_version || - peer->ibp_incarnation != conn->ibc_incarnation) { - kiblnd_close_stale_conns_locked(peer, conn->ibc_version, + if (peer_ni->ibp_version != conn->ibc_version || + peer_ni->ibp_incarnation != conn->ibc_incarnation) { + kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version, conn->ibc_incarnation); - peer->ibp_version = conn->ibc_version; - peer->ibp_incarnation = conn->ibc_incarnation; + peer_ni->ibp_version = conn->ibc_version; + peer_ni->ibp_incarnation = conn->ibc_incarnation; } /* grab pending txs while I have the lock */ - list_add(&txs, &peer->ibp_tx_queue); - list_del_init(&peer->ibp_tx_queue); + list_add(&txs, &peer_ni->ibp_tx_queue); + list_del_init(&peer_ni->ibp_tx_queue); - if (!kiblnd_peer_active(peer) || /* peer has been deleted */ + if (!kiblnd_peer_active(peer_ni) || /* peer_ni has been deleted */ conn->ibc_comms_error != 0) { /* error has happened already */ - lnet_ni_t *ni = peer->ibp_ni; + lnet_ni_t *ni = peer_ni->ibp_ni; /* start to shut down connection */ kiblnd_close_conn_locked(conn, -ECONNABORTED); @@ -2214,8 +2215,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) kib_msg_t *reqmsg = priv; kib_msg_t *ackmsg; kib_dev_t *ibdev; - kib_peer_t *peer; - kib_peer_t *peer2; + kib_peer_ni_t *peer_ni; + kib_peer_ni_t *peer2; kib_conn_t *conn; lnet_ni_t *ni = NULL; kib_net_t *net = NULL; @@ -2241,7 +2242,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) if (*kiblnd_tunables.kib_require_priv_port && ntohs(peer_addr->sin_port) >= PROT_SOCK) { __u32 ip = ntohl(peer_addr->sin_addr.s_addr); - CERROR("Peer's port (%pI4h:%hu) is not privileged\n", + CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n", &ip, ntohs(peer_addr->sin_port)); goto failed; } @@ -2255,7 +2256,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) * o2iblnd-specific protocol changes, or when LNET unifies * protocols over all LNDs, the initial connection will * negotiate a protocol version. I trap this here to avoid - * console errors; the reject tells the peer which protocol I + * console errors; the reject tells the peer_ni which protocol I * speak. 
*/ if (reqmsg->ibm_magic == LNET_PROTO_MAGIC || reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC)) @@ -2304,7 +2305,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - /* I can accept peer's version */ + /* I can accept peer_ni's version */ version = reqmsg->ibm_version; if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) { @@ -2363,17 +2364,17 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - /* assume 'nid' is a new peer; create */ - rc = kiblnd_create_peer(ni, &peer, nid); + /* assume 'nid' is a new peer_ni; create */ + rc = kiblnd_create_peer(ni, &peer_ni, nid); if (rc != 0) { - CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); + CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid)); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } /* We have validated the peer's parameters so use those */ - peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags; - peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth; + peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags; + peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth; write_lock_irqsave(g_lock, flags); @@ -2399,10 +2400,10 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) libcfs_nid2str(nid), peer2->ibp_version, version, peer2->ibp_incarnation, reqmsg->ibm_srcstamp); - kiblnd_peer_decref(peer); - rej.ibr_why = IBLND_REJECT_CONN_STALE; - goto failed; - } + kiblnd_peer_decref(peer_ni); + rej.ibr_why = IBLND_REJECT_CONN_STALE; + goto failed; + } /* Tie-break connection race in favour of the higher NID. * If we keep running into a race condition multiple times, @@ -2420,7 +2421,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) CDEBUG(D_NET, "Conn race %s\n", libcfs_nid2str(peer2->ibp_nid)); - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); rej.ibr_why = IBLND_REJECT_CONN_RACE; goto failed; } @@ -2429,7 +2430,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) libcfs_nid2str(peer2->ibp_nid), MAX_CONN_RACES_BEFORE_ABORT); /* - * passive connection is allowed even this peer is waiting for + * passive connection is allowed even this peer_ni is waiting for * reconnection. 
*/ peer2->ibp_reconnecting = 0; @@ -2437,38 +2438,38 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) peer2->ibp_accepting++; kiblnd_peer_addref(peer2); - /* Race with kiblnd_launch_tx (active connect) to create peer + /* Race with kiblnd_launch_tx (active connect) to create peer_ni * so copy validated parameters since we now know what the - * peer's limits are */ - peer2->ibp_max_frags = peer->ibp_max_frags; - peer2->ibp_queue_depth = peer->ibp_queue_depth; + * peer_ni's limits are */ + peer2->ibp_max_frags = peer_ni->ibp_max_frags; + peer2->ibp_queue_depth = peer_ni->ibp_queue_depth; write_unlock_irqrestore(g_lock, flags); - kiblnd_peer_decref(peer); - peer = peer2; + kiblnd_peer_decref(peer_ni); + peer_ni = peer2; } else { - /* Brand new peer */ - LASSERT (peer->ibp_accepting == 0); - LASSERT (peer->ibp_version == 0 && - peer->ibp_incarnation == 0); + /* Brand new peer_ni */ + LASSERT (peer_ni->ibp_accepting == 0); + LASSERT (peer_ni->ibp_version == 0 && + peer_ni->ibp_incarnation == 0); - peer->ibp_accepting = 1; - peer->ibp_version = version; - peer->ibp_incarnation = reqmsg->ibm_srcstamp; + peer_ni->ibp_accepting = 1; + peer_ni->ibp_version = version; + peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp; /* I have a ref on ni that prevents it being shutdown */ LASSERT (net->ibn_shutdown == 0); - kiblnd_peer_addref(peer); - list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + kiblnd_peer_addref(peer_ni); + list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid)); write_unlock_irqrestore(g_lock, flags); } - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); + conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version); if (conn == NULL) { - kiblnd_peer_connect_failed(peer, 0, -ENOMEM); - kiblnd_peer_decref(peer); + kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM); + kiblnd_peer_decref(peer_ni); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } @@ -2536,7 +2537,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, __u64 incarnation, int why, kib_connparams_t *cp) { rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer = conn->ibc_peer; + kib_peer_ni_t *peer_ni = conn->ibc_peer; char *reason; int msg_size = IBLND_MSG_SIZE; int frag_num = -1; @@ -2545,8 +2546,8 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, unsigned long flags; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); - LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ - LASSERT(!peer->ibp_reconnecting); + LASSERT(peer_ni->ibp_connecting > 0); /* 'conn' at least */ + LASSERT(!peer_ni->ibp_reconnecting); if (cp) { msg_size = cp->ibcp_max_msg_size; @@ -2560,10 +2561,10 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, * NB: reconnect is still needed even when ibp_tx_queue is * empty if ibp_version != version because reconnect may be * initiated by kiblnd_query() */ - reconnect = (!list_empty(&peer->ibp_tx_queue) || - peer->ibp_version != version) && - peer->ibp_connecting == 1 && - peer->ibp_accepting == 0; + reconnect = (!list_empty(&peer_ni->ibp_tx_queue) || + peer_ni->ibp_version != version) && + peer_ni->ibp_connecting == 1 && + peer_ni->ibp_accepting == 0; if (!reconnect) { reason = "no need"; goto out; @@ -2581,7 +2582,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, reason = "can't negotiate max frags"; goto out; } - tunables = &peer->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; + tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; if (!tunables->lnd_map_on_demand) { reason = 
"map_on_demand must be enabled"; goto out; @@ -2591,7 +2592,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, goto out; } - peer->ibp_max_frags = frag_num; + peer_ni->ibp_max_frags = frag_num; reason = "rdma fragments"; break; } @@ -2605,7 +2606,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, goto out; } - peer->ibp_queue_depth = queue_dep; + peer_ni->ibp_queue_depth = queue_dep; reason = "queue depth"; break; @@ -2623,21 +2624,21 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, } conn->ibc_reconnect = 1; - peer->ibp_reconnecting = 1; - peer->ibp_version = version; + peer_ni->ibp_reconnecting = 1; + peer_ni->ibp_version = version; if (incarnation != 0) - peer->ibp_incarnation = incarnation; + peer_ni->ibp_incarnation = incarnation; out: write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), reconnect ? "reconnect" : "don't reconnect", reason, IBLND_MSG_VERSION, version, msg_size, conn->ibc_queue_depth, queue_dep, conn->ibc_max_frags, frag_num); /* - * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer + * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni * while destroying the zombie */ } @@ -2645,7 +2646,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, static void kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; + kib_peer_ni_t *peer_ni = conn->ibc_peer; LASSERT (!in_interrupt()); LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2658,7 +2659,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) case IB_CM_REJ_INVALID_SERVICE_ID: CNETERR("%s rejected: no listener at %d\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), *kiblnd_tunables.kib_service); break; @@ -2674,7 +2675,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) * b) V2 will provide incarnation while rejecting me, * -1 will be overwrote. 
* - * if I try to connect to a V1 peer with V2 protocol, + * if I try to connect to a V1 peer_ni with V2 protocol, * it rejected me then upgrade to V2, I have no idea * about the upgrading and try to reconnect with V1, * in this case upgraded V2 can find out I'm trying to @@ -2708,22 +2709,22 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) if (rej->ibr_magic != IBLND_MSG_MAGIC && rej->ibr_magic != LNET_PROTO_MAGIC) { CERROR("%s rejected: consumer defined fatal error\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); break; } if (rej->ibr_version != IBLND_MSG_VERSION && rej->ibr_version != IBLND_MSG_VERSION_1) { CERROR("%s rejected: o2iblnd version %x error\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version); break; } if (rej->ibr_why == IBLND_REJECT_FATAL && rej->ibr_version == IBLND_MSG_VERSION_1) { - CDEBUG(D_NET, "rejected by old version peer %s: %x\n", - libcfs_nid2str(peer->ibp_nid), rej->ibr_version); + CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n", + libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version); if (conn->ibc_version != IBLND_MSG_VERSION_1) rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT; @@ -2741,17 +2742,17 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) case IBLND_REJECT_NO_RESOURCES: CERROR("%s rejected: o2iblnd no resources\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); break; case IBLND_REJECT_FATAL: CERROR("%s rejected: o2iblnd fatal error\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); break; default: CERROR("%s rejected: o2iblnd reason %d\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_why); break; } @@ -2760,7 +2761,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) /* fall through */ default: CNETERR("%s rejected: reason %d, size %d\n", - libcfs_nid2str(peer->ibp_nid), reason, priv_nob); + libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob); break; } @@ -2770,8 +2771,8 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) static void kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; - lnet_ni_t *ni = peer->ibp_ni; + kib_peer_ni_t *peer_ni = conn->ibc_peer; + lnet_ni_t *ni = peer_ni->ibp_ni; kib_net_t *net = ni->ni_data; kib_msg_t *msg = priv; int ver = conn->ibc_version; @@ -2782,13 +2783,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) if (rc != 0) { CERROR("Can't unpack connack from %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); + libcfs_nid2str(peer_ni->ibp_nid), rc); goto failed; } if (msg->ibm_type != IBLND_MSG_CONNACK) { CERROR("Unexpected message %d from %s\n", - msg->ibm_type, libcfs_nid2str(peer->ibp_nid)); + msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid)); rc = -EPROTO; goto failed; } @@ -2796,7 +2797,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) if (ver != msg->ibm_version) { CERROR("%s replied version %x is different with " "requested version %x\n", - libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver); + libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver); rc = -EPROTO; goto failed; } @@ -2804,7 +2805,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) if (msg->ibm_u.connparams.ibcp_queue_depth > conn->ibc_queue_depth) { CERROR("%s has incompatible queue depth %d (<=%d wanted)\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), 
msg->ibm_u.connparams.ibcp_queue_depth, conn->ibc_queue_depth); rc = -EPROTO; @@ -2814,7 +2815,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) if (msg->ibm_u.connparams.ibcp_max_frags > conn->ibc_max_frags) { CERROR("%s has incompatible max_frags %d (<=%d wanted)\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_u.connparams.ibcp_max_frags, conn->ibc_max_frags); rc = -EPROTO; @@ -2823,7 +2824,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { CERROR("%s max message size %d too big (%d max)\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_u.connparams.ibcp_max_msg_size, IBLND_MSG_SIZE); rc = -EPROTO; @@ -2841,7 +2842,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) if (rc != 0) { CERROR("Bad connection reply from %s, rc = %d, " "version: %x max_frags: %d\n", - libcfs_nid2str(peer->ibp_nid), rc, + libcfs_nid2str(peer_ni->ibp_nid), rc, msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags); goto failed; } @@ -2871,7 +2872,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) static int kiblnd_active_connect (struct rdma_cm_id *cmid) { - kib_peer_t *peer = (kib_peer_t *)cmid->context; + kib_peer_ni_t *peer_ni = (kib_peer_ni_t *)cmid->context; kib_conn_t *conn; kib_msg_t *msg; struct rdma_conn_param cp; @@ -2882,23 +2883,23 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - incarnation = peer->ibp_incarnation; - version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : - peer->ibp_version; + incarnation = peer_ni->ibp_incarnation; + version = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION : + peer_ni->ibp_version; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, + conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT, version); if (conn == NULL) { - kiblnd_peer_connect_failed(peer, 1, -ENOMEM); - kiblnd_peer_decref(peer); /* lose cmid's ref */ + kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM); + kiblnd_peer_decref(peer_ni); /* lose cmid's ref */ return -ENOMEM; } /* conn "owns" cmid now, so I return success from here on to ensure the * CM callback doesn't destroy cmid. 
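
The comment above leans on an RDMA CM convention used throughout this callback chain: a non-zero return from the event handler makes the CM destroy the cm_id, so once a conn has taken ownership the handler must keep returning success and leave teardown to refcounting. A toy illustration of that contract, with hypothetical types rather than the real rdma_cm structures:

        #include <stddef.h>

        struct cm_id { void *context; };
        struct conn  { struct cm_id *cmid; };

        /* Hypothetical event handler: non-zero tells the "CM" to destroy
         * the id; zero means a conn now owns it and will release it. */
        static int on_connect_event(struct cm_id *cmid, struct conn *conn)
        {
                if (conn == NULL)
                        return -1;        /* setup failed: caller may destroy cmid */

                conn->cmid = cmid;        /* conn "owns" cmid from this point */
                cmid->context = conn;
                return 0;                 /* so never report failure past here */
        }

This is why the failure arms in kiblnd_cm_callback() return a non-zero status only on paths where no conn was created.
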
conn also takes over cmid's ref - * on peer */ + * on peer_ni */ msg = &conn->ibc_connvars->cv_msg; @@ -2908,8 +2909,8 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - kiblnd_pack_msg(peer->ibp_ni, msg, version, - 0, peer->ibp_nid, incarnation); + kiblnd_pack_msg(peer_ni->ibp_ni, msg, version, + 0, peer_ni->ibp_nid, incarnation); memset(&cp, 0, sizeof(cp)); cp.private_data = msg; @@ -2926,7 +2927,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) rc = rdma_connect(cmid, &cp); if (rc != 0) { CERROR("Can't connect to %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); + libcfs_nid2str(peer_ni->ibp_nid), rc); kiblnd_connreq_done(conn, rc); kiblnd_conn_decref(conn); } @@ -2937,7 +2938,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) { - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; kib_conn_t *conn; int rc; @@ -2956,22 +2957,22 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return rc; case RDMA_CM_EVENT_ADDR_ERROR: - peer = (kib_peer_t *)cmid->context; + peer_ni = (kib_peer_ni_t *)cmid->context; CNETERR("%s: ADDR ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); - kiblnd_peer_decref(peer); + libcfs_nid2str(peer_ni->ibp_nid), event->status); + kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH); + kiblnd_peer_decref(peer_ni); return -EHOSTUNREACH; /* rc != 0 destroys cmid */ case RDMA_CM_EVENT_ADDR_RESOLVED: - peer = (kib_peer_t *)cmid->context; + peer_ni = (kib_peer_ni_t *)cmid->context; CDEBUG(D_NET,"%s Addr resolved: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer_ni->ibp_nid), event->status); if (event->status != 0) { CNETERR("Can't resolve address for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer_ni->ibp_nid), event->status); rc = event->status; } else { rc = rdma_resolve_route( @@ -2980,32 +2981,32 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return 0; /* Can't initiate route resolution */ CERROR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); + libcfs_nid2str(peer_ni->ibp_nid), rc); } - kiblnd_peer_connect_failed(peer, 1, rc); - kiblnd_peer_decref(peer); + kiblnd_peer_connect_failed(peer_ni, 1, rc); + kiblnd_peer_decref(peer_ni); return rc; /* rc != 0 destroys cmid */ case RDMA_CM_EVENT_ROUTE_ERROR: - peer = (kib_peer_t *)cmid->context; + peer_ni = (kib_peer_ni_t *)cmid->context; CNETERR("%s: ROUTE ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); - kiblnd_peer_decref(peer); + libcfs_nid2str(peer_ni->ibp_nid), event->status); + kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH); + kiblnd_peer_decref(peer_ni); return -EHOSTUNREACH; /* rc != 0 destroys cmid */ case RDMA_CM_EVENT_ROUTE_RESOLVED: - peer = (kib_peer_t *)cmid->context; + peer_ni = (kib_peer_ni_t *)cmid->context; CDEBUG(D_NET,"%s Route resolved: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer_ni->ibp_nid), event->status); if (event->status == 0) return kiblnd_active_connect(cmid); CNETERR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); - kiblnd_peer_connect_failed(peer, 1, event->status); - kiblnd_peer_decref(peer); + libcfs_nid2str(peer_ni->ibp_nid), event->status); + 
kiblnd_peer_connect_failed(peer_ni, 1, event->status); + kiblnd_peer_decref(peer_ni); return event->status; /* rc != 0 destroys cmid */ case RDMA_CM_EVENT_UNREACHABLE: @@ -3148,7 +3149,7 @@ kiblnd_check_conns (int idx) struct list_head checksends = LIST_HEAD_INIT(checksends); struct list_head *peers = &kiblnd_data.kib_peers[idx]; struct list_head *ptmp; - kib_peer_t *peer; + kib_peer_ni_t *peer_ni; kib_conn_t *conn; struct list_head *ctmp; unsigned long flags; @@ -3159,9 +3160,9 @@ kiblnd_check_conns (int idx) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); list_for_each(ptmp, peers) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); + peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list); - list_for_each(ctmp, &peer->ibp_conns) { + list_for_each(ctmp, &peer_ni->ibp_conns) { int timedout; int sendnoop; @@ -3181,9 +3182,9 @@ kiblnd_check_conns (int idx) if (timedout) { CERROR("Timed out RDMA with %s (%lu): " "c: %u, oc: %u, rc: %u\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), cfs_duration_sec(cfs_time_current() - - peer->ibp_last_alive), + peer_ni->ibp_last_alive), conn->ibc_credits, conn->ibc_outstanding_credits, conn->ibc_reserved_credits); @@ -3241,7 +3242,7 @@ kiblnd_disconnect_conn (kib_conn_t *conn) } /* - * High-water for reconnection to the same peer, reconnection attempt should + * High-water for reconnection to the same peer_ni, reconnection attempt should * be delayed after trying more than KIB_RECONN_HIGH_RACE. */ #define KIB_RECONN_HIGH_RACE 10 @@ -3277,27 +3278,27 @@ kiblnd_connd (void *arg) dropped_lock = 0; if (!list_empty(&kiblnd_data.kib_connd_zombies)) { - kib_peer_t *peer = NULL; + kib_peer_ni_t *peer_ni = NULL; conn = list_entry(kiblnd_data.kib_connd_zombies.next, kib_conn_t, ibc_list); list_del(&conn->ibc_list); if (conn->ibc_reconnect) { - peer = conn->ibc_peer; - kiblnd_peer_addref(peer); + peer_ni = conn->ibc_peer; + kiblnd_peer_addref(peer_ni); } spin_unlock_irqrestore(lock, flags); dropped_lock = 1; - kiblnd_destroy_conn(conn, !peer); + kiblnd_destroy_conn(conn, !peer_ni); spin_lock_irqsave(lock, flags); - if (!peer) + if (!peer_ni) continue; - conn->ibc_peer = peer; - if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) + conn->ibc_peer = peer_ni; + if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE) list_add_tail(&conn->ibc_list, &kiblnd_data.kib_reconn_list); else @@ -3356,7 +3357,7 @@ kiblnd_connd (void *arg) /* Time to check for RDMA timeouts on a few more * peers: I do checks every 'p' seconds on a - * proportion of the peer table and I need to check + * proportion of the peer_ni table and I need to check * every connection 'n' times within a timeout * interval, to ensure I detect a timeout on any * connection within (n+1)/n times the timeout diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c index be5545e..f27a8ce 100644 --- a/lnet/klnds/socklnd/socklnd.c +++ b/lnet/klnds/socklnd/socklnd.c @@ -97,42 +97,42 @@ ksocknal_destroy_route (ksock_route_t *route) } static int -ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_create_peer(ksock_peer_ni_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) { int cpt = lnet_cpt_of_nid(id.nid, ni); ksock_net_t *net = ni->ni_data; - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; LASSERT(id.nid != LNET_NID_ANY); LASSERT(id.pid != LNET_PID_ANY); LASSERT(!in_interrupt()); - LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); - if (peer == NULL) + LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni)); + if (peer_ni 
== NULL) return -ENOMEM; - peer->ksnp_ni = ni; - peer->ksnp_id = id; - atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */ - peer->ksnp_closing = 0; - peer->ksnp_accepting = 0; - peer->ksnp_proto = NULL; - peer->ksnp_last_alive = 0; - peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; - - INIT_LIST_HEAD(&peer->ksnp_conns); - INIT_LIST_HEAD(&peer->ksnp_routes); - INIT_LIST_HEAD(&peer->ksnp_tx_queue); - INIT_LIST_HEAD(&peer->ksnp_zc_req_list); - spin_lock_init(&peer->ksnp_lock); + peer_ni->ksnp_ni = ni; + peer_ni->ksnp_id = id; + atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */ + peer_ni->ksnp_closing = 0; + peer_ni->ksnp_accepting = 0; + peer_ni->ksnp_proto = NULL; + peer_ni->ksnp_last_alive = 0; + peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; + + INIT_LIST_HEAD(&peer_ni->ksnp_conns); + INIT_LIST_HEAD(&peer_ni->ksnp_routes); + INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue); + INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list); + spin_lock_init(&peer_ni->ksnp_lock); spin_lock_bh(&net->ksnn_lock); if (net->ksnn_shutdown) { spin_unlock_bh(&net->ksnn_lock); - LIBCFS_FREE(peer, sizeof(*peer)); - CERROR("Can't create peer: network shutdown\n"); + LIBCFS_FREE(peer_ni, sizeof(*peer_ni)); + CERROR("Can't create peer_ni: network shutdown\n"); return -ESHUTDOWN; } @@ -140,106 +140,106 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) spin_unlock_bh(&net->ksnn_lock); - *peerp = peer; + *peerp = peer_ni; return 0; } void -ksocknal_destroy_peer (ksock_peer_t *peer) +ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni) { - ksock_net_t *net = peer->ksnp_ni->ni_data; + ksock_net_t *net = peer_ni->ksnp_ni->ni_data; - CDEBUG (D_NET, "peer %s %p deleted\n", - libcfs_id2str(peer->ksnp_id), peer); + CDEBUG (D_NET, "peer_ni %s %p deleted\n", + libcfs_id2str(peer_ni->ksnp_id), peer_ni); - LASSERT(atomic_read(&peer->ksnp_refcount) == 0); - LASSERT(peer->ksnp_accepting == 0); - LASSERT(list_empty(&peer->ksnp_conns)); - LASSERT(list_empty(&peer->ksnp_routes)); - LASSERT(list_empty(&peer->ksnp_tx_queue)); - LASSERT(list_empty(&peer->ksnp_zc_req_list)); + LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0); + LASSERT(peer_ni->ksnp_accepting == 0); + LASSERT(list_empty(&peer_ni->ksnp_conns)); + LASSERT(list_empty(&peer_ni->ksnp_routes)); + LASSERT(list_empty(&peer_ni->ksnp_tx_queue)); + LASSERT(list_empty(&peer_ni->ksnp_zc_req_list)); - LIBCFS_FREE(peer, sizeof(*peer)); + LIBCFS_FREE(peer_ni, sizeof(*peer_ni)); - /* NB a peer's connections and routes keep a reference on their peer + /* NB a peer_ni's connections and routes keep a reference on their peer_ni * until they are destroyed, so we can be assured that _all_ state to - * do with this peer has been cleaned up when its refcount drops to + * do with this peer_ni has been cleaned up when its refcount drops to * zero. 
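
Both LNDs depend on the lifetime rule this comment restates: every connection (and, in the socklnd, every route) pins its peer_ni with a reference, so freeing is safe exactly when the count reaches zero. A compact userspace sketch of the idiom using C11 atomics; the kernel code uses atomic_t plus LASSERTs instead:

        #include <stdatomic.h>
        #include <stdlib.h>

        struct peer_ni {
                atomic_int refcount;  /* one ref per conn/route, plus holders */
        };

        static struct peer_ni *peer_create(void)
        {
                struct peer_ni *p = calloc(1, sizeof(*p));

                if (p != NULL)
                        atomic_store(&p->refcount, 1); /* 1 ref for caller */
                return p;
        }

        static void peer_addref(struct peer_ni *p)
        {
                atomic_fetch_add(&p->refcount, 1);
        }

        static void peer_decref(struct peer_ni *p)
        {
                /* fetch_sub returns the old value; old == 1 means this was
                 * the last reference, so every conn/route is already gone
                 * and freeing is safe. */
                if (atomic_fetch_sub(&p->refcount, 1) == 1)
                        free(p);
        }

        int main(void)
        {
                struct peer_ni *p = peer_create();

                if (p == NULL)
                        return 1;
                peer_addref(p);   /* e.g. a new conn takes its reference */
                peer_decref(p);   /* ...and drops it when destroyed */
                peer_decref(p);   /* caller's ref: count hits zero, p freed */
                return 0;
        }
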
*/ spin_lock_bh(&net->ksnn_lock); net->ksnn_npeers--; spin_unlock_bh(&net->ksnn_lock); } -ksock_peer_t * +ksock_peer_ni_t * ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id) { struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); struct list_head *tmp; - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); + peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list); - LASSERT(!peer->ksnp_closing); + LASSERT(!peer_ni->ksnp_closing); - if (peer->ksnp_ni != ni) + if (peer_ni->ksnp_ni != ni) continue; - if (peer->ksnp_id.nid != id.nid || - peer->ksnp_id.pid != id.pid) + if (peer_ni->ksnp_id.nid != id.nid || + peer_ni->ksnp_id.pid != id.pid) continue; - CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n", - peer, libcfs_id2str(id), - atomic_read(&peer->ksnp_refcount)); - return peer; + CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n", + peer_ni, libcfs_id2str(id), + atomic_read(&peer_ni->ksnp_refcount)); + return peer_ni; } return NULL; } -ksock_peer_t * +ksock_peer_ni_t * ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id) { - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; read_lock(&ksocknal_data.ksnd_global_lock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) /* +1 ref for caller? */ - ksocknal_peer_addref(peer); + peer_ni = ksocknal_find_peer_locked(ni, id); + if (peer_ni != NULL) /* +1 ref for caller? */ + ksocknal_peer_addref(peer_ni); read_unlock(&ksocknal_data.ksnd_global_lock); - return (peer); + return (peer_ni); } static void -ksocknal_unlink_peer_locked (ksock_peer_t *peer) +ksocknal_unlink_peer_locked (ksock_peer_ni_t *peer_ni) { int i; __u32 ip; ksock_interface_t *iface; - for (i = 0; i < peer->ksnp_n_passive_ips; i++) { + for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) { LASSERT (i < LNET_MAX_INTERFACES); - ip = peer->ksnp_passive_ips[i]; + ip = peer_ni->ksnp_passive_ips[i]; - iface = ksocknal_ip2iface(peer->ksnp_ni, ip); - /* All IPs in peer->ksnp_passive_ips[] come from the + iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip); + /* All IPs in peer_ni->ksnp_passive_ips[] come from the * interface list, therefore the call must succeed. 
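
ksocknal_find_peer_locked(), renamed above, is a plain chained-hash lookup: hash the NID into a bucket, then require both the process id and the local NI to match, which is exactly the local-interface pairing the peer_ni rename is meant to make explicit. A minimal sketch of that lookup shape (toy hash, toy types, no locking):

        #include <stddef.h>
        #include <stdint.h>

        #define PEER_HASH_SIZE 101   /* mirrors SOCKNAL_PEER_HASH_SIZE */

        struct peer_ni {
                struct peer_ni *next;   /* bucket chain */
                uint64_t nid;           /* remote NID */
                const void *ni;         /* local network interface */
        };

        static struct peer_ni *peer_hash[PEER_HASH_SIZE];

        static struct peer_ni *find_peer(const void *ni, uint64_t nid)
        {
                struct peer_ni *p = peer_hash[nid % PEER_HASH_SIZE];

                for (; p != NULL; p = p->next) {
                        /* both the remote NID and the local NI must match */
                        if (p->nid == nid && p->ni == ni)
                                return p;
                }
                return NULL;
        }
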
*/ LASSERT (iface != NULL); - CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n", - peer, iface, iface->ksni_nroutes); + CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n", + peer_ni, iface, iface->ksni_nroutes); iface->ksni_npeers--; } - LASSERT(list_empty(&peer->ksnp_conns)); - LASSERT(list_empty(&peer->ksnp_routes)); - LASSERT(!peer->ksnp_closing); - peer->ksnp_closing = 1; - list_del(&peer->ksnp_list); + LASSERT(list_empty(&peer_ni->ksnp_conns)); + LASSERT(list_empty(&peer_ni->ksnp_routes)); + LASSERT(!peer_ni->ksnp_closing); + peer_ni->ksnp_closing = 1; + list_del(&peer_ni->ksnp_list); /* lose peerlist's ref */ - ksocknal_peer_decref(peer); + ksocknal_peer_decref(peer_ni); } static int @@ -247,7 +247,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index, lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port, int *conn_count, int *share_count) { - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; struct list_head *ptmp; ksock_route_t *route; struct list_head *rtmp; @@ -259,17 +259,17 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index, for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list); - if (peer->ksnp_ni != ni) + if (peer_ni->ksnp_ni != ni) continue; - if (peer->ksnp_n_passive_ips == 0 && - list_empty(&peer->ksnp_routes)) { + if (peer_ni->ksnp_n_passive_ips == 0 && + list_empty(&peer_ni->ksnp_routes)) { if (index-- > 0) continue; - *id = peer->ksnp_id; + *id = peer_ni->ksnp_id; *myip = 0; *peer_ip = 0; *port = 0; @@ -279,12 +279,12 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index, goto out; } - for (j = 0; j < peer->ksnp_n_passive_ips; j++) { + for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) { if (index-- > 0) continue; - *id = peer->ksnp_id; - *myip = peer->ksnp_passive_ips[j]; + *id = peer_ni->ksnp_id; + *myip = peer_ni->ksnp_passive_ips[j]; *peer_ip = 0; *port = 0; *conn_count = 0; @@ -293,14 +293,14 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index, goto out; } - list_for_each(rtmp, &peer->ksnp_routes) { + list_for_each(rtmp, &peer_ni->ksnp_routes) { if (index-- > 0) continue; route = list_entry(rtmp, ksock_route_t, ksnr_list); - *id = peer->ksnp_id; + *id = peer_ni->ksnp_id; *myip = route->ksnr_myipaddr; *peer_ip = route->ksnr_ipaddr; *port = route->ksnr_port; @@ -319,7 +319,7 @@ out: static void ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) { - ksock_peer_t *peer = route->ksnr_peer; + ksock_peer_ni_t *peer_ni = route->ksnr_peer; int type = conn->ksnc_type; ksock_interface_t *iface; @@ -330,12 +330,12 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) if (route->ksnr_myipaddr == 0) { /* route wasn't bound locally yet (the initial route) */ CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &route->ksnr_ipaddr, &conn->ksnc_myipaddr); } else { CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h " - "to %pI4h\n", libcfs_id2str(peer->ksnp_id), + "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id), &route->ksnr_ipaddr, &route->ksnr_myipaddr, &conn->ksnc_myipaddr); @@ -361,36 +361,36 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) } static void -ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route) +ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route) { struct list_head *tmp; ksock_conn_t *conn; ksock_route_t *route2; - 
LASSERT(!peer->ksnp_closing); + LASSERT(!peer_ni->ksnp_closing); LASSERT(route->ksnr_peer == NULL); LASSERT(!route->ksnr_scheduled); LASSERT(!route->ksnr_connecting); LASSERT(route->ksnr_connected == 0); /* LASSERT(unique) */ - list_for_each(tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer_ni->ksnp_routes) { route2 = list_entry(tmp, ksock_route_t, ksnr_list); if (route2->ksnr_ipaddr == route->ksnr_ipaddr) { CERROR("Duplicate route %s %pI4h\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &route->ksnr_ipaddr); LBUG(); } } - route->ksnr_peer = peer; - ksocknal_peer_addref(peer); - /* peer's routelist takes over my ref on 'route' */ - list_add_tail(&route->ksnr_list, &peer->ksnp_routes); + route->ksnr_peer = peer_ni; + ksocknal_peer_addref(peer_ni); + /* peer_ni's routelist takes over my ref on 'route' */ + list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes); - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); if (conn->ksnc_ipaddr != route->ksnr_ipaddr) @@ -404,7 +404,7 @@ ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route) static void ksocknal_del_route_locked (ksock_route_t *route) { - ksock_peer_t *peer = route->ksnr_peer; + ksock_peer_ni_t *peer_ni = route->ksnr_peer; ksock_interface_t *iface; ksock_conn_t *conn; struct list_head *ctmp; @@ -413,7 +413,7 @@ ksocknal_del_route_locked (ksock_route_t *route) LASSERT(!route->ksnr_deleted); /* Close associated conns */ - list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { + list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); if (conn->ksnc_route != route) @@ -431,13 +431,13 @@ ksocknal_del_route_locked (ksock_route_t *route) route->ksnr_deleted = 1; list_del(&route->ksnr_list); - ksocknal_route_decref(route); /* drop peer's ref */ + ksocknal_route_decref(route); /* drop peer_ni's ref */ - if (list_empty(&peer->ksnp_routes) && - list_empty(&peer->ksnp_conns)) { - /* I've just removed the last route to a peer with no active + if (list_empty(&peer_ni->ksnp_routes) && + list_empty(&peer_ni->ksnp_conns)) { + /* I've just removed the last route to a peer_ni with no active * connections */ - ksocknal_unlink_peer_locked(peer); + ksocknal_unlink_peer_locked(peer_ni); } } @@ -445,8 +445,8 @@ int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) { struct list_head *tmp; - ksock_peer_t *peer; - ksock_peer_t *peer2; + ksock_peer_ni_t *peer_ni; + ksock_peer_ni_t *peer2; ksock_route_t *route; ksock_route_t *route2; int rc; @@ -455,14 +455,14 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) id.pid == LNET_PID_ANY) return (-EINVAL); - /* Have a brand new peer ready... */ - rc = ksocknal_create_peer(&peer, ni, id); + /* Have a brand new peer_ni ready... 
*/ + rc = ksocknal_create_peer(&peer_ni, ni, id); if (rc != 0) return rc; route = ksocknal_create_route (ipaddr, port); if (route == NULL) { - ksocknal_peer_decref(peer); + ksocknal_peer_decref(peer_ni); return (-ENOMEM); } @@ -473,16 +473,16 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) peer2 = ksocknal_find_peer_locked(ni, id); if (peer2 != NULL) { - ksocknal_peer_decref(peer); - peer = peer2; + ksocknal_peer_decref(peer_ni); + peer_ni = peer2; } else { - /* peer table takes my ref on peer */ - list_add_tail(&peer->ksnp_list, + /* peer_ni table takes my ref on peer_ni */ + list_add_tail(&peer_ni->ksnp_list, ksocknal_nid2peerlist(id.nid)); } route2 = NULL; - list_for_each(tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer_ni->ksnp_routes) { route2 = list_entry(tmp, ksock_route_t, ksnr_list); if (route2->ksnr_ipaddr == ipaddr) @@ -491,7 +491,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) route2 = NULL; } if (route2 == NULL) { - ksocknal_add_route_locked(peer, route); + ksocknal_add_route_locked(peer_ni, route); route->ksnr_share_count++; } else { ksocknal_route_decref(route); @@ -504,7 +504,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) } static void -ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) +ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip) { ksock_conn_t *conn; ksock_route_t *route; @@ -512,12 +512,12 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) struct list_head *nxt; int nshared; - LASSERT(!peer->ksnp_closing); + LASSERT(!peer_ni->ksnp_closing); - /* Extra ref prevents peer disappearing until I'm done with it */ - ksocknal_peer_addref(peer); + /* Extra ref prevents peer_ni disappearing until I'm done with it */ + ksocknal_peer_addref(peer_ni); - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { + list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* no match */ @@ -530,7 +530,7 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) } nshared = 0; - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { + list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); nshared += route->ksnr_share_count; } @@ -539,7 +539,7 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) /* remove everything else if there are no explicit entries * left */ - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { + list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* we should only be removing auto-entries */ @@ -547,15 +547,15 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) ksocknal_del_route_locked(route); } - list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { + list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); ksocknal_close_conn_locked(conn, 0); } } - ksocknal_peer_decref(peer); - /* NB peer unlinks itself when last conn/route is removed */ + ksocknal_peer_decref(peer_ni); + /* NB peer_ni unlinks itself when last conn/route is removed */ } static int @@ -564,7 +564,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) struct list_head zombies = LIST_HEAD_INIT(zombies); struct list_head *ptmp; struct list_head *pnxt; - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; int lo; int hi; int i; @@ -584,31 +584,31 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) for (i = lo; i <= hi; i++) { 
list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list); - if (peer->ksnp_ni != ni) + if (peer_ni->ksnp_ni != ni) continue; if (!((id.nid == LNET_NID_ANY || - peer->ksnp_id.nid == id.nid) && + peer_ni->ksnp_id.nid == id.nid) && (id.pid == LNET_PID_ANY || - peer->ksnp_id.pid == id.pid))) + peer_ni->ksnp_id.pid == id.pid))) continue; - ksocknal_peer_addref(peer); /* a ref for me... */ + ksocknal_peer_addref(peer_ni); /* a ref for me... */ - ksocknal_del_peer_locked(peer, ip); + ksocknal_del_peer_locked(peer_ni, ip); - if (peer->ksnp_closing && - !list_empty(&peer->ksnp_tx_queue)) { - LASSERT(list_empty(&peer->ksnp_conns)); - LASSERT(list_empty(&peer->ksnp_routes)); + if (peer_ni->ksnp_closing && + !list_empty(&peer_ni->ksnp_tx_queue)) { + LASSERT(list_empty(&peer_ni->ksnp_conns)); + LASSERT(list_empty(&peer_ni->ksnp_routes)); - list_splice_init(&peer->ksnp_tx_queue, + list_splice_init(&peer_ni->ksnp_tx_queue, &zombies); } - ksocknal_peer_decref(peer); /* ...till here */ + ksocknal_peer_decref(peer_ni); /* ...till here */ rc = 0; /* matched! */ } @@ -624,7 +624,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) static ksock_conn_t * ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index) { - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; struct list_head *ptmp; ksock_conn_t *conn; struct list_head *ctmp; @@ -634,14 +634,14 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list); - LASSERT(!peer->ksnp_closing); + LASSERT(!peer_ni->ksnp_closing); - if (peer->ksnp_ni != ni) + if (peer_ni->ksnp_ni != ni) continue; - list_for_each(ctmp, &peer->ksnp_conns) { + list_for_each(ctmp, &peer_ni->ksnp_conns) { if (index-- > 0) continue; @@ -743,10 +743,10 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips) } static int -ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) +ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips) { rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - ksock_net_t *net = peer->ksnp_ni->ni_data; + ksock_net_t *net = peer_ni->ksnp_ni->ni_data; ksock_interface_t *iface; ksock_interface_t *best_iface; int n_ips; @@ -777,25 +777,25 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) n_ips = (net->ksnn_ninterfaces < 2) ? 0 : MIN(n_peerips, net->ksnn_ninterfaces); - for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) { + for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) { /* ^ yes really... */ /* If we have any new interfaces, first tick off all the - * peer IPs that match old interfaces, then choose new - * interfaces to match the remaining peer IPS. + * peer_ni IPs that match old interfaces, then choose new + * interfaces to match the remaining peer_ni IPS. * We don't forget interfaces we've stopped using; we might * start using them again... */ - if (i < peer->ksnp_n_passive_ips) { + if (i < peer_ni->ksnp_n_passive_ips) { /* Old interface. 
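
ksocknal_select_ips(), being renamed here, runs in two phases: positions that already hold a passive IP keep their old interface, and the remaining slots get a freshly chosen one. For the fresh choice, a plausible sketch of the "least-loaded interface not already in use" step, with hypothetical fields standing in for ksock_interface_t:

        #include <stddef.h>

        struct iface {
                unsigned ip;
                int npeers;   /* current peer_ni load on this interface */
        };

        /* Pick the least-loaded interface whose IP is not in used[].
         * Returns NULL when every interface is already taken. */
        static struct iface *pick_iface(struct iface *ifs, int nifs,
                                        const unsigned *used, int nused)
        {
                struct iface *best = NULL;

                for (int i = 0; i < nifs; i++) {
                        int taken = 0;

                        for (int k = 0; k < nused; k++)
                                if (used[k] == ifs[i].ip)
                                        taken = 1;
                        if (taken)
                                continue;
                        if (best == NULL || ifs[i].npeers < best->npeers)
                                best = &ifs[i];
                }
                return best;
        }
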
*/ - ip = peer->ksnp_passive_ips[i]; - best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip); + ip = peer_ni->ksnp_passive_ips[i]; + best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip); - /* peer passive ips are kept up to date */ + /* peer_ni passive ips are kept up to date */ LASSERT(best_iface != NULL); } else { /* choose a new interface */ - LASSERT (i == peer->ksnp_n_passive_ips); + LASSERT (i == peer_ni->ksnp_n_passive_ips); best_iface = NULL; best_netmatch = 0; @@ -805,11 +805,11 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) iface = &net->ksnn_interfaces[j]; ip = iface->ksni_ipaddr; - for (k = 0; k < peer->ksnp_n_passive_ips; k++) - if (peer->ksnp_passive_ips[k] == ip) + for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++) + if (peer_ni->ksnp_passive_ips[k] == ip) break; - if (k < peer->ksnp_n_passive_ips) /* using it already */ + if (k < peer_ni->ksnp_n_passive_ips) /* using it already */ continue; k = ksocknal_match_peerip(iface, peerips, n_peerips); @@ -831,17 +831,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) best_iface->ksni_npeers++; ip = best_iface->ksni_ipaddr; - peer->ksnp_passive_ips[i] = ip; - peer->ksnp_n_passive_ips = i+1; + peer_ni->ksnp_passive_ips[i] = ip; + peer_ni->ksnp_n_passive_ips = i+1; } - /* mark the best matching peer IP used */ + /* mark the best matching peer_ni IP used */ j = ksocknal_match_peerip(best_iface, peerips, n_peerips); peerips[j] = 0; } - /* Overwrite input peer IP addresses */ - memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips)); + /* Overwrite input peer_ni IP addresses */ + memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips)); write_unlock_bh(global_lock); @@ -849,12 +849,12 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) } static void -ksocknal_create_routes(ksock_peer_t *peer, int port, +ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port, __u32 *peer_ipaddrs, int npeer_ipaddrs) { ksock_route_t *newroute = NULL; rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - lnet_ni_t *ni = peer->ksnp_ni; + lnet_ni_t *ni = peer_ni->ksnp_ni; ksock_net_t *net = ni->ni_data; struct list_head *rtmp; ksock_route_t *route; @@ -895,14 +895,14 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, write_lock_bh(global_lock); } - if (peer->ksnp_closing) { - /* peer got closed under me */ + if (peer_ni->ksnp_closing) { + /* peer_ni got closed under me */ break; } /* Already got a route? */ route = NULL; - list_for_each(rtmp, &peer->ksnp_routes) { + list_for_each(rtmp, &peer_ni->ksnp_routes) { route = list_entry(rtmp, ksock_route_t, ksnr_list); if (route->ksnr_ipaddr == newroute->ksnr_ipaddr) @@ -924,7 +924,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, iface = &net->ksnn_interfaces[j]; /* Using this interface already? 
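
The interface preference in ksocknal_create_routes() sits mostly in unchanged context lines that this diff elides; the assumed pattern is a two-level comparison, subnet match first and current route load second. A sketch of that kind of lexicographic preference, under that assumption:

        #include <stdbool.h>
        #include <stddef.h>

        struct iface {
                bool netmatch;   /* same subnet as the target IP? */
                int  nroutes;    /* routes already bound to this interface */
        };

        /* True when 'a' should be preferred over the current 'best'. */
        static bool better_iface(const struct iface *a,
                                 const struct iface *best)
        {
                if (best == NULL)
                        return true;
                if (a->netmatch != best->netmatch)
                        return a->netmatch;          /* subnet match dominates */
                return a->nroutes < best->nroutes;   /* then least loaded */
        }
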
*/ - list_for_each(rtmp, &peer->ksnp_routes) { + list_for_each(rtmp, &peer_ni->ksnp_routes) { route = list_entry(rtmp, ksock_route_t, ksnr_list); @@ -957,7 +957,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, newroute->ksnr_myipaddr = best_iface->ksni_ipaddr; best_iface->ksni_nroutes++; - ksocknal_add_route_locked(peer, newroute); + ksocknal_add_route_locked(peer_ni, newroute); newroute = NULL; } @@ -998,11 +998,11 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock) } static int -ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr) +ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr) { ksock_route_t *route; - list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { + list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) { if (route->ksnr_ipaddr == ipaddr) return route->ksnr_connecting; } @@ -1020,8 +1020,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, __u64 incarnation; ksock_conn_t *conn; ksock_conn_t *conn2; - ksock_peer_t *peer = NULL; - ksock_peer_t *peer2; + ksock_peer_ni_t *peer_ni = NULL; + ksock_peer_ni_t *peer2; ksock_sched_t *sched; struct ksock_hello_msg *hello; int cpt; @@ -1072,21 +1072,21 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, if (rc != 0) goto failed_1; - /* Find out/confirm peer's NID and connection type and get the + /* Find out/confirm peer_ni's NID and connection type and get the * vector of interfaces she's willing to let me connect to. - * Passive connections use the listener timeout since the peer sends + * Passive connections use the listener timeout since the peer_ni sends * eagerly */ if (active) { - peer = route->ksnr_peer; - LASSERT(ni == peer->ksnp_ni); + peer_ni = route->ksnr_peer; + LASSERT(ni == peer_ni->ksnp_ni); /* Active connection sends HELLO eagerly */ hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips); - peerid = peer->ksnp_id; + peerid = peer_ni->ksnp_id; write_lock_bh(global_lock); - conn->ksnc_proto = peer->ksnp_proto; + conn->ksnc_proto = peer_ni->ksnp_proto; write_unlock_bh(global_lock); if (conn->ksnc_proto == NULL) { @@ -1106,7 +1106,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, peerid.nid = LNET_NID_ANY; peerid.pid = LNET_PID_ANY; - /* Passive, get protocol from peer */ + /* Passive, get protocol from peer_ni */ conn->ksnc_proto = NULL; } @@ -1121,10 +1121,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, cpt = lnet_cpt_of_nid(peerid.nid, ni); if (active) { - ksocknal_peer_addref(peer); + ksocknal_peer_addref(peer_ni); write_lock_bh(global_lock); } else { - rc = ksocknal_create_peer(&peer, ni, peerid); + rc = ksocknal_create_peer(&peer_ni, ni, peerid); if (rc != 0) goto failed_1; @@ -1135,57 +1135,57 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, peer2 = ksocknal_find_peer_locked(ni, peerid); if (peer2 == NULL) { - /* NB this puts an "empty" peer in the peer + /* NB this puts an "empty" peer_ni in the peer_ni * table (which takes my ref) */ - list_add_tail(&peer->ksnp_list, + list_add_tail(&peer_ni->ksnp_list, ksocknal_nid2peerlist(peerid.nid)); } else { - ksocknal_peer_decref(peer); - peer = peer2; + ksocknal_peer_decref(peer_ni); + peer_ni = peer2; } /* +1 ref for me */ - ksocknal_peer_addref(peer); - peer->ksnp_accepting++; + ksocknal_peer_addref(peer_ni); + peer_ni->ksnp_accepting++; /* Am I already connecting to this guy? Resolve in * favour of higher NID... 
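
The "favour of higher NID" comment above is the socklnd's answer to symmetric connects: when both nodes dial each other at once, the passive side refuses the incoming attempt if the remote NID is lower and a local outgoing attempt is already in flight, so exactly one connection survives. The rule reduces to a small predicate (EALREADY standing in for the warn-and-retry path):

        #include <errno.h>
        #include <stdbool.h>
        #include <stdint.h>

        /* Passive side: an incoming connect races with our own outgoing
         * attempt to the same peer_ni. The higher-NID node's attempt
         * wins, so refuse the incoming one when the remote NID is lower. */
        static int resolve_connect_race(uint64_t my_nid, uint64_t peer_nid,
                                        bool connecting_out)
        {
                if (peer_nid < my_nid && connecting_out)
                        return EALREADY;  /* drop this; our connect proceeds */
                return 0;
        }

Because both sides evaluate the same deterministic comparison, they agree on which connection to keep without any extra handshake.
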
*/ if (peerid.nid < ni->ni_nid && - ksocknal_connecting(peer, conn->ksnc_ipaddr)) { + ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) { rc = EALREADY; warn = "connection race resolution"; goto failed_2; } } - if (peer->ksnp_closing || + if (peer_ni->ksnp_closing || (active && route->ksnr_deleted)) { - /* peer/route got closed under me */ + /* peer_ni/route got closed under me */ rc = -ESTALE; - warn = "peer/route removed"; + warn = "peer_ni/route removed"; goto failed_2; } - if (peer->ksnp_proto == NULL) { + if (peer_ni->ksnp_proto == NULL) { /* Never connected before. - * NB recv_hello may have returned EPROTO to signal my peer + * NB recv_hello may have returned EPROTO to signal my peer_ni * wants a different protocol than the one I asked for. */ - LASSERT(list_empty(&peer->ksnp_conns)); + LASSERT(list_empty(&peer_ni->ksnp_conns)); - peer->ksnp_proto = conn->ksnc_proto; - peer->ksnp_incarnation = incarnation; + peer_ni->ksnp_proto = conn->ksnc_proto; + peer_ni->ksnp_incarnation = incarnation; } - if (peer->ksnp_proto != conn->ksnc_proto || - peer->ksnp_incarnation != incarnation) { - /* Peer rebooted or I've got the wrong protocol version */ - ksocknal_close_peer_conns_locked(peer, 0, 0); + if (peer_ni->ksnp_proto != conn->ksnc_proto || + peer_ni->ksnp_incarnation != incarnation) { + /* peer_ni rebooted or I've got the wrong protocol version */ + ksocknal_close_peer_conns_locked(peer_ni, 0, 0); - peer->ksnp_proto = NULL; + peer_ni->ksnp_proto = NULL; rc = ESTALE; - warn = peer->ksnp_incarnation != incarnation ? - "peer rebooted" : + warn = peer_ni->ksnp_incarnation != incarnation ? + "peer_ni rebooted" : "wrong proto version"; goto failed_2; } @@ -1206,7 +1206,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, /* Refuse to duplicate an existing connection, unless this is a * loopback connection */ if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) { - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { conn2 = list_entry(tmp, ksock_conn_t, ksnc_list); if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr || @@ -1214,7 +1214,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn2->ksnc_type != conn->ksnc_type) continue; - /* Reply on a passive connection attempt so the peer + /* Reply on a passive connection attempt so the peer_ni * realises we're connected. */ LASSERT (rc == 0); if (!active) @@ -1231,16 +1231,16 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, if (active && route->ksnr_ipaddr != conn->ksnc_ipaddr) { CERROR("Route %s %pI4h connected to %pI4h\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &route->ksnr_ipaddr, &conn->ksnc_ipaddr); } /* Search for a route corresponding to the new connection and * create an association. This allows incoming connections created - * by routes in my peer to match my own route entries so I don't + * by routes in my peer_ni to match my own route entries so I don't * continually create duplicate routes. 
*/ - list_for_each(tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); if (route->ksnr_ipaddr != conn->ksnc_ipaddr) @@ -1250,10 +1250,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, break; } - conn->ksnc_peer = peer; /* conn takes my ref on peer */ - peer->ksnp_last_alive = ktime_get_real_seconds(); - peer->ksnp_send_keepalive = 0; - peer->ksnp_error = 0; + conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */ + peer_ni->ksnp_last_alive = ktime_get_real_seconds(); + peer_ni->ksnp_send_keepalive = 0; + peer_ni->ksnp_error = 0; sched = ksocknal_choose_scheduler_locked(cpt); sched->kss_nconns++; @@ -1263,9 +1263,9 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, /* Set the deadline for the outgoing HELLO to drain */ conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued; conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - smp_mb(); /* order with adding to peer's conn list */ + smp_mb(); /* order with adding to peer_ni's conn list */ - list_add(&conn->ksnc_list, &peer->ksnp_conns); + list_add(&conn->ksnc_list, &peer_ni->ksnp_conns); ksocknal_conn_addref(conn); ksocknal_new_packet(conn, 0); @@ -1273,7 +1273,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn); /* Take packets blocking for this connection. */ - list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) { + list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) { if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO) continue; @@ -1301,10 +1301,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, if (active) { /* additional routes after interface exchange? */ - ksocknal_create_routes(peer, conn->ksnc_port, + ksocknal_create_routes(peer_ni, conn->ksnc_port, hello->kshm_ips, hello->kshm_nips); } else { - hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips, + hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips, hello->kshm_nips); rc = ksocknal_send_hello(ni, conn, peerid.nid, hello); } @@ -1326,7 +1326,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, ksocknal_lib_set_callback(sock, conn); if (!active) - peer->ksnp_accepting--; + peer_ni->ksnp_accepting--; write_unlock_bh(global_lock); @@ -1349,12 +1349,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, return rc; failed_2: - if (!peer->ksnp_closing && - list_empty(&peer->ksnp_conns) && - list_empty(&peer->ksnp_routes)) { - list_add(&zombies, &peer->ksnp_tx_queue); - list_del_init(&peer->ksnp_tx_queue); - ksocknal_unlink_peer_locked(peer); + if (!peer_ni->ksnp_closing && + list_empty(&peer_ni->ksnp_conns) && + list_empty(&peer_ni->ksnp_routes)) { + list_add(&zombies, &peer_ni->ksnp_tx_queue); + list_del_init(&peer_ni->ksnp_tx_queue); + ksocknal_unlink_peer_locked(peer_ni); } write_unlock_bh(global_lock); @@ -1378,12 +1378,12 @@ failed_2: } write_lock_bh(global_lock); - peer->ksnp_accepting--; + peer_ni->ksnp_accepting--; write_unlock_bh(global_lock); } ksocknal_txlist_done(ni, &zombies, 1); - ksocknal_peer_decref(peer); + ksocknal_peer_decref(peer_ni); failed_1: if (hello != NULL) @@ -1403,16 +1403,16 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) /* This just does the immmediate housekeeping, and queues the * connection for the reaper to terminate. 
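
ksocknal_close_conn_locked() above shows the two-phase teardown both LNDs use: with the global lock held you only mark the conn closing and queue it, and the reaper thread does the heavyweight termination later. A minimal single-threaded sketch of that hand-off, with a toy list in place of ksnd_deathrow_conns:

        #include <stdbool.h>
        #include <stddef.h>

        struct conn {
                struct conn *next;
                bool closing;
        };

        static struct conn *deathrow;   /* stand-in for ksnd_deathrow_conns */

        /* Phase 1: cheap, called with the global lock held. */
        static void close_conn_locked(struct conn *c)
        {
                c->closing = true;
                c->next = deathrow;     /* reaper takes over the conn's ref */
                deathrow = c;
        }

        /* Phase 2: the reaper drains the list outside the hot lock. */
        static void reaper_run(void (*terminate)(struct conn *))
        {
                while (deathrow != NULL) {
                        struct conn *c = deathrow;

                        deathrow = c->next;
                        terminate(c);
                }
        }

Keeping phase 1 this cheap is what lets the close path run with the lock held in irq context, as the caller comment notes.
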
* Caller holds ksnd_global_lock exclusively in irq context */ - ksock_peer_t *peer = conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = conn->ksnc_peer; ksock_route_t *route; ksock_conn_t *conn2; struct list_head *tmp; - LASSERT(peer->ksnp_error == 0); + LASSERT(peer_ni->ksnp_error == 0); LASSERT(!conn->ksnc_closing); conn->ksnc_closing = 1; - /* ksnd_deathrow_conns takes over peer's ref */ + /* ksnd_deathrow_conns takes over peer_ni's ref */ list_del(&conn->ksnc_list); route = conn->ksnc_route; @@ -1422,7 +1422,7 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0); conn2 = NULL; - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { conn2 = list_entry(tmp, ksock_conn_t, ksnc_list); if (conn2->ksnc_route == route && @@ -1439,35 +1439,35 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) ksocknal_route_decref(route); /* drop conn's ref on route */ } - if (list_empty(&peer->ksnp_conns)) { - /* No more connections to this peer */ + if (list_empty(&peer_ni->ksnp_conns)) { + /* No more connections to this peer_ni */ - if (!list_empty(&peer->ksnp_tx_queue)) { + if (!list_empty(&peer_ni->ksnp_tx_queue)) { ksock_tx_t *tx; LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); /* throw them to the last connection..., * these TXs will be send to /dev/null by scheduler */ - list_for_each_entry(tx, &peer->ksnp_tx_queue, + list_for_each_entry(tx, &peer_ni->ksnp_tx_queue, tx_list) ksocknal_tx_prep(conn, tx); spin_lock_bh(&conn->ksnc_scheduler->kss_lock); - list_splice_init(&peer->ksnp_tx_queue, + list_splice_init(&peer_ni->ksnp_tx_queue, &conn->ksnc_tx_queue); spin_unlock_bh(&conn->ksnc_scheduler->kss_lock); } /* renegotiate protocol version */ - peer->ksnp_proto = NULL; + peer_ni->ksnp_proto = NULL; /* stash last conn close reason */ - peer->ksnp_error = error; + peer_ni->ksnp_error = error; - if (list_empty(&peer->ksnp_routes)) { + if (list_empty(&peer_ni->ksnp_routes)) { /* I've just closed last conn belonging to a - * peer with no routes to it */ - ksocknal_unlink_peer_locked(peer); + * peer_ni with no routes to it */ + ksocknal_unlink_peer_locked(peer_ni); } } @@ -1481,36 +1481,36 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) } void -ksocknal_peer_failed (ksock_peer_t *peer) +ksocknal_peer_failed (ksock_peer_ni_t *peer_ni) { int notify = 0; cfs_time_t last_alive = 0; /* There has been a connection failure or comms error; but I'll only - * tell LNET I think the peer is dead if it's to another kernel and + * tell LNET I think the peer_ni is dead if it's to another kernel and * there are no connections or connection attempts in existence. 
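
ksocknal_peer_failed() above guards its lnet_notify() call: a peer_ni is reported dead only if it belongs to a kernel peer and nothing is left that could still revive it, meaning no live conns, no passive accepts pending, and no route mid-connect. Stated as a pure predicate over hypothetical fields:

        #include <stdbool.h>

        struct peer_state {
                bool userspace;   /* LNET_PID_USERFLAG set on the pid */
                int  nconns;      /* established connections */
                int  accepting;   /* passive connections pending */
                bool connecting;  /* any route attempting to connect */
        };

        /* True only when nothing could revive this peer_ni on its own. */
        static bool should_notify_dead(const struct peer_state *p)
        {
                return !p->userspace &&
                       p->nconns == 0 &&
                       p->accepting == 0 &&
                       !p->connecting;
        }
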
*/ read_lock(&ksocknal_data.ksnd_global_lock); - if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 && - list_empty(&peer->ksnp_conns) && - peer->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer) == NULL) { + if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 && + list_empty(&peer_ni->ksnp_conns) && + peer_ni->ksnp_accepting == 0 && + ksocknal_find_connecting_route_locked(peer_ni) == NULL) { notify = 1; - last_alive = peer->ksnp_last_alive; + last_alive = peer_ni->ksnp_last_alive; } read_unlock(&ksocknal_data.ksnd_global_lock); if (notify) - lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0, + lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0, last_alive); } void ksocknal_finalize_zcreq(ksock_conn_t *conn) { - ksock_peer_t *peer = conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = conn->ksnc_peer; ksock_tx_t *tx; ksock_tx_t *tmp; struct list_head zlist = LIST_HEAD_INIT(zlist); @@ -1519,9 +1519,9 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) * abort all buffered data */ LASSERT(conn->ksnc_sock == NULL); - spin_lock(&peer->ksnp_lock); + spin_lock(&peer_ni->ksnp_lock); - list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) { + list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) { if (tx->tx_conn != conn) continue; @@ -1533,7 +1533,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) list_add(&tx->tx_zc_list, &zlist); } - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock); while (!list_empty(&zlist)) { tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); @@ -1550,7 +1550,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn) * disengage the socket from its callbacks and close it. * ksnc_refcount will eventually hit zero, and then the reaper will * destroy it. */ - ksock_peer_t *peer = conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = conn->ksnc_peer; ksock_sched_t *sched = conn->ksnc_scheduler; int failed = 0; @@ -1584,17 +1584,17 @@ ksocknal_terminate_conn(ksock_conn_t *conn) * scheduler yet, but it _has_ committed to terminate... */ conn->ksnc_scheduler->kss_nconns--; - if (peer->ksnp_error != 0) { - /* peer's last conn closed in error */ - LASSERT(list_empty(&peer->ksnp_conns)); + if (peer_ni->ksnp_error != 0) { + /* peer_ni's last conn closed in error */ + LASSERT(list_empty(&peer_ni->ksnp_conns)); failed = 1; - peer->ksnp_error = 0; /* avoid multiple notifications */ + peer_ni->ksnp_error = 0; /* avoid multiple notifications */ } write_unlock_bh(&ksocknal_data.ksnd_global_lock); if (failed) - ksocknal_peer_failed(peer); + ksocknal_peer_failed(peer_ni); /* The socket is closed on the final put; either here, or in * ksocknal_{send,recv}msg(). 
Since we set up the linger2 option @@ -1684,14 +1684,14 @@ ksocknal_destroy_conn (ksock_conn_t *conn) } int -ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why) +ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why) { ksock_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; - list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { + list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); if (ipaddr == 0 || @@ -1707,13 +1707,13 @@ ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why) int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) { - ksock_peer_t *peer = conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = conn->ksnc_peer; __u32 ipaddr = conn->ksnc_ipaddr; int count; write_lock_bh(&ksocknal_data.ksnd_global_lock); - count = ksocknal_close_peer_conns_locked (peer, ipaddr, why); + count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why); write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -1723,7 +1723,7 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) int ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr) { - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; struct list_head *ptmp; struct list_head *pnxt; int lo; @@ -1743,13 +1743,13 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list); - if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) && - (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid))) + if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) && + (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid))) continue; - count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0); + count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0); } } @@ -1791,7 +1791,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) int connect = 1; time64_t last_alive = 0; time64_t now = ktime_get_real_seconds(); - ksock_peer_t *peer = NULL; + ksock_peer_ni_t *peer_ni = NULL; rwlock_t *glock = &ksocknal_data.ksnd_global_lock; lnet_process_id_t id = { .nid = nid, @@ -1800,13 +1800,13 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) read_lock(glock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) { + peer_ni = ksocknal_find_peer_locked(ni, id); + if (peer_ni != NULL) { struct list_head *tmp; ksock_conn_t *conn; int bufnob; - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); bufnob = conn->ksnc_sock->sk->sk_wmem_queued; @@ -1814,13 +1814,13 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) /* something got ACKed */ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - peer->ksnp_last_alive = now; + peer_ni->ksnp_last_alive = now; conn->ksnc_tx_bufnob = bufnob; } } - last_alive = peer->ksnp_last_alive; - if (ksocknal_find_connectable_route_locked(peer) == NULL) + last_alive = peer_ni->ksnp_last_alive; + if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) connect = 0; } @@ -1829,8 +1829,8 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) if (last_alive != 0) *when = last_alive; - CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n", - libcfs_nid2str(nid), peer, + 
CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n", + libcfs_nid2str(nid), peer_ni, last_alive ? cfs_duration_sec(now - last_alive) : -1, connect); @@ -1841,16 +1841,16 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) write_lock_bh(glock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) - ksocknal_launch_all_connections_locked(peer); + peer_ni = ksocknal_find_peer_locked(ni, id); + if (peer_ni != NULL) + ksocknal_launch_all_connections_locked(peer_ni); write_unlock_bh(glock); return; } static void -ksocknal_push_peer (ksock_peer_t *peer) +ksocknal_push_peer (ksock_peer_ni_t *peer_ni) { int index; int i; @@ -1863,7 +1863,7 @@ ksocknal_push_peer (ksock_peer_t *peer) i = 0; conn = NULL; - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { if (i++ == index) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); @@ -1899,22 +1899,22 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id) } for (tmp = start; tmp <= end; tmp++) { - int peer_off; /* searching offset in peer hash table */ + int peer_off; /* searching offset in peer_ni hash table */ for (peer_off = 0; ; peer_off++) { - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; int i = 0; read_lock(&ksocknal_data.ksnd_global_lock); - list_for_each_entry(peer, tmp, ksnp_list) { + list_for_each_entry(peer_ni, tmp, ksnp_list) { if (!((id.nid == LNET_NID_ANY || - id.nid == peer->ksnp_id.nid) && + id.nid == peer_ni->ksnp_id.nid) && (id.pid == LNET_PID_ANY || - id.pid == peer->ksnp_id.pid))) + id.pid == peer_ni->ksnp_id.pid))) continue; if (i++ == peer_off) { - ksocknal_peer_addref(peer); + ksocknal_peer_addref(peer_ni); break; } } @@ -1924,8 +1924,8 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id) break; rc = 0; - ksocknal_push_peer(peer); - ksocknal_peer_decref(peer); + ksocknal_push_peer(peer_ni); + ksocknal_peer_decref(peer_ni); } } return rc; @@ -1940,7 +1940,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) int i; int j; struct list_head *ptmp; - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; struct list_head *rtmp; ksock_route_t *route; @@ -1966,14 +1966,14 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, + peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list); - for (j = 0; j < peer->ksnp_n_passive_ips; j++) - if (peer->ksnp_passive_ips[j] == ipaddress) + for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) + if (peer_ni->ksnp_passive_ips[j] == ipaddress) iface->ksni_npeers++; - list_for_each(rtmp, &peer->ksnp_routes) { + list_for_each(rtmp, &peer_ni->ksnp_routes) { route = list_entry(rtmp, ksock_route_t, ksnr_list); @@ -1994,7 +1994,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) } static void -ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) +ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr) { struct list_head *tmp; struct list_head *nxt; @@ -2003,16 +2003,16 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) int i; int j; - for (i = 0; i < peer->ksnp_n_passive_ips; i++) - if (peer->ksnp_passive_ips[i] == ipaddr) { - for (j = i+1; j < peer->ksnp_n_passive_ips; j++) - peer->ksnp_passive_ips[j-1] = - peer->ksnp_passive_ips[j]; - peer->ksnp_n_passive_ips--; + for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) + if (peer_ni->ksnp_passive_ips[i] == ipaddr) { + for (j = i+1; j < 
peer_ni->ksnp_n_passive_ips; j++) + peer_ni->ksnp_passive_ips[j-1] = + peer_ni->ksnp_passive_ips[j]; + peer_ni->ksnp_n_passive_ips--; break; } - list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { + list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); if (route->ksnr_myipaddr != ipaddr) @@ -2026,7 +2026,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) } } - list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { + list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); if (conn->ksnc_myipaddr == ipaddr) @@ -2041,7 +2041,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) int rc = -ENOENT; struct list_head *tmp; struct list_head *nxt; - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; __u32 this_ip; int i; int j; @@ -2066,13 +2066,13 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) { list_for_each_safe(tmp, nxt, &ksocknal_data.ksnd_peers[j]) { - peer = list_entry(tmp, ksock_peer_t, + peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list); - if (peer->ksnp_ni != ni) + if (peer_ni->ksnp_ni != ni) continue; - ksocknal_peer_del_interface_locked(peer, this_ip); + ksocknal_peer_del_interface_locked(peer_ni, this_ip); } } } @@ -2495,7 +2495,7 @@ ksocknal_base_startup(void) static void ksocknal_debug_peerhash (lnet_ni_t *ni) { - ksock_peer_t *peer = NULL; + ksock_peer_ni_t *peer_ni = NULL; struct list_head *tmp; int i; @@ -2503,29 +2503,29 @@ ksocknal_debug_peerhash (lnet_ni_t *ni) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); + peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list); - if (peer->ksnp_ni == ni) break; + if (peer_ni->ksnp_ni == ni) break; - peer = NULL; + peer_ni = NULL; } } - if (peer != NULL) { + if (peer_ni != NULL) { ksock_route_t *route; ksock_conn_t *conn; - CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, " + CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, " "closing %d, accepting %d, err %d, zcookie %llu, " - "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id), - atomic_read(&peer->ksnp_refcount), - peer->ksnp_sharecount, peer->ksnp_closing, - peer->ksnp_accepting, peer->ksnp_error, - peer->ksnp_zc_next_cookie, - !list_empty(&peer->ksnp_tx_queue), - !list_empty(&peer->ksnp_zc_req_list)); - - list_for_each(tmp, &peer->ksnp_routes) { + "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id), + atomic_read(&peer_ni->ksnp_refcount), + peer_ni->ksnp_sharecount, peer_ni->ksnp_closing, + peer_ni->ksnp_accepting, peer_ni->ksnp_error, + peer_ni->ksnp_zc_next_cookie, + !list_empty(&peer_ni->ksnp_tx_queue), + !list_empty(&peer_ni->ksnp_zc_req_list)); + + list_for_each(tmp, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, " "del %d\n", atomic_read(&route->ksnr_refcount), @@ -2533,7 +2533,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni) route->ksnr_connected, route->ksnr_deleted); } - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); CWARN ("Conn: ref %d, sref %d, t %d, c %d\n", atomic_read(&conn->ksnc_conn_refcount), @@ -2566,7 +2566,7 @@ ksocknal_shutdown (lnet_ni_t *ni) /* Delete all peers */ ksocknal_del_peer(ni, anyid, 0); - /* Wait for all peer state to clean up */ + /* Wait for all peer_ni state to clean up */ i = 2; 
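
The shutdown path continuing below simply waits for the refcounting to drain: it polls the per-net peer_ni count, which falls to zero as each last reference is dropped. A userspace sketch of that drain loop, with sleep() standing in for the scheduler wait and stderr for CWARN:

        #include <stdio.h>
        #include <unistd.h>

        /* Poll until every peer_ni is gone; log occasionally so a stuck
         * shutdown stays visible (the kernel code rate-limits similarly). */
        static void wait_for_peers(volatile int *npeers)
        {
                int i = 2;

                while (*npeers != 0) {
                        i++;
                        if (i % 10 == 0)
                                fprintf(stderr, "waiting for %d peers\n",
                                        *npeers);
                        sleep(1);
                }
        }
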
spin_lock_bh(&net->ksnn_lock); while (net->ksnn_npeers != 0) { diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h index 6d38849..fd24314 100644 --- a/lnet/klnds/socklnd/socklnd.h +++ b/lnet/klnds/socklnd/socklnd.h @@ -71,7 +71,7 @@ #define SOCKNAL_NSCHEDS 3 #define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) -#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ +#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer_ni lists */ #define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ #define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ #define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */ @@ -154,9 +154,9 @@ typedef struct int *ksnd_keepalive_count; /* # probes */ int *ksnd_keepalive_intvl; /* time between probes */ int *ksnd_credits; /* # concurrent sends */ - int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */ - int *ksnd_peerrtrcredits; /* # per-peer router buffer credits */ - int *ksnd_peertimeout; /* seconds to consider peer dead */ + int *ksnd_peertxcredits; /* # concurrent sends to 1 peer_ni */ + int *ksnd_peerrtrcredits; /* # per-peer_ni router buffer credits */ + int *ksnd_peertimeout; /* seconds to consider peer_ni dead */ int *ksnd_enable_csum; /* enable check sum */ int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */ int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */ @@ -196,7 +196,7 @@ typedef struct int ksnd_init; /* initialisation state */ int ksnd_nnets; /* # networks set up */ struct list_head ksnd_nets; /* list of nets */ - /* stabilize peer/conn ops */ + /* stabilize peer_ni/conn ops */ rwlock_t ksnd_global_lock; /* hash table of all my known peers */ struct list_head *ksnd_peers; @@ -273,7 +273,7 @@ struct ksock_proto; /* forward ref */ typedef struct /* transmit packet */ { struct list_head tx_list; /* queue on conn for transmission etc */ - struct list_head tx_zc_list; /* queue on peer for ZC request */ + struct list_head tx_zc_list; /* queue on peer_ni for ZC request */ atomic_t tx_refcount; /* tx reference count */ int tx_nob; /* # packet bytes */ int tx_resid; /* residual bytes */ @@ -321,9 +321,9 @@ typedef union { typedef struct ksock_conn { - struct ksock_peer *ksnc_peer; /* owning peer */ + struct ksock_peer *ksnc_peer; /* owning peer_ni */ struct ksock_route *ksnc_route; /* owning route */ - struct list_head ksnc_list; /* stash on peer's conn list */ + struct list_head ksnc_list; /* stash on peer_ni's conn list */ struct socket *ksnc_sock; /* actual socket */ void *ksnc_saved_data_ready; /* socket's original data_ready() callback */ void *ksnc_saved_write_space; /* socket's original write_space() callback */ @@ -331,8 +331,8 @@ typedef struct ksock_conn atomic_t ksnc_sock_refcount; /* sock refcount */ ksock_sched_t *ksnc_scheduler; /* who schedules this connection */ __u32 ksnc_myipaddr; /* my IP */ - __u32 ksnc_ipaddr; /* peer's IP */ - int ksnc_port; /* peer's port */ + __u32 ksnc_ipaddr; /* peer_ni's IP */ + int ksnc_port; /* peer_ni's port */ signed int ksnc_type:3; /* type of connection, * should be signed value */ unsigned int ksnc_closing:1; /* being shut down */ @@ -388,9 +388,9 @@ typedef struct ksock_conn typedef struct ksock_route { - struct list_head ksnr_list; /* chain on peer route list */ + struct list_head ksnr_list; /* chain on peer_ni route list */ struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ - struct ksock_peer *ksnr_peer; /* owning peer */ + struct ksock_peer *ksnr_peer; /* owning peer_ni */ atomic_t 
ksnr_refcount; /* # users */ cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */ cfs_duration_t ksnr_retry_interval; /* how long between retries */ @@ -400,7 +400,7 @@ typedef struct ksock_route unsigned int ksnr_scheduled:1; /* scheduled for attention */ unsigned int ksnr_connecting:1;/* connection establishment in progress */ unsigned int ksnr_connected:4; /* connections established by type */ - unsigned int ksnr_deleted:1; /* been removed from peer? */ + unsigned int ksnr_deleted:1; /* been removed from peer_ni? */ unsigned int ksnr_share_count; /* created explicitly? */ int ksnr_conn_count; /* # conns established by this route */ } ksock_route_t; @@ -409,7 +409,7 @@ typedef struct ksock_route typedef struct ksock_peer { - struct list_head ksnp_list; /* stash on global peer list */ + struct list_head ksnp_list; /* stash on global peer_ni list */ cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */ lnet_process_id_t ksnp_id; /* who's on the other end(s) */ atomic_t ksnp_refcount; /* # users */ @@ -418,8 +418,8 @@ typedef struct ksock_peer int ksnp_accepting;/* # passive connections pending */ int ksnp_error; /* errno on closing last conn */ __u64 ksnp_zc_next_cookie;/* ZC completion cookie */ - __u64 ksnp_incarnation; /* latest known peer incarnation */ - struct ksock_proto *ksnp_proto; /* latest known peer protocol */ + __u64 ksnp_incarnation; /* latest known peer_ni incarnation */ + struct ksock_proto *ksnp_proto; /* latest known peer_ni protocol */ struct list_head ksnp_conns; /* all active connections */ struct list_head ksnp_routes; /* routes */ struct list_head ksnp_tx_queue; /* waiting packets */ @@ -430,7 +430,7 @@ typedef struct ksock_peer lnet_ni_t *ksnp_ni; /* which network */ int ksnp_n_passive_ips; /* # of... 
*/ __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */ -} ksock_peer_t; +} ksock_peer_ni_t; typedef struct ksock_connreq { @@ -592,20 +592,20 @@ ksocknal_route_decref (ksock_route_t *route) } static inline void -ksocknal_peer_addref (ksock_peer_t *peer) +ksocknal_peer_addref (ksock_peer_ni_t *peer_ni) { - LASSERT (atomic_read (&peer->ksnp_refcount) > 0); - atomic_inc(&peer->ksnp_refcount); + LASSERT (atomic_read (&peer_ni->ksnp_refcount) > 0); + atomic_inc(&peer_ni->ksnp_refcount); } -extern void ksocknal_destroy_peer (ksock_peer_t *peer); +extern void ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni); static inline void -ksocknal_peer_decref (ksock_peer_t *peer) +ksocknal_peer_decref (ksock_peer_ni_t *peer_ni) { - LASSERT (atomic_read (&peer->ksnp_refcount) > 0); - if (atomic_dec_and_test(&peer->ksnp_refcount)) - ksocknal_destroy_peer (peer); + LASSERT (atomic_read (&peer_ni->ksnp_refcount) > 0); + if (atomic_dec_and_test(&peer_ni->ksnp_refcount)) + ksocknal_destroy_peer (peer_ni); } int ksocknal_startup (lnet_ni_t *ni); @@ -619,19 +619,19 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int ksocknal_accept(lnet_ni_t *ni, struct socket *sock); extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port); -extern ksock_peer_t *ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id); -extern ksock_peer_t *ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id); -extern void ksocknal_peer_failed (ksock_peer_t *peer); +extern ksock_peer_ni_t *ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id); +extern ksock_peer_ni_t *ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id); +extern void ksocknal_peer_failed (ksock_peer_ni_t *peer_ni); extern int ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, struct socket *sock, int type); extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why); extern void ksocknal_terminate_conn (ksock_conn_t *conn); extern void ksocknal_destroy_conn (ksock_conn_t *conn); -extern int ksocknal_close_peer_conns_locked (ksock_peer_t *peer, +extern int ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why); extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why); extern int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr); -extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer, +extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk); extern int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, @@ -647,9 +647,9 @@ extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive); extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when); extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name); extern void ksocknal_thread_fini (void); -extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer); -extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_t *peer); -extern ksock_route_t *ksocknal_find_connecting_route_locked (ksock_peer_t *peer); +extern void ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni); +extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni); +extern ksock_route_t *ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni); extern int ksocknal_new_packet (ksock_conn_t *conn, int skip); extern int ksocknal_scheduler (void *arg); extern int ksocknal_connd (void *arg);
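
The addref/decref pair above is the refcounting idiom this rename touches throughout the socklnd: taking a reference is only legal while ksnp_refcount is already positive, and whichever ksocknal_peer_decref() call drives the count to zero (via atomic_dec_and_test()) is the one that invokes ksocknal_destroy_peer(). A minimal standalone sketch of the same pattern in C11 atomics; struct peer_ni, peer_ni_get() and peer_ni_put() are illustrative names, not socklnd symbols:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct peer_ni {
        atomic_int refcount;            /* starts at 1: the creator's reference */
};

static void peer_ni_get(struct peer_ni *p)
{
        /* taking a new reference is only legal while one is already held */
        assert(atomic_load(&p->refcount) > 0);
        atomic_fetch_add(&p->refcount, 1);
}

static void peer_ni_put(struct peer_ni *p)
{
        assert(atomic_load(&p->refcount) > 0);
        /* fetch_sub returns the old value: 1 means this caller dropped the
         * last reference, so this caller (and only this caller) frees */
        if (atomic_fetch_sub(&p->refcount, 1) == 1)
                free(p);
}

int main(void)
{
        struct peer_ni *p = calloc(1, sizeof(*p));

        atomic_init(&p->refcount, 1);   /* creator's reference */
        peer_ni_get(p);                 /* e.g. a new conn takes a ref */
        peer_ni_put(p);                 /* the conn goes away */
        peer_ni_put(p);                 /* creator drops the last ref: freed */
        return 0;
}

The thread that observes the count reach zero owns destruction, which is why no lock is needed around the free itself.

diff --git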
a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index 386006e..b69599b 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -439,11 +439,11 @@ static void ksocknal_check_zc_req(ksock_tx_t *tx) { ksock_conn_t *conn = tx->tx_conn; - ksock_peer_t *peer = conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = conn->ksnc_peer; /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx * to ksnp_zc_req_list if some fragment of this message should be sent - * zero-copy. Our peer will send an ACK containing this cookie when + * zero-copy. Our peer_ni will send an ACK containing this cookie when * she has received this message to tell us we can signal completion. * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on * ksnp_zc_req_list. */ @@ -461,46 +461,46 @@ ksocknal_check_zc_req(ksock_tx_t *tx) ksocknal_tx_addref(tx); - spin_lock(&peer->ksnp_lock); + spin_lock(&peer_ni->ksnp_lock); - /* ZC_REQ is going to be pinned to the peer */ + /* ZC_REQ is going to be pinned to the peer_ni */ tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); - tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; + tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++; - if (peer->ksnp_zc_next_cookie == 0) - peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; + if (peer_ni->ksnp_zc_next_cookie == 0) + peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; - list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); + list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list); - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock); } static void ksocknal_uncheck_zc_req(ksock_tx_t *tx) { - ksock_peer_t *peer = tx->tx_conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = tx->tx_conn->ksnc_peer; LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); LASSERT(tx->tx_zc_capable); tx->tx_zc_checked = 0; - spin_lock(&peer->ksnp_lock); + spin_lock(&peer_ni->ksnp_lock); if (tx->tx_msg.ksm_zc_cookies[0] == 0) { /* Not waiting for an ACK */ - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock); return; } tx->tx_msg.ksm_zc_cookies[0] = 0; list_del(&tx->tx_zc_list); - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock); ksocknal_tx_decref(tx); } @@ -606,14 +606,14 @@ ksocknal_launch_connection_locked (ksock_route_t *route) } void -ksocknal_launch_all_connections_locked (ksock_peer_t *peer) +ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni) { ksock_route_t *route; /* called holding write lock on ksnd_global_lock */ for (;;) { /* launch any/all connections that need it */ - route = ksocknal_find_connectable_route_locked(peer); + route = ksocknal_find_connectable_route_locked(peer_ni); if (route == NULL) return; @@ -622,7 +622,7 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer) } ksock_conn_t * -ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) +ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk) { struct list_head *tmp; ksock_conn_t *conn; @@ -631,7 +631,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) int tnob = 0; int fnob = 0; - list_for_each(tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer_ni->ksnp_conns) { ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); int nob = atomic_read(&c->ksnc_tx_nob) + c->ksnc_sock->sk->sk_wmem_queued; @@ -777,13 +777,13 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) ksock_route_t * 
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer) +ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni) { cfs_time_t now = cfs_time_current(); struct list_head *tmp; ksock_route_t *route; - list_for_each(tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); @@ -814,12 +814,12 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) } ksock_route_t * -ksocknal_find_connecting_route_locked (ksock_peer_t *peer) +ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni) { struct list_head *tmp; ksock_route_t *route; - list_for_each(tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer_ni->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); @@ -834,7 +834,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) int ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) { - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; ksock_conn_t *conn; rwlock_t *g_lock; int retry; @@ -846,10 +846,10 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) for (retry = 0;; retry = 1) { read_lock(g_lock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) { - if (ksocknal_find_connectable_route_locked(peer) == NULL) { - conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); + peer_ni = ksocknal_find_peer_locked(ni, id); + if (peer_ni != NULL) { + if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) { + conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk); if (conn != NULL) { /* I've got no routes that need to be * connecting and I do have an actual @@ -866,8 +866,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) write_lock_bh(g_lock); - peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) + peer_ni = ksocknal_find_peer_locked(ni, id); + if (peer_ni != NULL) break; write_unlock_bh(g_lock); @@ -879,7 +879,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) } if (retry) { - CERROR("Can't find peer %s\n", libcfs_id2str(id)); + CERROR("Can't find peer_ni %s\n", libcfs_id2str(id)); return -EHOSTUNREACH; } @@ -887,15 +887,15 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) LNET_NIDADDR(id.nid), lnet_acceptor_port()); if (rc != 0) { - CERROR("Can't add peer %s: %d\n", + CERROR("Can't add peer_ni %s: %d\n", libcfs_id2str(id), rc); return rc; } } - ksocknal_launch_all_connections_locked(peer); + ksocknal_launch_all_connections_locked(peer_ni); - conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); + conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk); if (conn != NULL) { /* Connection exists; queue message on it */ ksocknal_queue_tx_locked (tx, conn); @@ -903,14 +903,14 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) return (0); } - if (peer->ksnp_accepting > 0 || - ksocknal_find_connecting_route_locked (peer) != NULL) { - /* the message is going to be pinned to the peer */ + if (peer_ni->ksnp_accepting > 0 || + ksocknal_find_connecting_route_locked (peer_ni) != NULL) { + /* the message is going to be pinned to the peer_ni */ tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); /* Queue the message until a connection is established */ - list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue); + list_add_tail(&tx->tx_list, 
&peer_ni->ksnp_tx_queue); write_unlock_bh(g_lock); return 0; } @@ -1235,7 +1235,7 @@ ksocknal_process_receive (ksock_conn_t *conn) conn->ksnc_proto->pro_unpack(&conn->ksnc_msg); if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { - /* Userspace peer */ + /* Userspace peer_ni */ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; id = &conn->ksnc_peer->ksnp_id; @@ -1752,7 +1752,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, proto = ksocknal_parse_proto_version(hello); if (proto == NULL) { if (!active) { - /* unknown protocol from peer, tell peer my protocol */ + /* unknown protocol from peer_ni, tell peer_ni my protocol */ conn->ksnc_proto = &ksocknal_protocol_v3x; #if SOCKNAL_VERSION_DEBUG if (*ksocknal_tunables.ksnd_protocol == 2) @@ -1792,7 +1792,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, if (!active && conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { - /* Userspace NAL assigns peer process ID from socket */ + /* Userspace NAL assigns peer_ni process ID from socket */ recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG; recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr); } else { @@ -1803,7 +1803,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, if (!active) { *peerid = recv_id; - /* peer determines type */ + /* peer_ni determines type */ conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype); if (conn->ksnc_type == SOCKLND_CONN_NONE) { CERROR("Unexpected type %d from %s ip %pI4h\n", @@ -1845,7 +1845,7 @@ static int ksocknal_connect (ksock_route_t *route) { struct list_head zombies = LIST_HEAD_INIT(zombies); - ksock_peer_t *peer = route->ksnr_peer; + ksock_peer_ni_t *peer_ni = route->ksnr_peer; int type; int wanted; struct socket *sock; @@ -1866,19 +1866,19 @@ ksocknal_connect (ksock_route_t *route) for (;;) { wanted = ksocknal_route_mask() & ~route->ksnr_connected; - /* stop connecting if peer/route got closed under me, or + /* stop connecting if peer_ni/route got closed under me, or * route got connected while queued */ - if (peer->ksnp_closing || route->ksnr_deleted || + if (peer_ni->ksnp_closing || route->ksnr_deleted || wanted == 0) { retry_later = 0; break; } - /* reschedule if peer is connecting to me */ - if (peer->ksnp_accepting > 0) { + /* reschedule if peer_ni is connecting to me */ + if (peer_ni->ksnp_accepting > 0) { CDEBUG(D_NET, - "peer %s(%d) already connecting to me, retry later.\n", - libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting); + "peer_ni %s(%d) already connecting to me, retry later.\n", + libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting); retry_later = 1; } @@ -1900,21 +1900,21 @@ ksocknal_connect (ksock_route_t *route) if (cfs_time_aftereq(cfs_time_current(), deadline)) { rc = -ETIMEDOUT; - lnet_connect_console_error(rc, peer->ksnp_id.nid, + lnet_connect_console_error(rc, peer_ni->ksnp_id.nid, route->ksnr_ipaddr, route->ksnr_port); goto failed; } - rc = lnet_connect(&sock, peer->ksnp_id.nid, + rc = lnet_connect(&sock, peer_ni->ksnp_id.nid, route->ksnr_myipaddr, route->ksnr_ipaddr, route->ksnr_port); if (rc != 0) goto failed; - rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type); + rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type); if (rc < 0) { - lnet_connect_console_error(rc, peer->ksnp_id.nid, + lnet_connect_console_error(rc, peer_ni->ksnp_id.nid, route->ksnr_ipaddr, route->ksnr_port); goto failed; @@ -1924,8 +1924,8 @@ ksocknal_connect (ksock_route_t *route) * race or I have to renegotiate protocol version */ retry_later = (rc != 0); if (retry_later) 
- CDEBUG(D_NET, "peer %s: conn race, retry later.\n", - libcfs_nid2str(peer->ksnp_id.nid)); + CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n", + libcfs_nid2str(peer_ni->ksnp_id.nid)); write_lock_bh(&ksocknal_data.ksnd_global_lock); } @@ -1935,10 +1935,10 @@ ksocknal_connect (ksock_route_t *route) if (retry_later) { /* re-queue for attention; this frees me up to handle - * the peer's incoming connection request */ + * the peer_ni's incoming connection request */ if (rc == EALREADY || - (rc == 0 && peer->ksnp_accepting > 0)) { + (rc == 0 && peer_ni->ksnp_accepting > 0)) { /* We want to introduce a delay before next * attempt to connect if we lost conn race, * but the race is resolved quickly usually, @@ -1974,28 +1974,28 @@ ksocknal_connect (ksock_route_t *route) route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_retry_interval); - if (!list_empty(&peer->ksnp_tx_queue) && - peer->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer) == NULL) { + if (!list_empty(&peer_ni->ksnp_tx_queue) && + peer_ni->ksnp_accepting == 0 && + ksocknal_find_connecting_route_locked(peer_ni) == NULL) { ksock_conn_t *conn; /* ksnp_tx_queue is queued on a conn on successful * connection for V1.x and V2.x */ - if (!list_empty(&peer->ksnp_conns)) { - conn = list_entry(peer->ksnp_conns.next, + if (!list_empty(&peer_ni->ksnp_conns)) { + conn = list_entry(peer_ni->ksnp_conns.next, ksock_conn_t, ksnc_list); LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x); } /* take all the blocked packets while I've got the lock and * complete below... */ - list_splice_init(&peer->ksnp_tx_queue, &zombies); + list_splice_init(&peer_ni->ksnp_tx_queue, &zombies); } write_unlock_bh(&ksocknal_data.ksnd_global_lock); - ksocknal_peer_failed(peer); - ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1); + ksocknal_peer_failed(peer_ni); + ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, 1); return 0; } @@ -2242,13 +2242,13 @@ ksocknal_connd (void *arg) } static ksock_conn_t * -ksocknal_find_timed_out_conn (ksock_peer_t *peer) +ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni) { /* We're called with a shared lock on ksnd_global_lock */ ksock_conn_t *conn; struct list_head *ctmp; - list_for_each(ctmp, &peer->ksnp_conns) { + list_for_each(ctmp, &peer_ni->ksnp_conns) { int error; conn = list_entry(ctmp, ksock_conn_t, ksnc_list); @@ -2264,7 +2264,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) CNETERR("A connection with %s " "(%pI4h:%d) was reset; " "it may have rebooted.\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &conn->ksnc_ipaddr, conn->ksnc_port); break; @@ -2272,7 +2272,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) CNETERR("A connection with %s " "(%pI4h:%d) timed out; the " "network or node may be down.\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &conn->ksnc_ipaddr, conn->ksnc_port); break; @@ -2280,7 +2280,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) CNETERR("An unexpected network error %d " "occurred with %s " "(%pI4h:%d\n", error, - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &conn->ksnc_ipaddr, conn->ksnc_port); break; @@ -2296,7 +2296,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) ksocknal_conn_addref(conn); CNETERR("Timeout receiving from %s (%pI4h:%d), " "state %d wanted %d left %d\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &conn->ksnc_ipaddr, conn->ksnc_port, conn->ksnc_rx_state,
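
Every deadline test in ksocknal_find_timed_out_conn() compares a free-running jiffies clock via cfs_time_aftereq(), which must stay correct when the counter wraps. A standalone sketch of that comparison idiom; the helper name tick_after_eq() is illustrative, and it assumes cfs_time_aftereq() follows the kernel's time_after_eq() convention:

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is at or after b" for a free-running tick counter:
 * subtract in unsigned arithmetic, then reinterpret as signed, so two
 * times less than half the counter range apart compare correctly even
 * across a wrap. */
static bool tick_after_eq(unsigned long a, unsigned long b)
{
        return (long)(a - b) >= 0;
}

int main(void)
{
        unsigned long b = (unsigned long)-10;   /* just before a wrap */
        unsigned long a = 5;                    /* just after the wrap */

        printf("a after b: %d\n", tick_after_eq(a, b));  /* prints 1 */
        return 0;
}

@@ -2314,7 +2314,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)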
ksocknal_conn_addref(conn); CNETERR("Timeout sending data to %s (%pI4h:%d) " "the network or that node may be down.\n", - libcfs_id2str(peer->ksnp_id), + libcfs_id2str(peer_ni->ksnp_id), &conn->ksnc_ipaddr, conn->ksnc_port); return (conn); } @@ -2324,15 +2324,15 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) } static inline void -ksocknal_flush_stale_txs(ksock_peer_t *peer) +ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni) { ksock_tx_t *tx; struct list_head stale_txs = LIST_HEAD_INIT(stale_txs); write_lock_bh(&ksocknal_data.ksnd_global_lock); - while (!list_empty(&peer->ksnp_tx_queue)) { - tx = list_entry(peer->ksnp_tx_queue.next, + while (!list_empty(&peer_ni->ksnp_tx_queue)) { + tx = list_entry(peer_ni->ksnp_tx_queue.next, ksock_tx_t, tx_list); if (!cfs_time_aftereq(cfs_time_current(), @@ -2345,11 +2345,11 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) write_unlock_bh(&ksocknal_data.ksnd_global_lock); - ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1); + ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, 1); } static int -ksocknal_send_keepalive_locked(ksock_peer_t *peer) +ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni) __must_hold(&ksocknal_data.ksnd_global_lock) { ksock_sched_t *sched; @@ -2357,27 +2357,27 @@ __must_hold(&ksocknal_data.ksnd_global_lock) ksock_tx_t *tx; /* last_alive will be updated by create_conn */ - if (list_empty(&peer->ksnp_conns)) + if (list_empty(&peer_ni->ksnp_conns)) return 0; - if (peer->ksnp_proto != &ksocknal_protocol_v3x) + if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x) return 0; if (*ksocknal_tunables.ksnd_keepalive <= 0 || cfs_time_before(cfs_time_current(), - cfs_time_add(peer->ksnp_last_alive, + cfs_time_add(peer_ni->ksnp_last_alive, cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive)))) return 0; if (cfs_time_before(cfs_time_current(), - peer->ksnp_send_keepalive)) + peer_ni->ksnp_send_keepalive)) return 0; /* retry 10 secs later, so we wouldn't put pressure - * on this peer if we failed to send keepalive this time */ - peer->ksnp_send_keepalive = cfs_time_shift(10); + * on this peer_ni if we failed to send keepalive this time */ + peer_ni->ksnp_send_keepalive = cfs_time_shift(10); - conn = ksocknal_find_conn_locked(peer, NULL, 1); + conn = ksocknal_find_conn_locked(peer_ni, NULL, 1); if (conn != NULL) { sched = conn->ksnc_scheduler; @@ -2400,7 +2400,7 @@ __must_hold(&ksocknal_data.ksnd_global_lock) return -ENOMEM; } - if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) { + if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) { read_lock(&ksocknal_data.ksnd_global_lock); return 1; } @@ -2416,7 +2416,7 @@ static void ksocknal_check_peer_timeouts (int idx) { struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; - ksock_peer_t *peer; + ksock_peer_ni_t *peer_ni; ksock_conn_t *conn; ksock_tx_t *tx; @@ -2426,18 +2426,18 @@ ksocknal_check_peer_timeouts (int idx) * take a look... 
*/ read_lock(&ksocknal_data.ksnd_global_lock); - list_for_each_entry(peer, peers, ksnp_list) { + list_for_each_entry(peer_ni, peers, ksnp_list) { ksock_tx_t *tx_stale; cfs_time_t deadline = 0; int resid = 0; int n = 0; - if (ksocknal_send_keepalive_locked(peer) != 0) { + if (ksocknal_send_keepalive_locked(peer_ni) != 0) { read_unlock(&ksocknal_data.ksnd_global_lock); goto again; } - conn = ksocknal_find_timed_out_conn (peer); + conn = ksocknal_find_timed_out_conn (peer_ni); if (conn != NULL) { read_unlock(&ksocknal_data.ksnd_global_lock); @@ -2445,7 +2445,7 @@ ksocknal_check_peer_timeouts (int idx) ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); /* NB we won't find this one again, but we can't - * just proceed with the next peer, since we dropped + * just proceed with the next peer_ni, since we dropped * ksnd_global_lock and it might be dead already! */ ksocknal_conn_decref(conn); goto again; @@ -2453,30 +2453,30 @@ ksocknal_check_peer_timeouts (int idx) /* we can't process stale txs right here because we're * holding only shared lock */ - if (!list_empty(&peer->ksnp_tx_queue)) { + if (!list_empty(&peer_ni->ksnp_tx_queue)) { ksock_tx_t *tx = - list_entry(peer->ksnp_tx_queue.next, + list_entry(peer_ni->ksnp_tx_queue.next, ksock_tx_t, tx_list); if (cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) { - ksocknal_peer_addref(peer); + ksocknal_peer_addref(peer_ni); read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_flush_stale_txs(peer); + ksocknal_flush_stale_txs(peer_ni); - ksocknal_peer_decref(peer); + ksocknal_peer_decref(peer_ni); goto again; } } - if (list_empty(&peer->ksnp_zc_req_list)) + if (list_empty(&peer_ni->ksnp_zc_req_list)) continue; tx_stale = NULL; - spin_lock(&peer->ksnp_lock); - list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) { + spin_lock(&peer_ni->ksnp_lock); + list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) { if (!cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) break; @@ -2489,7 +2489,7 @@ ksocknal_check_peer_timeouts (int idx) } if (tx_stale == NULL) { - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock); continue; } @@ -2498,13 +2498,13 @@ ksocknal_check_peer_timeouts (int idx) conn = tx_stale->tx_conn; ksocknal_conn_addref(conn); - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock); read_unlock(&ksocknal_data.ksnd_global_lock); - CERROR("Total %d stale ZC_REQs for peer %s detected; the " + CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the " "oldest(%p) timed out %ld secs ago, " "resid: %d, wmem: %d\n", - n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale, + n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale, cfs_duration_sec(cfs_time_current() - deadline), resid, conn->ksnc_sock->sk->sk_wmem_queued); @@ -2602,7 +2602,7 @@ int ksocknal_reaper(void *arg) int chunk = ksocknal_data.ksnd_peer_hash_size; /* Time to check for timeouts on a few more peers: I do - * checks every 'p' seconds on a proportion of the peer + * checks every 'p' seconds on a proportion of the peer_ni * table and I need to check every connection 'n' times * within a timeout interval, to ensure I detect a * timeout on any connection within (n+1)/n times the
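
The arithmetic in that comment works out as follows: if each connection must be checked n times per timeout interval and the reaper wakes every p seconds, it has to cover the whole peer_ni table once every timeout/n seconds, i.e. scan roughly table_size * n * p / timeout buckets per wakeup; a full sweep then trails any deadline by at most timeout/n seconds, so a dead connection is noticed within timeout + timeout/n = (n+1)/n times the timeout. A worked example with made-up numbers (the chunk formula and the values of n, p and timeout are assumptions for illustration, not taken from this hunk; only the 101-bucket table size appears in this patch):

#include <stdio.h>

int main(void)
{
        int H = 101;      /* peer_ni hash buckets (SOCKNAL_PEER_HASH_SIZE) */
        int timeout = 50; /* seconds before a connection counts as dead */
        int n = 3;        /* checks wanted per connection per timeout */
        int p = 1;        /* seconds between reaper wakeups */

        int chunk = H * n * p / timeout;        /* buckets per wakeup */
        if (chunk == 0)
                chunk = 1;

        /* 6 buckets/wakeup; a full sweep takes ~17 s ~= timeout/n, so a
         * timeout is detected within ~66 s = (n+1)/n * timeout */
        printf("scan %d buckets/wakeup, sweep ~%d s, detect within ~%d s\n",
               chunk, (H + chunk - 1) / chunk * p, (n + 1) * timeout / n);
        return 0;
}

diff --git a/lnet/klnds/socklnd/socklnd_lib.c b/lnet/klnds/socklnd/socklnd_lib.c index 6384875..1215488 100644 --- a/lnet/klnds/socklnd/socklnd_lib.c +++ b/lnet/klnds/socklnd/socklnd_lib.c @@ -43,7 +43,7 @@ ksocknal_lib_get_conn_addrs (ksock_conn_t *conn) LASSERT (!conn->ksnc_closing); if (rc != 0) { - CERROR ("Error %d getting sock peer IP\n", rc); + CERROR ("Error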
%d getting sock peer_ni IP\n", rc); return rc; } @@ -189,7 +189,7 @@ ksocknal_lib_eager_ack (ksock_conn_t *conn) /* Remind the socket to ACK eagerly. If I don't, the socket might * think I'm about to send something it could piggy-back the ACK * on, introducing delay in completing zero-copy sends in my - * peer. */ + * peer_ni. */ kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt, sizeof(opt)); diff --git a/lnet/klnds/socklnd/socklnd_proto.c b/lnet/klnds/socklnd/socklnd_proto.c index 12165a3..dff1166 100644 --- a/lnet/klnds/socklnd/socklnd_proto.c +++ b/lnet/klnds/socklnd/socklnd_proto.c @@ -361,14 +361,14 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) static int ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) { - ksock_peer_t *peer = c->ksnc_peer; + ksock_peer_ni_t *peer_ni = c->ksnc_peer; ksock_conn_t *conn; ksock_tx_t *tx; int rc; read_lock(&ksocknal_data.ksnd_global_lock); - conn = ksocknal_find_conn_locked(peer, NULL, !!remote); + conn = ksocknal_find_conn_locked(peer_ni, NULL, !!remote); if (conn != NULL) { ksock_sched_t *sched = conn->ksnc_scheduler; @@ -393,7 +393,7 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) if (tx == NULL) return -ENOMEM; - if ((rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) == 0) + if ((rc = ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id)) == 0) return 0; ksocknal_free_tx(tx); @@ -404,7 +404,7 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) static int ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) { - ksock_peer_t *peer = conn->ksnc_peer; + ksock_peer_ni_t *peer_ni = conn->ksnc_peer; ksock_tx_t *tx; ksock_tx_t *tmp; struct list_head zlist = LIST_HEAD_INIT(zlist); @@ -421,10 +421,10 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) return count == 1 ? 0 : -EPROTO; } - spin_lock(&peer->ksnp_lock); + spin_lock(&peer_ni->ksnp_lock); list_for_each_entry_safe(tx, tmp, - &peer->ksnp_zc_req_list, tx_zc_list) { + &peer_ni->ksnp_zc_req_list, tx_zc_list) { __u64 c = tx->tx_msg.ksm_zc_cookies[0]; if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) { @@ -437,7 +437,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) } } - spin_unlock(&peer->ksnp_lock); + spin_unlock(&peer_ni->ksnp_lock);
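
The cookie test in the walk above accepts any tx whose cookie c equals an endpoint or lies strictly between them, so a single ACK can retire the entire inclusive range [cookie1, cookie2] of pending zero-copy requests; matches are moved onto the local zlist under ksnp_lock for processing once the lock is dropped. Restated as a standalone predicate (the helper name zc_ack_covers() is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when an ACK naming [cookie1, cookie2] covers cookie c;
 * mirrors the comparison in ksocknal_handle_zcack() above. */
static bool zc_ack_covers(uint64_t cookie1, uint64_t cookie2, uint64_t c)
{
        return c == cookie1 || c == cookie2 ||
               (cookie1 < c && c < cookie2);
}

int main(void)
{
        /* an ACK for [3, 7] retires cookies 3..7 but not 2 or 8 */
        printf("%d %d %d %d\n",
               zc_ack_covers(3, 7, 3), zc_ack_covers(3, 7, 5),
               zc_ack_covers(3, 7, 8), zc_ack_covers(3, 7, 2));
        return 0;
}

while (!list_empty(&zlist)) { tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);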