From 679e73db770d188f43aa4d50592d65e337ad135e Mon Sep 17 00:00:00 2001
From: Chris Horn
Date: Thu, 11 Aug 2022 13:03:04 -0600
Subject: [PATCH] LU-16213 kfilnd: Rename struct kfilnd_peer members

Prefix members of struct kfilnd_peer with kp_ to make these variable
names easier to find. Also use 'kp' as a standard name for pointers to
struct kfilnd_peer instead of 'peer' (again to make these pointers
easier to find). As such, struct kfilnd_transaction::peer is also
renamed to struct kfilnd_transaction::tn_kp.

HPE-bug-id: LUS-11128
Test-Parameters: trivial
Change-Id: Id535c7af28a5335026037a55920c706a4e16f947
Signed-off-by: Chris Horn
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/48780
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Ron Gredvig
Reviewed-by: Ian Ziemba
Reviewed-by: Oleg Drokin
---
 lnet/klnds/kfilnd/kfilnd.c      |   2 +-
 lnet/klnds/kfilnd/kfilnd.h      |  48 ++++++++---------
 lnet/klnds/kfilnd/kfilnd_ep.c   |   6 +--
 lnet/klnds/kfilnd/kfilnd_peer.c | 115 ++++++++++++++++++++--------------------
 lnet/klnds/kfilnd/kfilnd_peer.h |  12 ++---
 lnet/klnds/kfilnd/kfilnd_tn.c   | 115 ++++++++++++++++++++--------------------
 6 files changed, 150 insertions(+), 148 deletions(-)

diff --git a/lnet/klnds/kfilnd/kfilnd.c b/lnet/klnds/kfilnd/kfilnd.c
index 4ad1a1f..b74f58e 100644
--- a/lnet/klnds/kfilnd/kfilnd.c
+++ b/lnet/klnds/kfilnd/kfilnd.c
@@ -148,7 +148,7 @@ static int kfilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *msg)
 	}
 
 	/* Need to fire off special transaction if this is a new peer. */
-	if (kfilnd_peer_is_new_peer(tn->peer)) {
+	if (kfilnd_peer_is_new_peer(tn->tn_kp)) {
 		rc = kfilnd_send_hello_request(dev, cpt, tgt_nid4);
 		if (rc) {
 			kfilnd_tn_free(tn);
diff --git a/lnet/klnds/kfilnd/kfilnd.h b/lnet/klnds/kfilnd/kfilnd.h
index 49781e5..abf9adf 100644
--- a/lnet/klnds/kfilnd/kfilnd.h
+++ b/lnet/klnds/kfilnd/kfilnd.h
@@ -221,35 +221,35 @@ struct kfilnd_ep {
 };
 
 struct kfilnd_peer {
-	struct rhash_head node;
-	struct rcu_head rcu_head;
-	struct kfilnd_dev *dev;
-	lnet_nid_t nid;
-	kfi_addr_t addr;
-	atomic_t rx_base;
-	atomic_t remove_peer;
-	refcount_t cnt;
-	time64_t last_alive;
-	u16 version;
-	u32 local_session_key;
-	u32 remote_session_key;
+	struct rhash_head kp_node;
+	struct rcu_head kp_rcu_head;
+	struct kfilnd_dev *kp_dev;
+	lnet_nid_t kp_nid;
+	kfi_addr_t kp_addr;
+	atomic_t kp_rx_base;
+	atomic_t kp_remove_peer;
+	refcount_t kp_cnt;
+	time64_t kp_last_alive;
+	u16 kp_version;
+	u32 kp_local_session_key;
+	u32 kp_remote_session_key;
 };
 
-static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *peer)
+static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *kp)
 {
-	return peer->version == 0;
+	return kp->kp_version == 0;
 }
 
-static inline void kfilnd_peer_set_version(struct kfilnd_peer *peer,
+static inline void kfilnd_peer_set_version(struct kfilnd_peer *kp,
 					   u16 version)
 {
-	peer->version = version;
+	kp->kp_version = version;
 }
 
-static inline void kfilnd_peer_set_remote_session_key(struct kfilnd_peer *peer,
+static inline void kfilnd_peer_set_remote_session_key(struct kfilnd_peer *kp,
 						      u32 session_key)
 {
-	peer->remote_session_key = session_key;
+	kp->kp_remote_session_key = session_key;
 }
 
 struct kfilnd_fab {
@@ -478,16 +478,16 @@ struct kfilnd_msg {
 		(ep)->end_context_id, ##__VA_ARGS__)
 
 #define KFILND_TN_PEER_VALID(tn) \
-	!IS_ERR_OR_NULL((tn)->peer)
+	!IS_ERR_OR_NULL((tn)->tn_kp)
 
 #define KFILND_TN_DIR_DEBUG(tn, fmt, dir, ...) \
 	CDEBUG(D_NET, "Transaction ID %p: %s:%u %s %s:%llu " fmt "\n", \
 	       (tn), \
 	       libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
 	       (tn)->tn_ep->end_context_id, dir, \
-	       libcfs_nid2str((tn)->peer->nid), \
+	       libcfs_nid2str((tn)->tn_kp->kp_nid), \
 	       KFILND_TN_PEER_VALID(tn) ? \
-	       KFILND_RX_CONTEXT((tn)->peer->addr) : 0, \
+	       KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
 	       ##__VA_ARGS__)
 
 #define KFILND_TN_DEBUG(tn, fmt, ...) \
@@ -503,9 +503,9 @@ struct kfilnd_msg {
 	       (tn), \
 	       libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
 	       (tn)->tn_ep->end_context_id, dir, \
-	       libcfs_nid2str((tn)->peer->nid), \
+	       libcfs_nid2str((tn)->tn_kp->kp_nid), \
 	       KFILND_TN_PEER_VALID(tn) ? \
-	       KFILND_RX_CONTEXT((tn)->peer->addr) : 0, \
+	       KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
 	       ##__VA_ARGS__)
 
 #define KFILND_TN_ERROR(tn, fmt, ...) \
@@ -642,7 +642,7 @@ struct kfilnd_transaction {
 
 	/* Transaction send message and target address. */
 	kfi_addr_t tn_target_addr;
-	struct kfilnd_peer *peer;
+	struct kfilnd_peer *tn_kp;
 	struct kfilnd_transaction_msg tn_tx_msg;
 
 	/* Transaction multi-receive buffer and associated receive message. */
diff --git a/lnet/klnds/kfilnd/kfilnd_ep.c b/lnet/klnds/kfilnd/kfilnd_ep.c
index 72b4e0e..dec95f2 100644
--- a/lnet/klnds/kfilnd/kfilnd_ep.c
+++ b/lnet/klnds/kfilnd/kfilnd_ep.c
@@ -199,7 +199,7 @@ static int kfilnd_ep_gen_fake_err(struct kfilnd_ep *ep,
 
 static uint64_t gen_init_tag_bits(struct kfilnd_transaction *tn)
 {
-	return (tn->peer->remote_session_key << KFILND_EP_KEY_BITS) |
+	return (tn->tn_kp->kp_remote_session_key << KFILND_EP_KEY_BITS) |
 		tn->tn_response_mr_key;
 }
 
@@ -296,7 +296,7 @@ int kfilnd_ep_cancel_tagged_recv(struct kfilnd_ep *ep,
 
 static uint64_t gen_target_tag_bits(struct kfilnd_transaction *tn)
 {
-	return (tn->peer->local_session_key << KFILND_EP_KEY_BITS) |
+	return (tn->tn_kp->kp_local_session_key << KFILND_EP_KEY_BITS) |
 		tn->tn_mr_key;
 }
 
@@ -316,7 +316,7 @@ int kfilnd_ep_post_tagged_recv(struct kfilnd_ep *ep,
 	struct kfi_msg_tagged msg = {
 		.tag = gen_target_tag_bits(tn),
 		.context = tn,
-		.addr = tn->peer->addr,
+		.addr = tn->tn_kp->kp_addr,
 	};
 	struct kfi_cq_err_entry fake_error = {
 		.op_context = tn,
diff --git a/lnet/klnds/kfilnd/kfilnd_peer.c b/lnet/klnds/kfilnd/kfilnd_peer.c
index f2dc2fe..84653ca 100644
--- a/lnet/klnds/kfilnd/kfilnd_peer.c
+++ b/lnet/klnds/kfilnd/kfilnd_peer.c
@@ -32,9 +32,9 @@
 #include "kfilnd_dev.h"
 
 static const struct rhashtable_params peer_cache_params = {
-	.head_offset = offsetof(struct kfilnd_peer, node),
-	.key_offset = offsetof(struct kfilnd_peer, nid),
-	.key_len = sizeof_field(struct kfilnd_peer, nid),
+	.head_offset = offsetof(struct kfilnd_peer, kp_node),
+	.key_offset = offsetof(struct kfilnd_peer, kp_nid),
+	.key_len = sizeof_field(struct kfilnd_peer, kp_nid),
 	.automatic_shrinking = true,
 };
 
@@ -45,62 +45,62 @@ static const struct rhashtable_params peer_cache_params = {
  */
 static void kfilnd_peer_free(void *ptr, void *arg)
 {
-	struct kfilnd_peer *peer = ptr;
+	struct kfilnd_peer *kp = ptr;
 
 	CDEBUG(D_NET, "%s(0x%llx) peer entry freed\n",
-	       libcfs_nid2str(peer->nid), peer->addr);
+	       libcfs_nid2str(kp->kp_nid), kp->kp_addr);
 
-	kfi_av_remove(peer->dev->kfd_av, &peer->addr, 1, 0);
+	kfi_av_remove(kp->kp_dev->kfd_av, &kp->kp_addr, 1, 0);
 
-	kfree_rcu(peer, rcu_head);
+	kfree_rcu(kp, kp_rcu_head);
 }
 
 /**
  * kfilnd_peer_down() - Mark a peer as down.
- * @peer: Peer to be downed.
+ * @kp: Peer to be downed.
  */
-void kfilnd_peer_down(struct kfilnd_peer *peer)
+void kfilnd_peer_down(struct kfilnd_peer *kp)
 {
-	if (atomic_cmpxchg(&peer->remove_peer, 0, 1) == 0) {
+	if (atomic_cmpxchg(&kp->kp_remove_peer, 0, 1) == 0) {
 		struct lnet_nid peer_nid;
 
-		lnet_nid4_to_nid(peer->nid, &peer_nid);
+		lnet_nid4_to_nid(kp->kp_nid, &peer_nid);
 		CDEBUG(D_NET, "%s(0x%llx) marked for removal from peer cache\n",
-		       libcfs_nidstr(&peer_nid), peer->addr);
+		       libcfs_nidstr(&peer_nid), kp->kp_addr);
 
-		lnet_notify(peer->dev->kfd_ni, &peer_nid, false, false,
-			    peer->last_alive);
+		lnet_notify(kp->kp_dev->kfd_ni, &peer_nid, false, false,
+			    kp->kp_last_alive);
 	}
 }
 
 /**
  * kfilnd_peer_put() - Return a reference for a peer.
- * @peer: Peer where the reference should be returned.
+ * @kp: Peer where the reference should be returned.
  */
-void kfilnd_peer_put(struct kfilnd_peer *peer)
+void kfilnd_peer_put(struct kfilnd_peer *kp)
 {
 	rcu_read_lock();
 
 	/* Return allocation reference if the peer was marked for removal. */
-	if (atomic_cmpxchg(&peer->remove_peer, 1, 2) == 1) {
-		rhashtable_remove_fast(&peer->dev->peer_cache, &peer->node,
+	if (atomic_cmpxchg(&kp->kp_remove_peer, 1, 2) == 1) {
+		rhashtable_remove_fast(&kp->kp_dev->peer_cache, &kp->kp_node,
 				       peer_cache_params);
-		refcount_dec(&peer->cnt);
+		refcount_dec(&kp->kp_cnt);
 
 		CDEBUG(D_NET, "%s(0x%llx) removed from peer cache\n",
-		       libcfs_nid2str(peer->nid), peer->addr);
+		       libcfs_nid2str(kp->kp_nid), kp->kp_addr);
 	}
 
-	if (refcount_dec_and_test(&peer->cnt))
-		kfilnd_peer_free(peer, NULL);
+	if (refcount_dec_and_test(&kp->kp_cnt))
+		kfilnd_peer_free(kp, NULL);
 
 	rcu_read_unlock();
 }
 
-u16 kfilnd_peer_target_rx_base(struct kfilnd_peer *peer)
+u16 kfilnd_peer_target_rx_base(struct kfilnd_peer *kp)
 {
-	int cpt = lnet_cpt_of_nid(peer->nid, peer->dev->kfd_ni);
-	struct kfilnd_ep *ep = peer->dev->cpt_to_endpoint[cpt];
+	int cpt = lnet_cpt_of_nid(kp->kp_nid, kp->kp_dev->kfd_ni);
+	struct kfilnd_ep *ep = kp->kp_dev->cpt_to_endpoint[cpt];
 
 	return ep->end_context_id;
 }
@@ -119,24 +119,24 @@ struct kfilnd_peer *kfilnd_peer_get(struct kfilnd_dev *dev, lnet_nid_t nid)
 	int rc;
 	u32 nid_addr = LNET_NIDADDR(nid);
 	u32 net_num = LNET_NETNUM(LNET_NIDNET(nid));
-	struct kfilnd_peer *peer;
+	struct kfilnd_peer *kp;
 	struct kfilnd_peer *clash_peer;
 
 again:
 	/* Check the cache for a match. */
 	rcu_read_lock();
-	peer = rhashtable_lookup_fast(&dev->peer_cache, &nid,
+	kp = rhashtable_lookup_fast(&dev->peer_cache, &nid,
 				      peer_cache_params);
-	if (peer && !refcount_inc_not_zero(&peer->cnt))
-		peer = NULL;
+	if (kp && !refcount_inc_not_zero(&kp->kp_cnt))
+		kp = NULL;
 	rcu_read_unlock();
 
-	if (peer)
-		return peer;
+	if (kp)
+		return kp;
 
 	/* Allocate a new peer for the cache. */
-	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
-	if (!peer) {
+	kp = kzalloc(sizeof(*kp), GFP_KERNEL);
+	if (!kp) {
 		rc = -ENOMEM;
 		goto err;
 	}
@@ -156,7 +156,7 @@ again:
 	/* Use the KFI address vector to translate node and service string into
 	 * a KFI address handle.
 	 */
-	rc = kfi_av_insertsvc(dev->kfd_av, node, service, &peer->addr, 0, dev);
+	rc = kfi_av_insertsvc(dev->kfd_av, node, service, &kp->kp_addr, 0, dev);
 	kfree(service);
 	kfree(node);
 
@@ -168,25 +168,25 @@ again:
 		goto err_free_peer;
 	}
 
-	peer->dev = dev;
-	peer->nid = nid;
-	atomic_set(&peer->rx_base, 0);
-	atomic_set(&peer->remove_peer, 0);
-	peer->local_session_key = kfilnd_dev_get_session_key(dev);
+	kp->kp_dev = dev;
+	kp->kp_nid = nid;
+	atomic_set(&kp->kp_rx_base, 0);
+	atomic_set(&kp->kp_remove_peer, 0);
+	kp->kp_local_session_key = kfilnd_dev_get_session_key(dev);
 
 	/* One reference for the allocation and another for get operation
 	 * performed for this peer. The allocation reference is returned when
 	 * the entry is marked for removal.
 	 */
-	refcount_set(&peer->cnt, 2);
+	refcount_set(&kp->kp_cnt, 2);
 
 	clash_peer = rhashtable_lookup_get_insert_fast(&dev->peer_cache,
-						       &peer->node,
+						       &kp->kp_node,
 						       peer_cache_params);
 
 	if (clash_peer) {
-		kfi_av_remove(dev->kfd_av, &peer->addr, 1, 0);
-		kfree(peer);
+		kfi_av_remove(dev->kfd_av, &kp->kp_addr, 1, 0);
+		kfree(kp);
 
 		if (IS_ERR(clash_peer)) {
 			rc = PTR_ERR(clash_peer);
@@ -196,17 +196,17 @@ again:
 		}
 	}
 
-	kfilnd_peer_alive(peer);
+	kfilnd_peer_alive(kp);
 
 	CDEBUG(D_NET, "%s(0x%llx) peer entry allocated\n",
-	       libcfs_nid2str(peer->nid), peer->addr);
+	       libcfs_nid2str(kp->kp_nid), kp->kp_addr);
 
-	return peer;
+	return kp;
 
 err_free_node_str:
 	kfree(node);
 err_free_peer:
-	kfree(peer);
+	kfree(kp);
 err:
 	return ERR_PTR(rc);
 }
@@ -214,7 +214,7 @@ err:
 /**
  * kfilnd_peer_get_kfi_addr() - Return kfi_addr_t used for eager untagged send
  * kfi operations.
- * @peer: Peer struct.
+ * @kp: Peer struct.
  *
  * The returned kfi_addr_t is updated to target a specific RX context. The
  * address return by this function should not be used if a specific RX context
@@ -223,36 +223,37 @@ err:
 *
 * Return: kfi_addr_t.
 */
-kfi_addr_t kfilnd_peer_get_kfi_addr(struct kfilnd_peer *peer)
+kfi_addr_t kfilnd_peer_get_kfi_addr(struct kfilnd_peer *kp)
 {
	/* TODO: Support RX count by round-robining the generated kfi_addr_t's
	 * across multiple RX contexts using RX base and RX count.
	 */
-	return kfi_rx_addr(KFILND_BASE_ADDR(peer->addr),
-			   atomic_read(&peer->rx_base), KFILND_FAB_RX_CTX_BITS);
+	return kfi_rx_addr(KFILND_BASE_ADDR(kp->kp_addr),
+			   atomic_read(&kp->kp_rx_base),
+			   KFILND_FAB_RX_CTX_BITS);
 }
 
 /**
 * kfilnd_peer_update_rx_contexts() - Update the RX context for a peer.
- * @peer: Peer to be updated.
+ * @kp: Peer to be updated.
 * @rx_base: New RX base for peer.
 * @rx_count: New RX count for peer.
 */
-void kfilnd_peer_update_rx_contexts(struct kfilnd_peer *peer,
+void kfilnd_peer_update_rx_contexts(struct kfilnd_peer *kp,
 				    unsigned int rx_base,
 				    unsigned int rx_count)
 {
	/* TODO: Support RX count. */
	LASSERT(rx_count > 0);
 
-	atomic_set(&peer->rx_base, rx_base);
+	atomic_set(&kp->kp_rx_base, rx_base);
 }
 
 /**
 * kfilnd_peer_alive() - Update when the peer was last alive.
- * @peer: Peer to be updated.
+ * @kp: Peer to be updated.
 */
-void kfilnd_peer_alive(struct kfilnd_peer *peer)
+void kfilnd_peer_alive(struct kfilnd_peer *kp)
 {
-	peer->last_alive = ktime_get_seconds();
+	kp->kp_last_alive = ktime_get_seconds();
 
	/* Ensure timestamp is committed to memory before used.
	 */
	smp_mb();
diff --git a/lnet/klnds/kfilnd/kfilnd_peer.h b/lnet/klnds/kfilnd/kfilnd_peer.h
index 27a72bd..4e18e95 100644
--- a/lnet/klnds/kfilnd/kfilnd_peer.h
+++ b/lnet/klnds/kfilnd/kfilnd_peer.h
@@ -34,16 +34,16 @@
 
 #include "kfilnd.h"
 
-void kfilnd_peer_down(struct kfilnd_peer *peer);
-void kfilnd_peer_put(struct kfilnd_peer *peer);
+void kfilnd_peer_down(struct kfilnd_peer *kp);
+void kfilnd_peer_put(struct kfilnd_peer *kp);
 struct kfilnd_peer *kfilnd_peer_get(struct kfilnd_dev *dev, lnet_nid_t nid);
-void kfilnd_peer_update_rx_contexts(struct kfilnd_peer *peer,
+void kfilnd_peer_update_rx_contexts(struct kfilnd_peer *kp,
 				    unsigned int rx_base,
 				    unsigned int rx_count);
-void kfilnd_peer_alive(struct kfilnd_peer *peer);
+void kfilnd_peer_alive(struct kfilnd_peer *kp);
 void kfilnd_peer_destroy(struct kfilnd_dev *dev);
 void kfilnd_peer_init(struct kfilnd_dev *dev);
-kfi_addr_t kfilnd_peer_get_kfi_addr(struct kfilnd_peer *peer);
-u16 kfilnd_peer_target_rx_base(struct kfilnd_peer *peer);
+kfi_addr_t kfilnd_peer_get_kfi_addr(struct kfilnd_peer *kp);
+u16 kfilnd_peer_target_rx_base(struct kfilnd_peer *kp);
 
 #endif /* _KFILND_PEER_ */
diff --git a/lnet/klnds/kfilnd/kfilnd_tn.c b/lnet/klnds/kfilnd/kfilnd_tn.c
index ba79c80..e36fd61 100644
--- a/lnet/klnds/kfilnd/kfilnd_tn.c
+++ b/lnet/klnds/kfilnd/kfilnd_tn.c
@@ -69,8 +69,8 @@ static void kfilnd_tn_pack_hello_req(struct kfilnd_transaction *tn)
 
	/* Pack the protocol header and payload. */
	msg->proto.hello.version = KFILND_MSG_VERSION;
-	msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->peer);
-	msg->proto.hello.session_key = tn->peer->local_session_key;
+	msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->tn_kp);
+	msg->proto.hello.session_key = tn->tn_kp->kp_local_session_key;
 
	/* TODO: Support multiple RX contexts per peer. */
	msg->proto.hello.rx_count = 1;
@@ -85,7 +85,7 @@ static void kfilnd_tn_pack_hello_req(struct kfilnd_transaction *tn)
		offsetof(struct kfilnd_msg, proto);
	msg->cksum = NO_CHECKSUM;
	msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
-	msg->dstnid = tn->peer->nid;
+	msg->dstnid = tn->tn_kp->kp_nid;
 
	/* Checksum entire message. */
	msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
@@ -98,9 +98,9 @@ static void kfilnd_tn_pack_hello_rsp(struct kfilnd_transaction *tn)
	struct kfilnd_msg *msg = tn->tn_tx_msg.msg;
 
	/* Pack the protocol header and payload. */
-	msg->proto.hello.version = tn->peer->version;
-	msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->peer);
-	msg->proto.hello.session_key = tn->peer->local_session_key;
+	msg->proto.hello.version = tn->tn_kp->kp_version;
+	msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->tn_kp);
+	msg->proto.hello.session_key = tn->tn_kp->kp_local_session_key;
 
	/* TODO: Support multiple RX contexts per peer. */
	msg->proto.hello.rx_count = 1;
@@ -115,7 +115,7 @@ static void kfilnd_tn_pack_hello_rsp(struct kfilnd_transaction *tn)
		offsetof(struct kfilnd_msg, proto);
	msg->cksum = NO_CHECKSUM;
	msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
-	msg->dstnid = tn->peer->nid;
+	msg->dstnid = tn->tn_kp->kp_nid;
 
	/* Checksum entire message. */
	msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
@@ -140,7 +140,7 @@ static void kfilnd_tn_pack_bulk_req(struct kfilnd_transaction *tn)
		offsetof(struct kfilnd_msg, proto);
	msg->cksum = NO_CHECKSUM;
	msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
-	msg->dstnid = tn->peer->nid;
+	msg->dstnid = tn->tn_kp->kp_nid;
 
	/* Checksum entire message. */
	msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
@@ -169,7 +169,7 @@ static void kfilnd_tn_pack_immed_msg(struct kfilnd_transaction *tn)
	msg->nob = offsetof(struct kfilnd_msg, proto.immed.payload[tn->tn_nob]);
	msg->cksum = NO_CHECKSUM;
	msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
-	msg->dstnid = tn->peer->nid;
+	msg->dstnid = tn->tn_kp->kp_nid;
 
	/* Checksum entire message. */
	msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
@@ -433,7 +433,7 @@ static void kfilnd_tn_finalize(struct kfilnd_transaction *tn, bool *tn_released)
	}
 
	if (KFILND_TN_PEER_VALID(tn))
-		kfilnd_peer_put(tn->peer);
+		kfilnd_peer_put(tn->tn_kp);
 
	kfilnd_tn_record_state_change(tn);
	kfilnd_tn_record_duration(tn);
@@ -558,9 +558,9 @@ static int kfilnd_tn_state_tagged_recv_posted(struct kfilnd_transaction *tn,
 
	switch (event) {
	case TN_EVENT_INIT_BULK:
-		tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->peer);
+		tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->tn_kp);
		KFILND_TN_DEBUG(tn, "Using peer %s(%#llx)",
-				libcfs_nid2str(tn->peer->nid),
+				libcfs_nid2str(tn->tn_kp->kp_nid),
				tn->tn_target_addr);
 
		kfilnd_tn_pack_bulk_req(tn);
@@ -578,7 +578,7 @@ static int kfilnd_tn_state_tagged_recv_posted(struct kfilnd_transaction *tn,
 
		case -EAGAIN:
			KFILND_TN_DEBUG(tn, "Need to replay post send to %s(%#llx)",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr);
			return -EAGAIN;
 
@@ -588,7 +588,7 @@ static int kfilnd_tn_state_tagged_recv_posted(struct kfilnd_transaction *tn,
		default:
			KFILND_TN_ERROR(tn,
					"Failed to post send to %s(%#llx): rc=%d",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr, rc);
			kfilnd_tn_status_update(tn, rc,
						LNET_MSG_STATUS_LOCAL_ERROR);
@@ -624,7 +624,7 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
	/* For new peers, send a hello request message and queue the true LNet
	 * message for replay.
	 */
-	if (kfilnd_peer_is_new_peer(tn->peer) &&
+	if (kfilnd_peer_is_new_peer(tn->tn_kp) &&
	    (event == TN_EVENT_INIT_IMMEDIATE || event == TN_EVENT_INIT_BULK)) {
		remaining_time = max_t(ktime_t, 0,
				       tn->deadline - ktime_get_seconds());
@@ -636,7 +636,7 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
		 */
		if (remaining_time > 0) {
			KFILND_TN_DEBUG(tn, "%s hello response pending",
-					libcfs_nid2str(tn->peer->nid));
+					libcfs_nid2str(tn->tn_kp->kp_nid));
			return -EAGAIN;
		}
 
@@ -649,9 +649,9 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
	switch (event) {
	case TN_EVENT_INIT_IMMEDIATE:
	case TN_EVENT_TX_HELLO:
-		tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->peer);
+		tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->tn_kp);
		KFILND_TN_DEBUG(tn, "Using peer %s(%#llx)",
-				libcfs_nid2str(tn->peer->nid),
+				libcfs_nid2str(tn->tn_kp->kp_nid),
				tn->tn_target_addr);
 
		if (event == TN_EVENT_INIT_IMMEDIATE)
@@ -672,14 +672,14 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
		 */
		case -EAGAIN:
			KFILND_TN_DEBUG(tn, "Need to replay send to %s(%#llx)",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr);
			return -EAGAIN;
 
		default:
			KFILND_TN_ERROR(tn,
					"Failed to post send to %s(%#llx): rc=%d",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr, rc);
			kfilnd_tn_status_update(tn, rc,
						LNET_MSG_STATUS_LOCAL_ERROR);
@@ -723,10 +723,10 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
		 * requires dropping the incoming message and initiating a hello
		 * handshake.
		 */
-		if (kfilnd_peer_is_new_peer(tn->peer)) {
+		if (kfilnd_peer_is_new_peer(tn->tn_kp)) {
			rc = kfilnd_send_hello_request(tn->tn_ep->end_dev,
						       tn->tn_ep->end_cpt,
-						       tn->peer->nid);
+						       tn->tn_kp->kp_nid);
			if (rc)
				KFILND_TN_ERROR(tn,
						"Failed to send hello request: rc=%d",
@@ -737,18 +737,18 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
			 */
			KFILND_TN_ERROR(tn,
					"Dropping message from %s due to stale peer",
-					libcfs_nid2str(tn->peer->nid));
+					libcfs_nid2str(tn->tn_kp->kp_nid));
			kfilnd_tn_status_update(tn, -EPROTO,
						LNET_MSG_STATUS_LOCAL_DROPPED);
			rc = 0;
			goto out;
		}
 
-		LASSERT(kfilnd_peer_is_new_peer(tn->peer) == false);
+		LASSERT(kfilnd_peer_is_new_peer(tn->tn_kp) == false);
		msg = tn->tn_rx_msg.msg;
 
		/* Update the NID address with the new preferred RX context. */
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
 
		/* Pass message up to LNet
		 * The TN will be reused in this call chain so we need to
@@ -787,26 +787,27 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
 
		switch (msg->type) {
		case KFILND_MSG_HELLO_REQ:
-			kfilnd_peer_update_rx_contexts(tn->peer,
+			kfilnd_peer_update_rx_contexts(tn->tn_kp,
						       msg->proto.hello.rx_base,
						       msg->proto.hello.rx_count);
-			kfilnd_peer_set_remote_session_key(tn->peer,
+			kfilnd_peer_set_remote_session_key(tn->tn_kp,
							   msg->proto.hello.session_key);
 
			/* Negotiate kfilnd version used between peers. Fallback
			 * to the minimum implemented kfilnd version.
			 */
-			kfilnd_peer_set_version(tn->peer,
+			kfilnd_peer_set_version(tn->tn_kp,
						min_t(__u16, KFILND_MSG_VERSION,
						      msg->proto.hello.version));
			KFILND_TN_DEBUG(tn,
					"Peer kfilnd version: %u; Local kfilnd version: %u; Negotiated kfilnd verions: %u",
					msg->proto.hello.version,
-					KFILND_MSG_VERSION, tn->peer->version);
+					KFILND_MSG_VERSION,
+					tn->tn_kp->kp_version);
 
-			tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->peer);
+			tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->tn_kp);
			KFILND_TN_DEBUG(tn, "Using peer %s(%#llx)",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr);
 
			kfilnd_tn_pack_hello_rsp(tn);
@@ -820,14 +821,14 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
 
			case -EAGAIN:
				KFILND_TN_DEBUG(tn, "Need to replay send to %s(%#llx)",
-						libcfs_nid2str(tn->peer->nid),
+						libcfs_nid2str(tn->tn_kp->kp_nid),
						tn->tn_target_addr);
				return -EAGAIN;
 
			default:
				KFILND_TN_ERROR(tn,
						"Failed to post send to %s(%#llx): rc=%d",
-						libcfs_nid2str(tn->peer->nid),
+						libcfs_nid2str(tn->tn_kp->kp_nid),
						tn->tn_target_addr, rc);
				kfilnd_tn_status_update(tn, rc,
							LNET_MSG_STATUS_LOCAL_ERROR);
@@ -836,12 +837,12 @@ static int kfilnd_tn_state_idle(struct kfilnd_transaction *tn,
 
		case KFILND_MSG_HELLO_RSP:
			rc = 0;
-			kfilnd_peer_update_rx_contexts(tn->peer,
+			kfilnd_peer_update_rx_contexts(tn->tn_kp,
						       msg->proto.hello.rx_base,
						       msg->proto.hello.rx_count);
-			kfilnd_peer_set_remote_session_key(tn->peer,
+			kfilnd_peer_set_remote_session_key(tn->tn_kp,
							   msg->proto.hello.session_key);
-			kfilnd_peer_set_version(tn->peer,
+			kfilnd_peer_set_version(tn->tn_kp,
						msg->proto.hello.version);
			KFILND_TN_DEBUG(tn, "Negotiated kfilnd version: %u",
					msg->proto.hello.version);
@@ -887,11 +888,11 @@ static int kfilnd_tn_state_imm_send(struct kfilnd_transaction *tn,
			hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
 
		kfilnd_tn_status_update(tn, status, hstatus);
-		kfilnd_peer_down(tn->peer);
+		kfilnd_peer_down(tn->tn_kp);
		break;
 
	case TN_EVENT_TX_OK:
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
		break;
 
	default:
@@ -932,10 +933,10 @@ static int kfilnd_tn_state_imm_recv(struct kfilnd_transaction *tn,
 
		/* Update the KFI address to use the response RX context. */
		tn->tn_target_addr =
-			kfi_rx_addr(KFILND_BASE_ADDR(tn->peer->addr),
+			kfi_rx_addr(KFILND_BASE_ADDR(tn->tn_kp->kp_addr),
				    tn->tn_response_rx, KFILND_FAB_RX_CTX_BITS);
		KFILND_TN_DEBUG(tn, "Using peer %s(0x%llx)",
-				libcfs_nid2str(tn->peer->nid),
+				libcfs_nid2str(tn->tn_kp->kp_nid),
				tn->tn_target_addr);
 
		/* Initiate the RMA operation to push/pull the LNet payload or
@@ -962,7 +963,7 @@ static int kfilnd_tn_state_imm_recv(struct kfilnd_transaction *tn,
			KFILND_TN_DEBUG(tn,
					"Need to replay tagged %s to %s(%#llx)",
					tn->sink_buffer ? "read" : "write",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr);
			return -EAGAIN;
 
@@ -970,7 +971,7 @@
			KFILND_TN_ERROR(tn,
					"Failed to post tagged %s to %s(%#llx): rc=%d",
					tn->sink_buffer ? "read" : "write",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr, rc);
			kfilnd_tn_status_update(tn, rc,
						LNET_MSG_STATUS_LOCAL_ERROR);
@@ -1006,14 +1007,14 @@ static int kfilnd_tn_state_imm_recv(struct kfilnd_transaction *tn,
		case -EAGAIN:
			KFILND_TN_DEBUG(tn,
					"Need to replay tagged send to %s(%#llx)",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr);
			return -EAGAIN;
 
		default:
			KFILND_TN_ERROR(tn,
					"Failed to post tagged send to %s(%#llx): rc=%d",
-					libcfs_nid2str(tn->peer->nid),
+					libcfs_nid2str(tn->tn_kp->kp_nid),
					tn->tn_target_addr, rc);
			kfilnd_tn_status_update(tn, rc,
						LNET_MSG_STATUS_LOCAL_ERROR);
@@ -1051,7 +1052,7 @@ static int kfilnd_tn_state_wait_comp(struct kfilnd_transaction *tn,
 
	switch (event) {
	case TN_EVENT_TX_OK:
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
		kfilnd_tn_timeout_enable(tn);
		kfilnd_tn_state_change(tn, TN_STATE_WAIT_TAG_COMP);
		break;
@@ -1067,7 +1068,7 @@ static int kfilnd_tn_state_wait_comp(struct kfilnd_transaction *tn,
			hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
 
		kfilnd_tn_status_update(tn, status, hstatus);
-		kfilnd_peer_down(tn->peer);
+		kfilnd_peer_down(tn->tn_kp);
 
		/* Need to cancel the tagged receive to prevent resources from
		 * being leaked.
@@ -1120,7 +1121,7 @@ static int kfilnd_tn_state_wait_send_comp(struct kfilnd_transaction *tn,
				status);
 
	if (event == TN_EVENT_TX_OK) {
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
		kfilnd_tn_finalize(tn, tn_released);
	} else {
		KFILND_TN_ERROR(tn, "Invalid %s event", tn_event_to_str(event));
@@ -1141,7 +1142,7 @@ static int kfilnd_tn_state_wait_tag_rma_comp(struct kfilnd_transaction *tn,
 
	switch (event) {
	case TN_EVENT_TAG_TX_OK:
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
		break;
 
	case TN_EVENT_TAG_TX_FAIL:
@@ -1151,7 +1152,7 @@ static int kfilnd_tn_state_wait_tag_rma_comp(struct kfilnd_transaction *tn,
			hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
 
		kfilnd_tn_status_update(tn, status, hstatus);
-		kfilnd_peer_down(tn->peer);
+		kfilnd_peer_down(tn->tn_kp);
		break;
 
	default:
@@ -1232,11 +1233,11 @@ static int kfilnd_tn_state_wait_tag_comp(struct kfilnd_transaction *tn,
			hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
 
		kfilnd_tn_status_update(tn, status, hstatus);
-		kfilnd_peer_down(tn->peer);
+		kfilnd_peer_down(tn->tn_kp);
		break;
 
	case TN_EVENT_TAG_TX_OK:
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
		break;
 
	default:
@@ -1258,11 +1259,11 @@ static int kfilnd_tn_state_fail(struct kfilnd_transaction *tn,
 
	switch (event) {
	case TN_EVENT_TX_FAIL:
-		kfilnd_peer_down(tn->peer);
+		kfilnd_peer_down(tn->tn_kp);
		break;
 
	case TN_EVENT_TX_OK:
-		kfilnd_peer_alive(tn->peer);
+		kfilnd_peer_alive(tn->tn_kp);
		break;
 
	case TN_EVENT_TAG_RX_FAIL:
@@ -1290,7 +1291,7 @@ static int kfilnd_tn_state_wait_timeout_tag_comp(struct kfilnd_transaction *tn,
	case TN_EVENT_TAG_RX_CANCEL:
		kfilnd_tn_status_update(tn, -ETIMEDOUT,
					LNET_MSG_STATUS_REMOTE_TIMEOUT);
-		kfilnd_peer_down(tn->peer);
+		kfilnd_peer_down(tn->tn_kp);
		break;
 
	case TN_EVENT_TAG_RX_FAIL:
@@ -1466,9 +1467,9 @@ struct kfilnd_transaction *kfilnd_tn_alloc(struct kfilnd_dev *dev, int cpt,
		tn->tn_mr_key = rc;
	}
 
-	tn->peer = kfilnd_peer_get(dev, target_nid);
-	if (IS_ERR(tn->peer)) {
-		rc = PTR_ERR(tn->peer);
+	tn->tn_kp = kfilnd_peer_get(dev, target_nid);
+	if (IS_ERR(tn->tn_kp)) {
+		rc = PTR_ERR(tn->tn_kp);
		goto err_put_mr_key;
	}
 
-- 
1.8.3.1
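
Illustrative sketch (not part of the applied change): the helper below is a made-up example showing the kind of call site the rename produces; the struct members, CDEBUG(), and libcfs_nid2str() are the ones used throughout the patch, but the function kfilnd_peer_log_example itself is hypothetical. With the kp_ prefix and the conventional 'kp' pointer name, every access to a struct kfilnd_peer member is an unambiguous search target (for example, searching for "kp_last_alive" matches only kfilnd_peer users, whereas the old "last_alive" name collided with similarly named fields elsewhere).

/* Hypothetical example (not in the patch): log a peer using the renamed,
 * kp_-prefixed members of struct kfilnd_peer.
 */
static void kfilnd_peer_log_example(struct kfilnd_peer *kp)
{
	CDEBUG(D_NET, "%s(0x%llx) version %u last alive %lld\n",
	       libcfs_nid2str(kp->kp_nid), kp->kp_addr,
	       kp->kp_version, kp->kp_last_alive);
}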