};
struct kfilnd_peer {
- struct rhash_head node;
- struct rcu_head rcu_head;
- struct kfilnd_dev *dev;
- lnet_nid_t nid;
- kfi_addr_t addr;
- atomic_t rx_base;
- atomic_t remove_peer;
- refcount_t cnt;
- time64_t last_alive;
- u16 version;
- u32 local_session_key;
- u32 remote_session_key;
+ struct rhash_head kp_node;
+ struct rcu_head kp_rcu_head;
+ struct kfilnd_dev *kp_dev;
+ lnet_nid_t kp_nid;
+ kfi_addr_t kp_addr;
+ atomic_t kp_rx_base;
+ atomic_t kp_remove_peer;
+ refcount_t kp_cnt;
+ time64_t kp_last_alive;
+ u16 kp_version;
+ u32 kp_local_session_key;
+ u32 kp_remote_session_key;
};
-static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *peer)
+static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *kp)
{
- return peer->version == 0;
+ return kp->kp_version == 0;
}
-static inline void kfilnd_peer_set_version(struct kfilnd_peer *peer,
+static inline void kfilnd_peer_set_version(struct kfilnd_peer *kp,
u16 version)
{
- peer->version = version;
+ kp->kp_version = version;
}
-static inline void kfilnd_peer_set_remote_session_key(struct kfilnd_peer *peer,
+static inline void kfilnd_peer_set_remote_session_key(struct kfilnd_peer *kp,
u32 session_key)
{
- peer->remote_session_key = session_key;
+ kp->kp_remote_session_key = session_key;
}
struct kfilnd_fab {
(ep)->end_context_id, ##__VA_ARGS__)
#define KFILND_TN_PEER_VALID(tn) \
- !IS_ERR_OR_NULL((tn)->peer)
+ !IS_ERR_OR_NULL((tn)->tn_kp)
#define KFILND_TN_DIR_DEBUG(tn, fmt, dir, ...) \
CDEBUG(D_NET, "Transaction ID %p: %s:%u %s %s:%llu " fmt "\n", \
(tn), \
libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
(tn)->tn_ep->end_context_id, dir, \
- libcfs_nid2str((tn)->peer->nid), \
+ libcfs_nid2str((tn)->tn_kp->kp_nid), \
KFILND_TN_PEER_VALID(tn) ? \
- KFILND_RX_CONTEXT((tn)->peer->addr) : 0, \
+ KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
##__VA_ARGS__)
#define KFILND_TN_DEBUG(tn, fmt, ...) \
(tn), \
libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
(tn)->tn_ep->end_context_id, dir, \
- libcfs_nid2str((tn)->peer->nid), \
+ libcfs_nid2str((tn)->tn_kp->kp_nid), \
KFILND_TN_PEER_VALID(tn) ? \
- KFILND_RX_CONTEXT((tn)->peer->addr) : 0, \
+ KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
##__VA_ARGS__)
#define KFILND_TN_ERROR(tn, fmt, ...) \
/* Transaction send message and target address. */
kfi_addr_t tn_target_addr;
- struct kfilnd_peer *peer;
+ struct kfilnd_peer *tn_kp;
struct kfilnd_transaction_msg tn_tx_msg;
/* Transaction multi-receive buffer and associated receive message. */
#include "kfilnd_dev.h"
static const struct rhashtable_params peer_cache_params = {
- .head_offset = offsetof(struct kfilnd_peer, node),
- .key_offset = offsetof(struct kfilnd_peer, nid),
- .key_len = sizeof_field(struct kfilnd_peer, nid),
+ .head_offset = offsetof(struct kfilnd_peer, kp_node),
+ .key_offset = offsetof(struct kfilnd_peer, kp_nid),
+ .key_len = sizeof_field(struct kfilnd_peer, kp_nid),
.automatic_shrinking = true,
};
*/
static void kfilnd_peer_free(void *ptr, void *arg)
{
- struct kfilnd_peer *peer = ptr;
+ struct kfilnd_peer *kp = ptr;
CDEBUG(D_NET, "%s(0x%llx) peer entry freed\n",
- libcfs_nid2str(peer->nid), peer->addr);
+ libcfs_nid2str(kp->kp_nid), kp->kp_addr);
- kfi_av_remove(peer->dev->kfd_av, &peer->addr, 1, 0);
+ kfi_av_remove(kp->kp_dev->kfd_av, &kp->kp_addr, 1, 0);
- kfree_rcu(peer, rcu_head);
+ kfree_rcu(kp, kp_rcu_head);
}
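/* Reviewer note (not part of the patch): kfree_rcu() is used here because
 * peer entries are looked up with rhashtable_lookup_fast() under
 * rcu_read_lock() (see kfilnd_peer_put() and kfilnd_peer_get() below), so
 * the actual free must be deferred past an RCU grace period to avoid a
 * use-after-free by concurrent lookups.
 */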
/**
* kfilnd_peer_down() - Mark a peer as down.
- * @peer: Peer to be downed.
+ * @kp: Peer to be downed.
*/
-void kfilnd_peer_down(struct kfilnd_peer *peer)
+void kfilnd_peer_down(struct kfilnd_peer *kp)
{
- if (atomic_cmpxchg(&peer->remove_peer, 0, 1) == 0) {
+ if (atomic_cmpxchg(&kp->kp_remove_peer, 0, 1) == 0) {
struct lnet_nid peer_nid;
- lnet_nid4_to_nid(peer->nid, &peer_nid);
+ lnet_nid4_to_nid(kp->kp_nid, &peer_nid);
CDEBUG(D_NET, "%s(0x%llx) marked for removal from peer cache\n",
- libcfs_nidstr(&peer_nid), peer->addr);
+ libcfs_nidstr(&peer_nid), kp->kp_addr);
- lnet_notify(peer->dev->kfd_ni, &peer_nid, false, false,
- peer->last_alive);
+ lnet_notify(kp->kp_dev->kfd_ni, &peer_nid, false, false,
+ kp->kp_last_alive);
}
}
/**
* kfilnd_peer_put() - Return a reference for a peer.
- * @peer: Peer where the reference should be returned.
+ * @kp: Peer where the reference should be returned.
*/
-void kfilnd_peer_put(struct kfilnd_peer *peer)
+void kfilnd_peer_put(struct kfilnd_peer *kp)
{
rcu_read_lock();
/* Return allocation reference if the peer was marked for removal. */
- if (atomic_cmpxchg(&peer->remove_peer, 1, 2) == 1) {
- rhashtable_remove_fast(&peer->dev->peer_cache, &peer->node,
+ if (atomic_cmpxchg(&kp->kp_remove_peer, 1, 2) == 1) {
+ rhashtable_remove_fast(&kp->kp_dev->peer_cache, &kp->kp_node,
peer_cache_params);
- refcount_dec(&peer->cnt);
+ refcount_dec(&kp->kp_cnt);
CDEBUG(D_NET, "%s(0x%llx) removed from peer cache\n",
- libcfs_nid2str(peer->nid), peer->addr);
+ libcfs_nid2str(kp->kp_nid), kp->kp_addr);
}
- if (refcount_dec_and_test(&peer->cnt))
- kfilnd_peer_free(peer, NULL);
+ if (refcount_dec_and_test(&kp->kp_cnt))
+ kfilnd_peer_free(kp, NULL);
rcu_read_unlock();
}
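/* Illustrative lifecycle sketch (reviewer note, not part of the patch),
 * using the renamed kp_cnt/kp_remove_peer fields:
 *
 *   kp = kfilnd_peer_get(dev, nid);  // first lookup allocates: kp_cnt == 2
 *                                    // (allocation ref + this get's ref)
 *   kfilnd_peer_down(kp);            // kp_remove_peer: 0 -> 1
 *   kfilnd_peer_put(kp);             // observes the 1 -> 2 transition,
 *                                    // removes the cache entry and drops the
 *                                    // allocation ref, then drops the get
 *                                    // ref; kp_cnt hits 0 and the peer is
 *                                    // freed via kfree_rcu()
 */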
-u16 kfilnd_peer_target_rx_base(struct kfilnd_peer *peer)
+u16 kfilnd_peer_target_rx_base(struct kfilnd_peer *kp)
{
- int cpt = lnet_cpt_of_nid(peer->nid, peer->dev->kfd_ni);
- struct kfilnd_ep *ep = peer->dev->cpt_to_endpoint[cpt];
+ int cpt = lnet_cpt_of_nid(kp->kp_nid, kp->kp_dev->kfd_ni);
+ struct kfilnd_ep *ep = kp->kp_dev->cpt_to_endpoint[cpt];
return ep->end_context_id;
}
int rc;
u32 nid_addr = LNET_NIDADDR(nid);
u32 net_num = LNET_NETNUM(LNET_NIDNET(nid));
- struct kfilnd_peer *peer;
+ struct kfilnd_peer *kp;
struct kfilnd_peer *clash_peer;
again:
/* Check the cache for a match. */
rcu_read_lock();
- peer = rhashtable_lookup_fast(&dev->peer_cache, &nid,
+ kp = rhashtable_lookup_fast(&dev->peer_cache, &nid,
peer_cache_params);
- if (peer && !refcount_inc_not_zero(&peer->cnt))
- peer = NULL;
+ if (kp && !refcount_inc_not_zero(&kp->kp_cnt))
+ kp = NULL;
rcu_read_unlock();
- if (peer)
- return peer;
+ if (kp)
+ return kp;
/* Allocate a new peer for the cache. */
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
- if (!peer) {
+ kp = kzalloc(sizeof(*kp), GFP_KERNEL);
+ if (!kp) {
rc = -ENOMEM;
goto err;
}
/* Use the KFI address vector to translate node and service string into
* a KFI address handle.
*/
- rc = kfi_av_insertsvc(dev->kfd_av, node, service, &peer->addr, 0, dev);
+ rc = kfi_av_insertsvc(dev->kfd_av, node, service, &kp->kp_addr, 0, dev);
kfree(service);
kfree(node);
goto err_free_peer;
}
- peer->dev = dev;
- peer->nid = nid;
- atomic_set(&peer->rx_base, 0);
- atomic_set(&peer->remove_peer, 0);
- peer->local_session_key = kfilnd_dev_get_session_key(dev);
+ kp->kp_dev = dev;
+ kp->kp_nid = nid;
+ atomic_set(&kp->kp_rx_base, 0);
+ atomic_set(&kp->kp_remove_peer, 0);
+ kp->kp_local_session_key = kfilnd_dev_get_session_key(dev);
/* One reference for the allocation and another for get operation
* performed for this peer. The allocation reference is returned when
* the entry is marked for removal.
*/
- refcount_set(&peer->cnt, 2);
+ refcount_set(&kp->kp_cnt, 2);
clash_peer = rhashtable_lookup_get_insert_fast(&dev->peer_cache,
- &peer->node,
+ &kp->kp_node,
peer_cache_params);
if (clash_peer) {
- kfi_av_remove(dev->kfd_av, &peer->addr, 1, 0);
- kfree(peer);
+ kfi_av_remove(dev->kfd_av, &kp->kp_addr, 1, 0);
+ kfree(kp);
if (IS_ERR(clash_peer)) {
rc = PTR_ERR(clash_peer);
}
}
- kfilnd_peer_alive(peer);
+ kfilnd_peer_alive(kp);
CDEBUG(D_NET, "%s(0x%llx) peer entry allocated\n",
- libcfs_nid2str(peer->nid), peer->addr);
+ libcfs_nid2str(kp->kp_nid), kp->kp_addr);
- return peer;
+ return kp;
err_free_node_str:
kfree(node);
err_free_peer:
- kfree(peer);
+ kfree(kp);
err:
return ERR_PTR(rc);
}
/**
* kfilnd_peer_get_kfi_addr() - Return kfi_addr_t used for eager untagged send
* kfi operations.
- * @peer: Peer struct.
+ * @kp: Peer struct.
*
* The returned kfi_addr_t is updated to target a specific RX context. The
 * address returned by this function should not be used if a specific RX context
*
* Return: kfi_addr_t.
*/
-kfi_addr_t kfilnd_peer_get_kfi_addr(struct kfilnd_peer *peer)
+kfi_addr_t kfilnd_peer_get_kfi_addr(struct kfilnd_peer *kp)
{
/* TODO: Support RX count by round-robining the generated kfi_addr_t's
* across multiple RX contexts using RX base and RX count.
*/
- return kfi_rx_addr(KFILND_BASE_ADDR(peer->addr),
- atomic_read(&peer->rx_base), KFILND_FAB_RX_CTX_BITS);
+ return kfi_rx_addr(KFILND_BASE_ADDR(kp->kp_addr),
+ atomic_read(&kp->kp_rx_base),
+ KFILND_FAB_RX_CTX_BITS);
}
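/* Reviewer sketch (not part of the patch): kfi_rx_addr() is assumed to
 * follow libfabric's fi_rx_addr() convention for scalable endpoints, i.e.
 * roughly:
 *
 *   addr = ((u64)rx_index << (64 - KFILND_FAB_RX_CTX_BITS)) | base_addr;
 *
 * where base_addr is KFILND_BASE_ADDR(kp->kp_addr) and rx_index is
 * atomic_read(&kp->kp_rx_base). Updating kp_rx_base via
 * kfilnd_peer_update_rx_contexts() therefore redirects subsequent untagged
 * sends to the peer's preferred RX context.
 */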
/**
* kfilnd_peer_update_rx_contexts() - Update the RX context for a peer.
- * @peer: Peer to be updated.
+ * @kp: Peer to be updated.
* @rx_base: New RX base for peer.
* @rx_count: New RX count for peer.
*/
-void kfilnd_peer_update_rx_contexts(struct kfilnd_peer *peer,
+void kfilnd_peer_update_rx_contexts(struct kfilnd_peer *kp,
unsigned int rx_base, unsigned int rx_count)
{
/* TODO: Support RX count. */
LASSERT(rx_count > 0);
- atomic_set(&peer->rx_base, rx_base);
+ atomic_set(&kp->kp_rx_base, rx_base);
}
/**
* kfilnd_peer_alive() - Update when the peer was last alive.
- * @peer: Peer to be updated.
+ * @kp: Peer to be updated.
*/
-void kfilnd_peer_alive(struct kfilnd_peer *peer)
+void kfilnd_peer_alive(struct kfilnd_peer *kp)
{
- peer->last_alive = ktime_get_seconds();
+ kp->kp_last_alive = ktime_get_seconds();
/* Ensure timestamp is committed to memory before used. */
smp_mb();
/* Pack the protocol header and payload. */
msg->proto.hello.version = KFILND_MSG_VERSION;
- msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->peer);
- msg->proto.hello.session_key = tn->peer->local_session_key;
+ msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->tn_kp);
+ msg->proto.hello.session_key = tn->tn_kp->kp_local_session_key;
/* TODO: Support multiple RX contexts per peer. */
msg->proto.hello.rx_count = 1;
offsetof(struct kfilnd_msg, proto);
msg->cksum = NO_CHECKSUM;
msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
- msg->dstnid = tn->peer->nid;
+ msg->dstnid = tn->tn_kp->kp_nid;
/* Checksum entire message. */
msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
struct kfilnd_msg *msg = tn->tn_tx_msg.msg;
/* Pack the protocol header and payload. */
- msg->proto.hello.version = tn->peer->version;
- msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->peer);
- msg->proto.hello.session_key = tn->peer->local_session_key;
+ msg->proto.hello.version = tn->tn_kp->kp_version;
+ msg->proto.hello.rx_base = kfilnd_peer_target_rx_base(tn->tn_kp);
+ msg->proto.hello.session_key = tn->tn_kp->kp_local_session_key;
/* TODO: Support multiple RX contexts per peer. */
msg->proto.hello.rx_count = 1;
offsetof(struct kfilnd_msg, proto);
msg->cksum = NO_CHECKSUM;
msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
- msg->dstnid = tn->peer->nid;
+ msg->dstnid = tn->tn_kp->kp_nid;
/* Checksum entire message. */
msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
offsetof(struct kfilnd_msg, proto);
msg->cksum = NO_CHECKSUM;
msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
- msg->dstnid = tn->peer->nid;
+ msg->dstnid = tn->tn_kp->kp_nid;
/* Checksum entire message. */
msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
msg->nob = offsetof(struct kfilnd_msg, proto.immed.payload[tn->tn_nob]);
msg->cksum = NO_CHECKSUM;
msg->srcnid = lnet_nid_to_nid4(&tn->tn_ep->end_dev->kfd_ni->ni_nid);
- msg->dstnid = tn->peer->nid;
+ msg->dstnid = tn->tn_kp->kp_nid;
/* Checksum entire message. */
msg->cksum = kfilnd_tn_cksum(msg, msg->nob);
}
if (KFILND_TN_PEER_VALID(tn))
- kfilnd_peer_put(tn->peer);
+ kfilnd_peer_put(tn->tn_kp);
kfilnd_tn_record_state_change(tn);
kfilnd_tn_record_duration(tn);
switch (event) {
case TN_EVENT_INIT_BULK:
- tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->peer);
+ tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->tn_kp);
KFILND_TN_DEBUG(tn, "Using peer %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
kfilnd_tn_pack_bulk_req(tn);
case -EAGAIN:
KFILND_TN_DEBUG(tn,
"Need to replay post send to %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
return -EAGAIN;
default:
KFILND_TN_ERROR(tn,
"Failed to post send to %s(%#llx): rc=%d",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr, rc);
kfilnd_tn_status_update(tn, rc,
LNET_MSG_STATUS_LOCAL_ERROR);
/* For new peers, send a hello request message and queue the true LNet
* message for replay.
*/
- if (kfilnd_peer_is_new_peer(tn->peer) &&
+ if (kfilnd_peer_is_new_peer(tn->tn_kp) &&
(event == TN_EVENT_INIT_IMMEDIATE || event == TN_EVENT_INIT_BULK)) {
remaining_time = max_t(ktime_t, 0,
tn->deadline - ktime_get_seconds());
*/
if (remaining_time > 0) {
KFILND_TN_DEBUG(tn, "%s hello response pending",
- libcfs_nid2str(tn->peer->nid));
+ libcfs_nid2str(tn->tn_kp->kp_nid));
return -EAGAIN;
}
switch (event) {
case TN_EVENT_INIT_IMMEDIATE:
case TN_EVENT_TX_HELLO:
- tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->peer);
+ tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->tn_kp);
KFILND_TN_DEBUG(tn, "Using peer %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
if (event == TN_EVENT_INIT_IMMEDIATE)
*/
case -EAGAIN:
KFILND_TN_DEBUG(tn, "Need to replay send to %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
return -EAGAIN;
default:
KFILND_TN_ERROR(tn,
"Failed to post send to %s(%#llx): rc=%d",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr, rc);
kfilnd_tn_status_update(tn, rc,
LNET_MSG_STATUS_LOCAL_ERROR);
* requires dropping the incoming message and initiating a hello
* handshake.
*/
- if (kfilnd_peer_is_new_peer(tn->peer)) {
+ if (kfilnd_peer_is_new_peer(tn->tn_kp)) {
rc = kfilnd_send_hello_request(tn->tn_ep->end_dev,
tn->tn_ep->end_cpt,
- tn->peer->nid);
+ tn->tn_kp->kp_nid);
if (rc)
KFILND_TN_ERROR(tn,
"Failed to send hello request: rc=%d",
*/
KFILND_TN_ERROR(tn,
"Dropping message from %s due to stale peer",
- libcfs_nid2str(tn->peer->nid));
+ libcfs_nid2str(tn->tn_kp->kp_nid));
kfilnd_tn_status_update(tn, -EPROTO,
LNET_MSG_STATUS_LOCAL_DROPPED);
rc = 0;
goto out;
}
- LASSERT(kfilnd_peer_is_new_peer(tn->peer) == false);
+ LASSERT(kfilnd_peer_is_new_peer(tn->tn_kp) == false);
msg = tn->tn_rx_msg.msg;
/* Update the NID address with the new preferred RX context. */
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
/* Pass message up to LNet
* The TN will be reused in this call chain so we need to
switch (msg->type) {
case KFILND_MSG_HELLO_REQ:
- kfilnd_peer_update_rx_contexts(tn->peer,
+ kfilnd_peer_update_rx_contexts(tn->tn_kp,
msg->proto.hello.rx_base,
msg->proto.hello.rx_count);
- kfilnd_peer_set_remote_session_key(tn->peer,
+ kfilnd_peer_set_remote_session_key(tn->tn_kp,
msg->proto.hello.session_key);
/* Negotiate kfilnd version used between peers. Fallback
* to the minimum implemented kfilnd version.
*/
- kfilnd_peer_set_version(tn->peer,
+ kfilnd_peer_set_version(tn->tn_kp,
min_t(__u16, KFILND_MSG_VERSION,
msg->proto.hello.version));
KFILND_TN_DEBUG(tn,
"Peer kfilnd version: %u; Local kfilnd version: %u; Negotiated kfilnd verions: %u",
msg->proto.hello.version,
- KFILND_MSG_VERSION, tn->peer->version);
+ KFILND_MSG_VERSION,
+ tn->tn_kp->kp_version);
- tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->peer);
+ tn->tn_target_addr = kfilnd_peer_get_kfi_addr(tn->tn_kp);
KFILND_TN_DEBUG(tn, "Using peer %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
kfilnd_tn_pack_hello_rsp(tn);
case -EAGAIN:
KFILND_TN_DEBUG(tn, "Need to replay send to %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
return -EAGAIN;
default:
KFILND_TN_ERROR(tn,
"Failed to post send to %s(%#llx): rc=%d",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr, rc);
kfilnd_tn_status_update(tn, rc,
LNET_MSG_STATUS_LOCAL_ERROR);
case KFILND_MSG_HELLO_RSP:
rc = 0;
- kfilnd_peer_update_rx_contexts(tn->peer,
+ kfilnd_peer_update_rx_contexts(tn->tn_kp,
msg->proto.hello.rx_base,
msg->proto.hello.rx_count);
- kfilnd_peer_set_remote_session_key(tn->peer,
+ kfilnd_peer_set_remote_session_key(tn->tn_kp,
msg->proto.hello.session_key);
- kfilnd_peer_set_version(tn->peer,
+ kfilnd_peer_set_version(tn->tn_kp,
msg->proto.hello.version);
KFILND_TN_DEBUG(tn, "Negotiated kfilnd version: %u",
msg->proto.hello.version);
hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
kfilnd_tn_status_update(tn, status, hstatus);
- kfilnd_peer_down(tn->peer);
+ kfilnd_peer_down(tn->tn_kp);
break;
case TN_EVENT_TX_OK:
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
break;
default:
/* Update the KFI address to use the response RX context. */
tn->tn_target_addr =
- kfi_rx_addr(KFILND_BASE_ADDR(tn->peer->addr),
+ kfi_rx_addr(KFILND_BASE_ADDR(tn->tn_kp->kp_addr),
tn->tn_response_rx, KFILND_FAB_RX_CTX_BITS);
KFILND_TN_DEBUG(tn, "Using peer %s(0x%llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
/* Initiate the RMA operation to push/pull the LNet payload or
KFILND_TN_DEBUG(tn,
"Need to replay tagged %s to %s(%#llx)",
tn->sink_buffer ? "read" : "write",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
return -EAGAIN;
KFILND_TN_ERROR(tn,
"Failed to post tagged %s to %s(%#llx): rc=%d",
tn->sink_buffer ? "read" : "write",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr, rc);
kfilnd_tn_status_update(tn, rc,
LNET_MSG_STATUS_LOCAL_ERROR);
case -EAGAIN:
KFILND_TN_DEBUG(tn,
"Need to replay tagged send to %s(%#llx)",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr);
return -EAGAIN;
default:
KFILND_TN_ERROR(tn,
"Failed to post tagged send to %s(%#llx): rc=%d",
- libcfs_nid2str(tn->peer->nid),
+ libcfs_nid2str(tn->tn_kp->kp_nid),
tn->tn_target_addr, rc);
kfilnd_tn_status_update(tn, rc,
LNET_MSG_STATUS_LOCAL_ERROR);
switch (event) {
case TN_EVENT_TX_OK:
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
kfilnd_tn_timeout_enable(tn);
kfilnd_tn_state_change(tn, TN_STATE_WAIT_TAG_COMP);
break;
hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
kfilnd_tn_status_update(tn, status, hstatus);
- kfilnd_peer_down(tn->peer);
+ kfilnd_peer_down(tn->tn_kp);
/* Need to cancel the tagged receive to prevent resources from
* being leaked.
status);
if (event == TN_EVENT_TX_OK) {
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
kfilnd_tn_finalize(tn, tn_released);
} else {
KFILND_TN_ERROR(tn, "Invalid %s event", tn_event_to_str(event));
switch (event) {
case TN_EVENT_TAG_TX_OK:
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
break;
case TN_EVENT_TAG_TX_FAIL:
hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
kfilnd_tn_status_update(tn, status, hstatus);
- kfilnd_peer_down(tn->peer);
+ kfilnd_peer_down(tn->tn_kp);
break;
default:
hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
kfilnd_tn_status_update(tn, status, hstatus);
- kfilnd_peer_down(tn->peer);
+ kfilnd_peer_down(tn->tn_kp);
break;
case TN_EVENT_TAG_TX_OK:
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
break;
default:
switch (event) {
case TN_EVENT_TX_FAIL:
- kfilnd_peer_down(tn->peer);
+ kfilnd_peer_down(tn->tn_kp);
break;
case TN_EVENT_TX_OK:
- kfilnd_peer_alive(tn->peer);
+ kfilnd_peer_alive(tn->tn_kp);
break;
case TN_EVENT_TAG_RX_FAIL:
case TN_EVENT_TAG_RX_CANCEL:
kfilnd_tn_status_update(tn, -ETIMEDOUT,
LNET_MSG_STATUS_REMOTE_TIMEOUT);
- kfilnd_peer_down(tn->peer);
+ kfilnd_peer_down(tn->tn_kp);
break;
case TN_EVENT_TAG_RX_FAIL:
tn->tn_mr_key = rc;
}
- tn->peer = kfilnd_peer_get(dev, target_nid);
- if (IS_ERR(tn->peer)) {
- rc = PTR_ERR(tn->peer);
+ tn->tn_kp = kfilnd_peer_get(dev, target_nid);
+ if (IS_ERR(tn->tn_kp)) {
+ rc = PTR_ERR(tn->tn_kp);
goto err_put_mr_key;
}