4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright 2022 Hewlett Packard Enterprise Development LP
26 * This file is part of Lustre, http://www.lustre.org/
29 * kfilnd main interface.
35 #include <linux/version.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/kthread.h>
40 #include <linux/string.h>
41 #include <linux/stat.h>
42 #include <linux/errno.h>
43 #include <linux/unistd.h>
44 #include <linux/uio.h>
45 #include <linux/rwsem.h>
46 #include <linux/mutex.h>
47 #include <linux/rhashtable.h>
48 #include <linux/workqueue.h>
49 #include <linux/debugfs.h>
50 #include <linux/seq_file.h>
51 #include <linux/ktime.h>
53 #include <asm/uaccess.h>
56 #include <linux/init.h>
58 #include <linux/file.h>
59 #include <linux/stat.h>
60 #include <linux/list.h>
61 #include <linux/kmod.h>
62 #include <linux/sysctl.h>
63 #include <linux/pci.h>
68 #define KFILND_VERSION "0.2.0"
70 #define DEBUG_SUBSYSTEM S_LND
72 #include <libcfs/libcfs.h>
73 #include <libcfs/linux/linux-net.h>
74 #include <lnet/lib-lnet.h>
75 #include "kfi_endpoint.h"
76 #include "kfi_errno.h"
78 #include "kfi_tagged.h"
79 #include "kfi_cxi_ext.h"
/* KFILND CFS fail range 0xF100 - 0xF1FF. */
/* Fault-injection points used with CFS fail_loc to exercise kfilnd error
 * paths in test. The *_EVENT values appear to fail the kfabric completion
 * event, the bare values fail the operation itself, and the *_EAGAIN values
 * force a retryable failure -- NOTE(review): confirm against the fail_loc
 * checks in the kfilnd implementation files.
 */
#define CFS_KFI_FAIL_SEND_EVENT 0xF100
#define CFS_KFI_FAIL_READ_EVENT 0xF101
#define CFS_KFI_FAIL_WRITE_EVENT 0xF102
#define CFS_KFI_FAIL_TAGGED_SEND_EVENT 0xF103
#define CFS_KFI_FAIL_TAGGED_RECV_EVENT 0xF104
#define CFS_KFI_FAIL_BULK_TIMEOUT 0xF105
#define CFS_KFI_FAIL_SEND 0xF106
#define CFS_KFI_FAIL_READ 0xF107
#define CFS_KFI_FAIL_WRITE 0xF108
#define CFS_KFI_FAIL_TAGGED_SEND 0xF109
#define CFS_KFI_FAIL_TAGGED_RECV 0xF10A
#define CFS_KFI_FAIL_SEND_EAGAIN 0xF10B
#define CFS_KFI_FAIL_READ_EAGAIN 0xF10C
#define CFS_KFI_FAIL_WRITE_EAGAIN 0xF10D
#define CFS_KFI_FAIL_TAGGED_SEND_EAGAIN 0xF10E
#define CFS_KFI_FAIL_TAGGED_RECV_EAGAIN 0xF10F
#define CFS_KFI_FAIL_TAGGED_RECV_CANCEL_EAGAIN 0xF110
#define CFS_KFI_FAIL_RECV_EAGAIN 0xF111
#define CFS_KFI_FAIL_RECV 0xF112
#define CFS_KFI_FAIL_MSG_UNPACK 0xF113
#define CFS_KFI_FAIL_MSG_TYPE 0xF114
#define CFS_KFI_FAIL_WAIT_SEND_COMP1 0xF115
#define CFS_KFI_FAIL_WAIT_SEND_COMP2 0xF116

/* Maximum number of transaction keys supported. */
#define KFILND_EP_KEY_BITS 16U
#define KFILND_EP_KEY_MAX (BIT(KFILND_EP_KEY_BITS) - 1)

/* Some constants which should be turned into tunables */
/* Buffer size used for immediate (inline) messages -- one page. */
#define KFILND_IMMEDIATE_MSG_SIZE 4096

#define KFILND_MY_PROCID 49152

/* 256 Rx contexts max */
#define KFILND_FAB_RX_CTX_BITS 8

/* Get the KFI base address from a KFI RX address. RX context information is
 * stored in the MSBs of the KFI address.
 */
#define KFILND_BASE_ADDR(addr) \
	((addr) & ((1UL << (64 - KFILND_FAB_RX_CTX_BITS)) - 1))

/* INT64_MAX sentinel used when resetting a minimum-duration statistic. */
#define MIN_DURATION_RESET 0x7fffffffffffffffLL
/* States used by all kfilnd structures */
enum kfilnd_object_states {
	KFILND_STATE_UNINITIALIZED,
	KFILND_STATE_INITIALIZED,
	KFILND_STATE_SHUTTING_DOWN
};

/* Netlink attribute IDs for the per-NI kfilnd tunables. */
enum kfilnd_ni_lnd_tunables_attr {
	LNET_NET_KFILND_TUNABLES_ATTR_UNSPEC = 0,

	LNET_NET_KFILND_TUNABLES_ATTR_PROV_MAJOR,
	LNET_NET_KFILND_TUNABLES_ATTR_PROV_MINOR,
	LNET_NET_KFILND_TUNABLES_ATTR_AUTH_KEY,
	LNET_NET_KFILND_TUNABLES_ATTR_TRAFFIC_CLASS,
	__LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE,
};

/* Highest valid attribute ID (excludes the MAX_PLUS_ONE sentinel). */
#define LNET_NET_KFILND_TUNABLES_ATTR_MAX (__LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
/* debugfs root directory holding kfilnd statistics files. */
extern struct dentry *kfilnd_debug_dir;

/* debugfs file_operations for the per-device statistics/reset files. */
extern const struct file_operations kfilnd_initiator_state_stats_file_ops;
extern const struct file_operations kfilnd_target_state_stats_file_ops;
extern const struct file_operations kfilnd_target_stats_file_ops;
extern const struct file_operations kfilnd_initiator_stats_file_ops;
extern const struct file_operations kfilnd_reset_stats_file_ops;

/* Workqueue shared by all kfilnd devices. */
extern struct workqueue_struct *kfilnd_wq;

/* Module parameters -- presumably defined in the tunables source file;
 * NOTE(review): confirm location.
 */
extern unsigned int cksum;
extern unsigned int tx_scale_factor;
extern unsigned int rx_cq_scale_factor;
extern unsigned int tx_cq_scale_factor;
extern unsigned int eq_size;
extern unsigned int immediate_rx_buf_count;

/* Validate/apply tunables; NOTE(review): return convention assumed to be
 * 0 on success / negative errno on failure -- confirm with implementation.
 */
int kfilnd_tunables_setup(struct lnet_ni *ni);
int kfilnd_tunables_init(void);

/* Forward declaration; full definition appears later in this header. */
struct kfilnd_transaction;
/* Multi-receive buffers for immediate receives */
struct kfilnd_immediate_buffer {
	/* Size of the posted receive buffer. */
	size_t immed_buf_size;
	/* Page backing the receive buffer. */
	struct page *immed_buf_page;
	/* When true, do not repost this buffer after it is consumed. */
	bool immed_no_repost;
	/* Linkage on the endpoint's immediate-buffer replay list. */
	struct list_head replay_entry;
	/* Endpoint that owns this buffer. */
	struct kfilnd_ep *immed_end;

/* Count of outstanding immediate receives across the module. */
extern atomic_t kfilnd_rx_count;

/* Per-CPU work item used to process completions for one CQ. */
struct kfilnd_cq_work {
	struct kfilnd_cq *cq;
	unsigned int work_cpu;
	struct work_struct work;

/* NOTE(review): the struct kfilnd_cq opening declaration appears truncated
 * in this copy of the file; the fields below belong to it.
 */
	struct kfilnd_ep *ep;
	unsigned int cq_work_count;
	struct kfilnd_cq_work cq_works[];
/* NOTE(review): the struct kfilnd_ep opening declaration appears truncated
 * in this copy of the file; there is presumably one endpoint per CPT.
 */
	/* The contexts for this CPT */
	struct kfid_ep *end_tx;
	struct kfid_ep *end_rx;

	/* Corresponding CQs */
	struct kfilnd_cq *end_tx_cq;
	struct kfilnd_cq *end_rx_cq;

	/* Specific config values for this endpoint */
	struct kfilnd_dev *end_dev;

	/* List of transactions. */
	struct list_head tn_list;
	spinlock_t tn_list_lock;

	/* Replay machinery: transactions and immediate buffers queued for
	 * re-posting, presumably re-driven by replay_timer/replay_work --
	 * NOTE(review): confirm with the endpoint implementation.
	 */
	struct list_head tn_replay;
	struct list_head imm_buffer_replay;
	spinlock_t replay_lock;
	struct timer_list replay_timer;
	struct work_struct replay_work;
	atomic_t replay_count;

	/* Key used to build the tag for tagged buffers. */

	/* Pre-posted immediate buffers */
	struct kfilnd_immediate_buffer end_immed_bufs[];
/* Peer state machine values (stored in the atomic kp_state field). */
/* Newly allocated peer */
#define KP_STATE_NEW 0x1
/* Peer after successful hello handshake */
#define KP_STATE_UPTODATE 0x2
/* Peer experienced some sort of network failure */
#define KP_STATE_STALE 0x3
/* We suspect this peer is actually down or otherwise unreachable */
#define KP_STATE_DOWN 0x4

/* NOTE(review): the struct kfilnd_peer opening declaration appears truncated
 * in this copy of the file; the fields below are entries of the per-device
 * NID-keyed peer cache.
 */
	/* Hash-table linkage in the device peer_cache. */
	struct rhash_head kp_node;
	/* RCU head used to defer freeing until readers are done. */
	struct rcu_head kp_rcu_head;
	/* Owning device. */
	struct kfilnd_dev *kp_dev;
	/* Non-zero once the peer has been flagged for removal. */
	atomic_t kp_remove_peer;
	/* Last time (seconds) this peer was known to be alive. */
	time64_t kp_last_alive;
	/* Session keys exchanged during the hello handshake. */
	u32 kp_local_session_key;
	u32 kp_remote_session_key;
	/* Non-zero while a hello request to this peer is outstanding. */
	atomic_t kp_hello_pending;
	/* Timestamp (seconds) of the last hello sent. */
	time64_t kp_hello_ts;
257 static inline bool kfilnd_peer_deleted(struct kfilnd_peer *kp)
259 return atomic_read(&kp->kp_remove_peer) > 0;
262 /* Sets kp_hello_sending
263 * Returns true if it was already set
264 * Returns false otherwise
266 static inline bool kfilnd_peer_set_check_hello_pending(struct kfilnd_peer *kp)
268 return (atomic_cmpxchg(&kp->kp_hello_pending, 0, 1) == 1);
271 static inline void kfilnd_peer_clear_hello_pending(struct kfilnd_peer *kp)
273 atomic_set(&kp->kp_hello_pending, 0);
276 static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *kp)
278 return atomic_read(&kp->kp_state) == KP_STATE_NEW;
281 static inline bool kfilnd_peer_needs_throttle(struct kfilnd_peer *kp)
283 unsigned int kp_state = atomic_read(&kp->kp_state);
285 return (kp_state == KP_STATE_NEW || kp_state == KP_STATE_DOWN);
288 /* Peer needs hello if it is not up to date and there is not already a hello
291 * Called from the send path and the receive path. When called from send path
292 * we additionally consider the peer's last alive value, and proactively
293 * handshake peers that we haven't talked to in a while.
295 * If hello was sent more than LND timeout seconds ago, and we never received a
296 * response, then send another one.
298 static inline bool kfilnd_peer_needs_hello(struct kfilnd_peer *kp,
299 bool proactive_handshake)
301 if (atomic_read(&kp->kp_hello_pending) == 0) {
302 if (atomic_read(&kp->kp_state) != KP_STATE_UPTODATE)
304 else if (proactive_handshake &&
305 ktime_before(kp->kp_last_alive +
306 lnet_get_lnd_timeout() * 2,
307 ktime_get_seconds()))
309 } else if (ktime_before(kp->kp_hello_ts + lnet_get_lnd_timeout(),
310 ktime_get_seconds())) {
311 /* Sent hello but never received reply */
313 "No response from %s(%p):0x%llx after %lld\n",
314 libcfs_nid2str(kp->kp_nid), kp, kp->kp_addr,
315 ktime_sub(ktime_get_seconds(), kp->kp_hello_ts));
317 kfilnd_peer_clear_hello_pending(kp);
/* NOTE(review): the struct kfilnd_fab opening declaration appears truncated
 * in this copy of the file. A fabric tracks its domains and the underlying
 * kfid_fabric object.
 */
	struct list_head entry;
	struct list_head dom_list;
	struct mutex dom_list_lock;
	struct kfid_fabric *fabric;

/* NOTE(review): the struct kfilnd_dom opening declaration appears truncated
 * in this copy of the file. A domain tracks its devices and the underlying
 * kfid_domain object.
 */
	struct list_head entry;
	struct list_head dev_list;

	struct kfilnd_fab *fab;
	struct kfid_domain *domain;

/* Transaction States */
/* NOTE(review): the enum tn_states opening declaration and several
 * enumerators appear truncated in this copy of the file.
 */
	/* Shared initiator and target states. */
	TN_STATE_WAIT_TAG_COMP,

	/* Initiator immediate states. */

	/* Initiator bulk states. */
	TN_STATE_TAGGED_RECV_POSTED,
	TN_STATE_SEND_FAILED,
	TN_STATE_WAIT_TIMEOUT_COMP,
	TN_STATE_WAIT_SEND_COMP,
	TN_STATE_WAIT_TIMEOUT_TAG_COMP,
	TN_STATE_WAIT_TAG_RMA_COMP,

	/* Invalid max value. */

/* Base duration state stats. */
struct kfilnd_tn_duration_stat {
	/* Sum of all recorded durations. */
	atomic64_t accumulated_duration;
	/* Number of samples folded into accumulated_duration. */
	atomic_t accumulated_count;
	/* Largest single duration observed. */
	atomic64_t max_duration;
	/* Smallest single duration observed (reset via MIN_DURATION_RESET). */
	atomic64_t min_duration;

/* Transaction state stats group into 22 buckets. Bucket zero corresponds to
 * LNet message size of 0 bytes and buckets 1 through 21 correspond to LNet
 * message sizes of 1 to 1048576 bytes increasing by a power of 2. LNet message
 * sizes are rounded up to the nearest power of 2.
 */
#define KFILND_DATA_SIZE_BUCKETS 22U
#define KFILND_DATA_SIZE_MAX_SIZE (1U << (KFILND_DATA_SIZE_BUCKETS - 2))
struct kfilnd_tn_data_size_duration_stats {
	struct kfilnd_tn_duration_stat data_size[KFILND_DATA_SIZE_BUCKETS];
/* Map an LNet message size onto one of the KFILND_DATA_SIZE_BUCKETS
 * histogram buckets: bucket 0 is reserved for zero-byte messages and the
 * top bucket catches all sizes >= KFILND_DATA_SIZE_MAX_SIZE.
 * NOTE(review): the power-of-two rounding body (and the declaration of
 * 'bit') appears truncated in this copy of the file.
 */
static inline unsigned int kfilnd_msg_len_to_data_size_bucket(size_t size)

	/* Oversized payloads all land in the final bucket. */
	if (size >= KFILND_DATA_SIZE_MAX_SIZE)
		return KFILND_DATA_SIZE_BUCKETS - 1;

	/* Round size up to the nearest power of 2. */

	return (unsigned int)bit;
/* One data size duration stat bucket for each transaction state. */
struct kfilnd_tn_state_data_size_duration_stats {
	struct kfilnd_tn_data_size_duration_stats state[TN_STATE_MAX];
/* NOTE(review): the struct kfilnd_dev opening declaration appears truncated
 * in this copy of the file. One kfilnd_dev backs one LNet NI.
 */
	struct list_head kfd_list;	/* chain on kfid_devs */
	struct lnet_ni *kfd_ni;
	enum kfilnd_object_states kfd_state;

	/* KFI LND domain the device is associated with. */
	struct kfilnd_dom *dom;

	/* Fields specific to kfabric operation */
	struct kfid_ep *kfd_sep;
	struct kfid_av *kfd_av;
	struct kfilnd_ep **kfd_endpoints;

	/* Map of LNet NI CPTs to endpoints. */
	struct kfilnd_ep **cpt_to_endpoint;

	/* Hash of LNet NIDs to KFI addresses. */
	struct rhashtable peer_cache;

	/* Per LNet NI states. */
	struct kfilnd_tn_state_data_size_duration_stats initiator_state_stats;
	struct kfilnd_tn_state_data_size_duration_stats target_state_stats;
	struct kfilnd_tn_data_size_duration_stats initiator_stats;
	struct kfilnd_tn_data_size_duration_stats target_stats;

	/* Per LNet NI debugfs stats. */
	struct dentry *dev_dir;
	struct dentry *initiator_state_stats_file;
	struct dentry *initiator_stats_file;
	struct dentry *target_state_stats_file;
	struct dentry *target_stats_file;
	struct dentry *reset_stats_file;

	/* Physical NIC address. */
	unsigned int nic_addr;
	/* Source of the local session keys handed out to peers. */
	atomic_t session_keys;

/* Invalid checksum value is treated as no checksum. */
/* TODO: Module parameter to disable checksum? */
#define NO_CHECKSUM 0x0
/* Hello message header. */
/* NOTE(review): the kfilnd_hello_msg field declarations appear truncated in
 * this copy of the file; only the field comments remain.
 */
struct kfilnd_hello_msg {
	/* Supported kfilnd version. */

	/* Base RX context the peer should use. */

	/* Session key used by peer. */

	/* RX context count peer can target. */

/* Immediate message header. */
struct kfilnd_immed_msg {
	/* Entire LNet header needed by the destination to match incoming
	 * message with a waiting MD.
	 */
	struct lnet_hdr_nid4 hdr;

	/* Entire LNet message payload. */

/* Bulk request message header. */
struct kfilnd_bulk_req_msg {
	/* Entire LNet header needed by the destination to match incoming
	 * message with a waiting MD.
	 */
	struct lnet_hdr_nid4 hdr;

	/* Specific RX context the target must target to push/pull LNet
	 * payload.
	 */

	/* Memory key needed by the target to push/pull LNet payload. */

/* Kfilnd message. Includes base transport header plus embedded protocol
 * headers.
 * NOTE(review): the kfilnd_msg scalar field declarations appear truncated in
 * this copy of the file; only the field comments remain.
 */
	/* Unique kfilnd magic. */

	/* Version of the kfilnd protocol. */

	/* Specific kfilnd protocol type. */

	/* Number of bytes in message. */

	/* Checksum of entire message. 0 is checksum disabled. */

	/* Message LNet source NID. */

	/* Message LNet target NID. */

	/* Embedded protocol headers. Must remain at bottom. */
	struct kfilnd_immed_msg immed;
	struct kfilnd_bulk_req_msg bulk_req;
	struct kfilnd_hello_msg hello;

#define KFILND_MSG_MAGIC LNET_PROTO_KFI_MAGIC	/* unique magic */

#define KFILND_MSG_VERSION_1 0x1
#define KFILND_MSG_VERSION KFILND_MSG_VERSION_1
/* Get the KFI RX context from a KFI RX address. RX context information is
 * stored in the MSBs of the KFI address.
 */
#define KFILND_RX_CONTEXT(addr) ((addr) >> (64 - KFILND_FAB_RX_CTX_BITS))

/* Endpoint-scoped logging: prefixes the NI NID and endpoint context ID. */
#define KFILND_EP_DEBUG(ep, fmt, ...) \
	CDEBUG(D_NET, "%s:%d " fmt "\n", \
	       libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
	       (ep)->end_context_id, ##__VA_ARGS__)

#define KFILND_EP_ERROR(ep, fmt, ...) \
	CNETERR("%s:%d " fmt "\n", \
		libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
		(ep)->end_context_id, ##__VA_ARGS__)

/* True when the transaction has a usable (non-error, non-NULL) peer. */
#define KFILND_TN_PEER_VALID(tn) \
	!IS_ERR_OR_NULL((tn)->tn_kp)

/* Transaction-scoped logging with a direction arrow ("->" for initiator,
 * "<-" for target).
 * NOTE(review): the trailing argument lines of these macros and the
 * do/while wrappers appear truncated in this copy of the file.
 */
#define KFILND_TN_DIR_DEBUG(tn, fmt, dir, ...) \
	CDEBUG(D_NET, "%s Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
	       msg_type_to_str(tn->msg_type), \
	       libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
	       (tn)->tn_ep->end_context_id, dir, \
	       libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
	       KFILND_TN_PEER_VALID(tn) ? \
		KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \

#define KFILND_TN_DEBUG(tn, fmt, ...) \
	if ((tn)->is_initiator) \
		KFILND_TN_DIR_DEBUG(tn, fmt, "->", ##__VA_ARGS__); \
		KFILND_TN_DIR_DEBUG(tn, fmt, "<-", ##__VA_ARGS__); \

#define KFILND_TN_DIR_ERROR(tn, fmt, dir, ...) \
	CNETERR("Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
		libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
		(tn)->tn_ep->end_context_id, dir, \
		libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
		KFILND_TN_PEER_VALID(tn) ? \
		KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \

#define KFILND_TN_ERROR(tn, fmt, ...) \
	if ((tn)->is_initiator) \
		KFILND_TN_DIR_ERROR(tn, fmt, "->", ##__VA_ARGS__); \
		KFILND_TN_DIR_ERROR(tn, fmt, "<-", ##__VA_ARGS__); \
/* TODO: Support NOOPs? */
/* Wire message types carried in the kfilnd_msg header.
 * NOTE(review): the enum's opening line, the INVALID enumerator and the
 * terminating MAX value appear truncated in this copy of the file.
 */
enum kfilnd_msg_type {
	/* Valid message types start at 1. */

	/* Valid message types. */
	KFILND_MSG_IMMEDIATE,
	KFILND_MSG_BULK_PUT_REQ,
	KFILND_MSG_BULK_GET_REQ,
	KFILND_MSG_HELLO_REQ,
	KFILND_MSG_HELLO_RSP,

	/* Invalid max value. */
607 static inline const char *msg_type_to_str(enum kfilnd_msg_type type)
609 static const char *str[KFILND_MSG_MAX] = {
610 [KFILND_MSG_INVALID] = "KFILND_MSG_INVALID",
611 [KFILND_MSG_IMMEDIATE] = "KFILND_MSG_IMMEDIATE",
612 [KFILND_MSG_BULK_PUT_REQ] = "KFILND_MSG_BULK_PUT_REQ",
613 [KFILND_MSG_BULK_GET_REQ] = "KFILND_MSG_BULK_GET_REQ",
614 [KFILND_MSG_HELLO_REQ] = "KFILND_MSG_HELLO_REQ",
615 [KFILND_MSG_HELLO_RSP] = "KFILND_MSG_HELLO_RSP",
618 if (type >= KFILND_MSG_MAX)
619 return "KFILND_MSG_INVALID";
624 static inline const char *tn_state_to_str(enum tn_states type)
626 static const char *str[TN_STATE_MAX] = {
627 [TN_STATE_INVALID] = "TN_STATE_INVALID",
628 [TN_STATE_IDLE] = "TN_STATE_IDLE",
629 [TN_STATE_WAIT_TAG_COMP] = "TN_STATE_WAIT_TAG_COMP",
630 [TN_STATE_IMM_SEND] = "TN_STATE_IMM_SEND",
631 [TN_STATE_TAGGED_RECV_POSTED] = "TN_STATE_TAGGED_RECV_POSTED",
632 [TN_STATE_SEND_FAILED] = "TN_STATE_SEND_FAILED",
633 [TN_STATE_WAIT_COMP] = "TN_STATE_WAIT_COMP",
634 [TN_STATE_WAIT_TIMEOUT_COMP] = "TN_STATE_WAIT_TIMEOUT_COMP",
635 [TN_STATE_WAIT_SEND_COMP] = "TN_STATE_WAIT_SEND_COMP",
636 [TN_STATE_WAIT_TIMEOUT_TAG_COMP] = "TN_STATE_WAIT_TIMEOUT_TAG_COMP",
637 [TN_STATE_FAIL] = "TN_STATE_FAIL",
638 [TN_STATE_IMM_RECV] = "TN_STATE_IMM_RECV",
639 [TN_STATE_WAIT_TAG_RMA_COMP] = "TN_STATE_WAIT_TAG_RMA_COMP",
/* Transaction Events */
/* NOTE(review): the enum tn_events opening declaration and several
 * enumerators appear truncated in this copy of the file.
 */
	/* Initiator events. */
	TN_EVENT_INIT_IMMEDIATE,
	TN_EVENT_TAG_RX_FAIL,
	TN_EVENT_TAG_RX_CANCEL,
	TN_EVENT_INIT_TAG_RMA,
	TN_EVENT_SKIP_TAG_RMA,
	TN_EVENT_TAG_TX_FAIL,

	/* Invalid max value. */
673 static inline const char *tn_event_to_str(enum tn_events type)
675 static const char *str[TN_EVENT_MAX] = {
676 [TN_EVENT_INVALID] = "TN_EVENT_INVALID",
677 [TN_EVENT_INIT_IMMEDIATE] = "TN_EVENT_INIT_IMMEDIATE",
678 [TN_EVENT_INIT_BULK] = "TN_EVENT_INIT_BULK",
679 [TN_EVENT_TX_HELLO] = "TN_EVENT_TX_HELLO",
680 [TN_EVENT_TX_OK] = "TN_EVENT_TX_OK",
681 [TN_EVENT_TX_FAIL] = "TN_EVENT_TX_FAIL",
682 [TN_EVENT_TAG_RX_OK] = "TN_EVENT_TAG_RX_OK",
683 [TN_EVENT_TAG_RX_FAIL] = "TN_EVENT_TAG_RX_FAIL",
684 [TN_EVENT_TAG_RX_CANCEL] = "TN_EVENT_TAG_RX_CANCEL",
685 [TN_EVENT_TIMEOUT] = "TN_EVENT_TIMEOUT",
686 [TN_EVENT_RX_HELLO] = "TN_EVENT_RX_HELLO",
687 [TN_EVENT_RX_OK] = "TN_EVENT_RX_OK",
688 [TN_EVENT_RX_FAIL] = "TN_EVENT_RX_FAIL",
689 [TN_EVENT_INIT_TAG_RMA] = "TN_EVENT_INIT_TAG_RMA",
690 [TN_EVENT_SKIP_TAG_RMA] = "TN_EVENT_SKIP_TAG_RMA",
691 [TN_EVENT_TAG_TX_FAIL] = "TN_EVENT_TAG_TX_FAIL",
/* Message descriptor pairing a wire message with its buffer state.
 * NOTE(review): the remaining fields and closing brace appear truncated in
 * this copy of the file.
 */
struct kfilnd_transaction_msg {
	struct kfilnd_msg *msg;

/* Initiator and target transaction structure. */
struct kfilnd_transaction {
	/* Endpoint list transaction lives on. */
	struct list_head tn_entry;
	struct mutex tn_lock;	/* to serialize events */
	int tn_status;		/* return code from ops */
	struct kfilnd_ep *tn_ep;	/* endpoint we operate under */
	enum tn_states tn_state;	/* current state of Tn */
	struct lnet_msg *tn_lntmsg;	/* LNet msg to finalize */
	struct lnet_msg *tn_getreply;	/* GET LNet msg to finalize */

	bool is_initiator;	/* Initiated LNet transfer. */

	/* Transaction send message and target address. */
	kfi_addr_t tn_target_addr;
	struct kfilnd_peer *tn_kp;
	struct kfilnd_transaction_msg tn_tx_msg;

	/* Transaction multi-receive buffer and associated receive message. */
	struct kfilnd_immediate_buffer *tn_posted_buf;
	struct kfilnd_transaction_msg tn_rx_msg;

	/* LNet buffer used to register a memory region or perform a RMA
	 * operation.
	 */
	struct bio_vec tn_kiov[LNET_MAX_IOV];
	unsigned int tn_num_iovec;

	/* LNet transaction payload byte count. */

	/* Bulk transaction buffer is sink or source buffer. */

	/* Memory region and remote key used to cover initiator's buffer. */

	/* RX context used to perform response operations to a Put/Get
	 * request. This is required since the request initiator locks in a
	 * transactions to a specific RX context.
	 */
	u16 tn_response_mr_key;

	/* Immediate data used to convey transaction state from LNet target
	 * to LNet initiator.
	 */

	/* Bulk operation timeout timer. */
	struct timer_list timeout_timer;
	struct work_struct timeout_work;

	/* Transaction health status. */
	enum lnet_msg_hstatus hstatus;

	/* Transaction deadline. */

	/* Transaction replay deadline. */
	ktime_t tn_replay_deadline;

	/* Fields used to replay transaction. */
	struct list_head replay_entry;
	enum tn_events replay_event;

	/* Wire message type carried by this transaction. */
	enum kfilnd_msg_type msg_type;
/* Send a hello handshake request to peer @kp via the endpoint selected for
 * @cpt on device @dev.
 */
int kfilnd_send_hello_request(struct kfilnd_dev *dev, int cpt,
			      struct kfilnd_peer *kp);

#endif /* _KFILND_ */