4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright 2022 Hewlett Packard Enterprise Development LP
26 * This file is part of Lustre, http://www.lustre.org/
29 * kfilnd main interface.
35 #include <linux/version.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/kthread.h>
40 #include <linux/string.h>
41 #include <linux/stat.h>
42 #include <linux/errno.h>
43 #include <linux/unistd.h>
44 #include <linux/uio.h>
45 #include <linux/rwsem.h>
46 #include <linux/mutex.h>
47 #include <linux/rhashtable.h>
48 #include <linux/workqueue.h>
49 #include <linux/debugfs.h>
50 #include <linux/seq_file.h>
51 #include <linux/ktime.h>
53 #include <asm/uaccess.h>
56 #include <linux/init.h>
58 #include <linux/file.h>
59 #include <linux/stat.h>
60 #include <linux/list.h>
61 #include <linux/kmod.h>
62 #include <linux/sysctl.h>
63 #include <linux/pci.h>
68 #define KFILND_VERSION "0.2.0"
70 #define DEBUG_SUBSYSTEM S_LND
72 #include <libcfs/libcfs.h>
73 #include <libcfs/linux/linux-net.h>
74 #include <lnet/lib-lnet.h>
75 #include "kfi_endpoint.h"
76 #include "kfi_errno.h"
78 #include "kfi_tagged.h"
79 #include "kfi_cxi_ext.h"
/* KFILND CFS fail range 0xF100 - 0xF1FF. */

/* CFS fail-loc values used to inject errors into the kfilnd send/receive
 * and RMA paths for testing (see libcfs CFS_FAIL_CHECK usage in kfilnd).
 */
#define CFS_KFI_FAIL_SEND_EVENT 0xF100
#define CFS_KFI_FAIL_READ_EVENT 0xF101
#define CFS_KFI_FAIL_WRITE_EVENT 0xF102
#define CFS_KFI_FAIL_TAGGED_SEND_EVENT 0xF103
#define CFS_KFI_FAIL_TAGGED_RECV_EVENT 0xF104
#define CFS_KFI_FAIL_BULK_TIMEOUT 0xF105
#define CFS_KFI_FAIL_SEND 0xF106
#define CFS_KFI_FAIL_READ 0xF107
#define CFS_KFI_FAIL_WRITE 0xF108
#define CFS_KFI_FAIL_TAGGED_SEND 0xF109
#define CFS_KFI_FAIL_TAGGED_RECV 0xF10A
#define CFS_KFI_FAIL_SEND_EAGAIN 0xF10B
#define CFS_KFI_FAIL_READ_EAGAIN 0xF10C
#define CFS_KFI_FAIL_WRITE_EAGAIN 0xF10D
#define CFS_KFI_FAIL_TAGGED_SEND_EAGAIN 0xF10E
#define CFS_KFI_FAIL_TAGGED_RECV_EAGAIN 0xF10F
#define CFS_KFI_FAIL_TAGGED_RECV_CANCEL_EAGAIN 0xF110
#define CFS_KFI_FAIL_RECV_EAGAIN 0xF111
#define CFS_KFI_FAIL_RECV 0xF112
#define CFS_KFI_FAIL_MSG_UNPACK 0xF113
#define CFS_KFI_FAIL_MSG_TYPE 0xF114
/* Maximum number of transaction keys supported. */
#define KFILND_EP_KEY_BITS 16U
#define KFILND_EP_KEY_MAX (BIT(KFILND_EP_KEY_BITS) - 1)

/* Some constants which should be turned into tunables */
#define KFILND_IMMEDIATE_MSG_SIZE 4096

#define KFILND_MY_PROCID 49152

/* 256 Rx contexts max */
#define KFILND_FAB_RX_CTX_BITS 8

/* Get the KFI base address from a KFI RX address. RX context information is
 * stored in the MSBs of the KFI address.
 */
#define KFILND_BASE_ADDR(addr) \
	((addr) & ((1UL << (64 - KFILND_FAB_RX_CTX_BITS)) - 1))
/* States used by all kfilnd structures */
enum kfilnd_object_states {
	KFILND_STATE_UNINITIALIZED,
	KFILND_STATE_INITIALIZED,
	KFILND_STATE_SHUTTING_DOWN
};
/* Netlink attribute IDs for kfilnd per-NI tunables. */
enum kfilnd_ni_lnd_tunables_attr {
	LNET_NET_KFILND_TUNABLES_ATTR_UNSPEC = 0,

	LNET_NET_KFILND_TUNABLES_ATTR_PROV_MAJOR,
	LNET_NET_KFILND_TUNABLES_ATTR_PROV_MINOR,
	LNET_NET_KFILND_TUNABLES_ATTR_AUTH_KEY,
	LNET_NET_KFILND_TUNABLES_ATTR_TRAFFIC_CLASS,
	__LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE,
};

#define LNET_NET_KFILND_TUNABLES_ATTR_MAX (__LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
/* debugfs root and per-stat file operations (defined in kfilnd debugfs code). */
extern struct dentry *kfilnd_debug_dir;
extern const struct file_operations kfilnd_initiator_state_stats_file_ops;
extern const struct file_operations kfilnd_target_state_stats_file_ops;
extern const struct file_operations kfilnd_target_stats_file_ops;
extern const struct file_operations kfilnd_initiator_stats_file_ops;
extern const struct file_operations kfilnd_reset_stats_file_ops;

/* Workqueue used to schedule kfilnd work items. */
extern struct workqueue_struct *kfilnd_wq;

/* Module tunables. */
extern unsigned int cksum;
extern unsigned int tx_scale_factor;
extern unsigned int rx_cq_scale_factor;
extern unsigned int tx_cq_scale_factor;
extern unsigned int eq_size;
extern unsigned int immediate_rx_buf_count;

int kfilnd_tunables_setup(struct lnet_ni *ni);
int kfilnd_tunables_init(void);

struct kfilnd_transaction;
165 /* Multi-receive buffers for immediate receives */
166 struct kfilnd_immediate_buffer {
168 size_t immed_buf_size;
169 struct page *immed_buf_page;
171 bool immed_no_repost;
172 struct list_head replay_entry;
173 struct kfilnd_ep *immed_end;
176 extern atomic_t kfilnd_rx_count;
180 struct kfilnd_cq_work {
181 struct kfilnd_cq *cq;
182 unsigned int work_cpu;
183 struct work_struct work;
187 struct kfilnd_ep *ep;
189 unsigned int cq_work_count;
190 struct kfilnd_cq_work cq_works[];
194 /* The contexts for this CPT */
195 struct kfid_ep *end_tx;
196 struct kfid_ep *end_rx;
198 /* Corresponding CQs */
199 struct kfilnd_cq *end_tx_cq;
200 struct kfilnd_cq *end_rx_cq;
202 /* Specific config values for this endpoint */
203 struct kfilnd_dev *end_dev;
207 /* List of transactions. */
208 struct list_head tn_list;
209 spinlock_t tn_list_lock;
212 struct list_head tn_replay;
213 struct list_head imm_buffer_replay;
214 spinlock_t replay_lock;
215 struct timer_list replay_timer;
216 struct work_struct replay_work;
217 atomic_t replay_count;
219 /* Key used to build the tag for tagged buffers. */
222 /* Pre-posted immediate buffers */
223 struct kfilnd_immediate_buffer end_immed_bufs[];
226 /* Newly allocated peer */
227 #define KP_STATE_NEW 0x1
228 /* Peer after successful hello handshake */
229 #define KP_STATE_UPTODATE 0x2
230 /* Peer experienced some sort of network failure */
231 #define KP_STATE_STALE 0x3
232 /* We suspect this peer is actually down or otherwise unreachable */
233 #define KP_STATE_DOWN 0x4
/* NOTE(review): this listing is line-sampled -- the "struct kfilnd_peer {"
 * opener, its closing brace and several members (kp_nid, kp_addr, kp_state,
 * ...) referenced by the inline helpers and macros below are not visible.
 * Member lines below are kept byte-identical.
 */
/* rhashtable linkage into the owning device's peer_cache */
236 struct rhash_head kp_node;
/* deferred (RCU) reclaim of this peer entry */
237 struct rcu_head kp_rcu_head;
238 struct kfilnd_dev *kp_dev;
/* non-zero once the peer is flagged for removal (kfilnd_peer_deleted()) */
242 atomic_t kp_remove_peer;
/* last time (seconds) this peer was known alive; used by
 * kfilnd_peer_needs_hello() to trigger proactive handshakes
 */
244 time64_t kp_last_alive;
/* session keys exchanged during the hello handshake */
246 u32 kp_local_session_key;
247 u32 kp_remote_session_key;
/* set while a hello request is in flight (see *_hello_pending helpers) */
248 atomic_t kp_hello_pending;
/* timestamp (seconds) of the last hello sent */
249 time64_t kp_hello_ts;
253 static inline bool kfilnd_peer_deleted(struct kfilnd_peer *kp)
255 return atomic_read(&kp->kp_remove_peer) > 0;
258 /* Sets kp_hello_sending
259 * Returns true if it was already set
260 * Returns false otherwise
262 static inline bool kfilnd_peer_set_check_hello_pending(struct kfilnd_peer *kp)
264 return (atomic_cmpxchg(&kp->kp_hello_pending, 0, 1) == 1);
267 static inline void kfilnd_peer_clear_hello_pending(struct kfilnd_peer *kp)
269 atomic_set(&kp->kp_hello_pending, 0);
272 static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *kp)
274 return atomic_read(&kp->kp_state) == KP_STATE_NEW;
277 static inline bool kfilnd_peer_needs_throttle(struct kfilnd_peer *kp)
279 unsigned int kp_state = atomic_read(&kp->kp_state);
281 return (kp_state == KP_STATE_NEW || kp_state == KP_STATE_DOWN);
284 /* Peer needs hello if it is not up to date and there is not already a hello
287 * Called from the send path and the receive path. When called from send path
288 * we additionally consider the peer's last alive value, and proactively
289 * handshake peers that we haven't talked to in a while.
291 * If hello was sent more than LND timeout seconds ago, and we never received a
292 * response, then send another one.
294 static inline bool kfilnd_peer_needs_hello(struct kfilnd_peer *kp,
295 bool proactive_handshake)
297 if (atomic_read(&kp->kp_hello_pending) == 0) {
298 if (atomic_read(&kp->kp_state) != KP_STATE_UPTODATE)
300 else if (proactive_handshake &&
301 ktime_before(kp->kp_last_alive +
302 lnet_get_lnd_timeout() * 2,
303 ktime_get_seconds()))
305 } else if (ktime_before(kp->kp_hello_ts + lnet_get_lnd_timeout(),
306 ktime_get_seconds())) {
307 /* Sent hello but never received reply */
309 "No response from %s(%p):0x%llx after %lld\n",
310 libcfs_nid2str(kp->kp_nid), kp, kp->kp_addr,
311 ktime_sub(ktime_get_seconds(), kp->kp_hello_ts));
313 kfilnd_peer_clear_hello_pending(kp);
/* NOTE(review): line-sampled listing -- the "struct kfilnd_fab {" and
 * "struct kfilnd_dom {" openers, closing braces and some members are not
 * visible here. Member lines below are kept byte-identical.
 */
/* kfabric fabric object; dom_list chains the domains opened on it. */
321 struct list_head entry;
322 struct list_head dom_list;
323 struct mutex dom_list_lock;
324 struct kfid_fabric *fabric;
/* kfabric access domain; dev_list chains the devices using it. */
329 struct list_head entry;
330 struct list_head dev_list;
332 struct kfilnd_fab *fab;
333 struct kfid_domain *domain;
/* Transaction States */
/* NOTE(review): reconstructed from a line-sampled listing; enumerator names
 * and order are grounded by the visible lines and by tn_state_to_str().
 */
enum tn_states {
	TN_STATE_INVALID,

	/* Shared initiator and target states. */
	TN_STATE_IDLE,
	TN_STATE_WAIT_TAG_COMP,

	/* Initiator immediate states. */
	TN_STATE_IMM_SEND,

	/* Initiator bulk states. */
	TN_STATE_TAGGED_RECV_POSTED,
	TN_STATE_SEND_FAILED,
	TN_STATE_WAIT_COMP,
	TN_STATE_WAIT_TIMEOUT_COMP,
	TN_STATE_WAIT_SEND_COMP,
	TN_STATE_WAIT_TIMEOUT_TAG_COMP,
	TN_STATE_FAIL,

	/* Target states. */
	TN_STATE_IMM_RECV,
	TN_STATE_WAIT_TAG_RMA_COMP,

	/* Invalid max value. */
	TN_STATE_MAX,
};
365 /* Base duration state stats. */
366 struct kfilnd_tn_duration_stat {
367 atomic64_t accumulated_duration;
368 atomic_t accumulated_count;
371 /* Transaction state stats group into 22 buckets. Bucket zero corresponds to
372 * LNet message size of 0 bytes and buckets 1 through 21 correspond to LNet
373 * message sizes of 1 to 1048576 bytes increasing by a power of 2. LNet message
374 * sizes are round up to the nearest power of 2.
376 #define KFILND_DATA_SIZE_BUCKETS 22U
377 #define KFILND_DATA_SIZE_MAX_SIZE (1U << (KFILND_DATA_SIZE_BUCKETS - 2))
378 struct kfilnd_tn_data_size_duration_stats {
379 struct kfilnd_tn_duration_stat data_size[KFILND_DATA_SIZE_BUCKETS];
/* Map an LNet message length to one of the KFILND_DATA_SIZE_BUCKETS stat
 * buckets: sizes >= KFILND_DATA_SIZE_MAX_SIZE are clamped into the last
 * bucket, otherwise the size is rounded up to the nearest power of 2 (see
 * the comment above KFILND_DATA_SIZE_BUCKETS).
 * NOTE(review): this listing is line-sampled -- the opening brace, the
 * declaration of "bit" and the rounding computation are not visible; code
 * lines are kept byte-identical.
 */
382 static inline unsigned int kfilnd_msg_len_to_data_size_bucket(size_t size)
388 if (size >= KFILND_DATA_SIZE_MAX_SIZE)
389 return KFILND_DATA_SIZE_BUCKETS - 1;
391 /* Round size up to the nearest power of 2. */
396 return (unsigned int)bit;
399 /* One data size duraction state bucket for each transaction state. */
400 struct kfilnd_tn_state_data_size_duration_stats {
401 struct kfilnd_tn_data_size_duration_stats state[TN_STATE_MAX];
/* NOTE(review): line-sampled listing -- the "struct kfilnd_dev {" opener,
 * closing brace and a few members are not visible here. Member lines below
 * are kept byte-identical.
 */
405 struct list_head kfd_list; /* chain on kfid_devs */
406 struct lnet_ni *kfd_ni;
407 enum kfilnd_object_states kfd_state;
409 /* KFI LND domain the device is associated with. */
410 struct kfilnd_dom *dom;
412 /* Fields specific to kfabric operation */
414 struct kfid_ep *kfd_sep;
415 struct kfid_av *kfd_av;
416 struct kfilnd_ep **kfd_endpoints;
418 /* Map of LNet NI CPTs to endpoints. */
419 struct kfilnd_ep **cpt_to_endpoint;
421 /* Hash of LNet NIDs to KFI addresses. */
422 struct rhashtable peer_cache;
424 /* Per LNet NI states. */
425 struct kfilnd_tn_state_data_size_duration_stats initiator_state_stats;
426 struct kfilnd_tn_state_data_size_duration_stats target_state_stats;
427 struct kfilnd_tn_data_size_duration_stats initiator_stats;
428 struct kfilnd_tn_data_size_duration_stats target_stats;
430 /* Per LNet NI debugfs stats. */
431 struct dentry *dev_dir;
432 struct dentry *initiator_state_stats_file;
433 struct dentry *initiator_stats_file;
434 struct dentry *target_state_stats_file;
435 struct dentry *target_stats_file;
436 struct dentry *reset_stats_file;
438 /* Physical NIC address. */
439 unsigned int nic_addr;
/* source of per-peer session keys (see kp_local_session_key) */
440 atomic_t session_keys;
/* NOTE(review): line-sampled listing -- the wire message structs below are
 * missing their field lines (only comments and a few members are visible).
 * Wire layout must NOT be guessed at; lines are kept byte-identical.
 * Confirm against upstream kfilnd.h / kfilnd wirecheck before relying on
 * any field offsets.
 */
443 /* Invalid checksum value is treated as no checksum. */
444 /* TODO: Module parameter to disable checksum? */
445 #define NO_CHECKSUM 0x0
447 /* Hello message header. */
448 struct kfilnd_hello_msg {
449 /* Support kfilnd version. */
452 /* Base RX context peer should used. */
455 /* Session key used by peer. */
458 /* RX context count peer can target. */
462 /* Immediate message header. */
463 struct kfilnd_immed_msg {
464 /* Entire LNet header needed by the destination to match incoming
467 struct lnet_hdr_nid4 hdr;
469 /* Entire LNet message payload. */
473 /* Bulk request message header. */
474 struct kfilnd_bulk_req_msg {
475 /* Entire LNet header needed by the destination to match incoming
478 struct lnet_hdr_nid4 hdr;
480 /* Specific RX context the target must target to push/pull LNet
485 /* Memory key needed by the target to push/pull LNet payload. */
489 /* Kfilnd message. Includes base transport header plus embedded protocol
493 /* Unique kfilnd magic. */
496 /* Version of the kfilnd protocol. */
499 /* Specific kfilnd protocol type. */
505 /* Number of bytes in message. */
508 /* Checksum of entire message. 0 is checksum disabled. */
511 /* Message LNet source NID. */
514 /* Message LNet target NID. */
517 /* Embedded protocol headers. Must remain at bottom. */
519 struct kfilnd_immed_msg immed;
520 struct kfilnd_bulk_req_msg bulk_req;
521 struct kfilnd_hello_msg hello;
#define KFILND_MSG_MAGIC LNET_PROTO_KFI_MAGIC /* unique magic */

/* Wire protocol version carried in each kfilnd message. */
#define KFILND_MSG_VERSION_1 0x1
#define KFILND_MSG_VERSION KFILND_MSG_VERSION_1

/* Get the KFI RX context from a KFI RX address. RX context information is
 * stored in the MSBs of the KFI address.
 */
#define KFILND_RX_CONTEXT(addr) ((addr) >> (64 - KFILND_FAB_RX_CTX_BITS))
/* Log helpers: prefix messages with "<NI nid>:<endpoint context id>". */
#define KFILND_EP_DEBUG(ep, fmt, ...) \
	CDEBUG(D_NET, "%s:%d " fmt "\n", \
	       libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
	       (ep)->end_context_id, ##__VA_ARGS__)

#define KFILND_EP_ERROR(ep, fmt, ...) \
	CNETERR("%s:%d " fmt "\n", \
		libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
		(ep)->end_context_id, ##__VA_ARGS__)

/* A transaction's peer pointer is valid (not NULL and not an ERR_PTR). */
#define KFILND_TN_PEER_VALID(tn) \
	!IS_ERR_OR_NULL((tn)->tn_kp)
/* Debug logging for a transaction; "dir" renders transfer direction.
 * Format args follow the format string: msg type, tn pointer, local NI nid,
 * context id, direction, peer nid, peer pointer, peer RX context.
 * NOTE(review): reconstructed from a line-sampled listing -- the "(tn)"
 * argument line, "##__VA_ARGS__" terminator and do/while wrapper lines were
 * not visible; confirm against upstream kfilnd.h.
 */
#define KFILND_TN_DIR_DEBUG(tn, fmt, dir, ...) \
	CDEBUG(D_NET, "%s Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
	       msg_type_to_str(tn->msg_type), \
	       (tn), \
	       libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
	       (tn)->tn_ep->end_context_id, dir, \
	       libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
	       KFILND_TN_PEER_VALID(tn) ? \
		KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
	       ##__VA_ARGS__)

#define KFILND_TN_DEBUG(tn, fmt, ...) \
	do { \
		if ((tn)->is_initiator) \
			KFILND_TN_DIR_DEBUG(tn, fmt, "->", ##__VA_ARGS__); \
		else \
			KFILND_TN_DIR_DEBUG(tn, fmt, "<-", ##__VA_ARGS__); \
	} while (0)
/* Error logging for a transaction; mirrors KFILND_TN_DIR_DEBUG but uses
 * CNETERR.
 * NOTE(review): reconstructed from a line-sampled listing -- the "(tn)"
 * argument line, "##__VA_ARGS__" terminator and do/while wrapper lines were
 * not visible; confirm against upstream kfilnd.h.
 */
#define KFILND_TN_DIR_ERROR(tn, fmt, dir, ...) \
	CNETERR("Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
		(tn), \
		libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
		(tn)->tn_ep->end_context_id, dir, \
		libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
		KFILND_TN_PEER_VALID(tn) ? \
		 KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
		##__VA_ARGS__)

#define KFILND_TN_ERROR(tn, fmt, ...) \
	do { \
		if ((tn)->is_initiator) \
			KFILND_TN_DIR_ERROR(tn, fmt, "->", ##__VA_ARGS__); \
		else \
			KFILND_TN_DIR_ERROR(tn, fmt, "<-", ##__VA_ARGS__); \
	} while (0)
/* TODO: Support NOOPs? */
/* NOTE(review): reconstructed from a line-sampled listing; enumerator names
 * and order are grounded by the visible lines and by msg_type_to_str().
 */
enum kfilnd_msg_type {
	/* Valid message types start at 1. */
	KFILND_MSG_INVALID,

	/* Valid message types. */
	KFILND_MSG_IMMEDIATE,
	KFILND_MSG_BULK_PUT_REQ,
	KFILND_MSG_BULK_GET_REQ,
	KFILND_MSG_HELLO_REQ,
	KFILND_MSG_HELLO_RSP,

	/* Invalid max value. */
	KFILND_MSG_MAX,
};
601 static inline const char *msg_type_to_str(enum kfilnd_msg_type type)
603 static const char *str[KFILND_MSG_MAX] = {
604 [KFILND_MSG_INVALID] = "KFILND_MSG_INVALID",
605 [KFILND_MSG_IMMEDIATE] = "KFILND_MSG_IMMEDIATE",
606 [KFILND_MSG_BULK_PUT_REQ] = "KFILND_MSG_BULK_PUT_REQ",
607 [KFILND_MSG_BULK_GET_REQ] = "KFILND_MSG_BULK_GET_REQ",
608 [KFILND_MSG_HELLO_REQ] = "KFILND_MSG_HELLO_REQ",
609 [KFILND_MSG_HELLO_RSP] = "KFILND_MSG_HELLO_RSP",
612 if (type >= KFILND_MSG_MAX)
613 return "KFILND_MSG_INVALID";
618 static inline const char *tn_state_to_str(enum tn_states type)
620 static const char *str[TN_STATE_MAX] = {
621 [TN_STATE_INVALID] = "TN_STATE_INVALID",
622 [TN_STATE_IDLE] = "TN_STATE_IDLE",
623 [TN_STATE_WAIT_TAG_COMP] = "TN_STATE_WAIT_TAG_COMP",
624 [TN_STATE_IMM_SEND] = "TN_STATE_IMM_SEND",
625 [TN_STATE_TAGGED_RECV_POSTED] = "TN_STATE_TAGGED_RECV_POSTED",
626 [TN_STATE_SEND_FAILED] = "TN_STATE_SEND_FAILED",
627 [TN_STATE_WAIT_COMP] = "TN_STATE_WAIT_COMP",
628 [TN_STATE_WAIT_TIMEOUT_COMP] = "TN_STATE_WAIT_TIMEOUT_COMP",
629 [TN_STATE_WAIT_SEND_COMP] = "TN_STATE_WAIT_SEND_COMP",
630 [TN_STATE_WAIT_TIMEOUT_TAG_COMP] = "TN_STATE_WAIT_TIMEOUT_TAG_COMP",
631 [TN_STATE_FAIL] = "TN_STATE_FAIL",
632 [TN_STATE_IMM_RECV] = "TN_STATE_IMM_RECV",
633 [TN_STATE_WAIT_TAG_RMA_COMP] = "TN_STATE_WAIT_TAG_RMA_COMP",
639 /* Transaction Events */
/* NOTE(review): line-sampled listing -- the "enum tn_events {" opener,
 * TN_EVENT_INVALID, the TX/RX event enumerators, TN_EVENT_MAX and the
 * closing brace are not visible here; see tn_event_to_str() below for the
 * full set of event names. Lines kept byte-identical.
 */
643 /* Initiator events. */
644 TN_EVENT_INIT_IMMEDIATE,
650 TN_EVENT_TAG_RX_FAIL,
651 TN_EVENT_TAG_RX_CANCEL,
658 TN_EVENT_INIT_TAG_RMA,
659 TN_EVENT_SKIP_TAG_RMA,
661 TN_EVENT_TAG_TX_FAIL,
663 /* Invalid max value. */
667 static inline const char *tn_event_to_str(enum tn_events type)
669 static const char *str[TN_EVENT_MAX] = {
670 [TN_EVENT_INVALID] = "TN_EVENT_INVALID",
671 [TN_EVENT_INIT_IMMEDIATE] = "TN_EVENT_INIT_IMMEDIATE",
672 [TN_EVENT_INIT_BULK] = "TN_EVENT_INIT_BULK",
673 [TN_EVENT_TX_HELLO] = "TN_EVENT_TX_HELLO",
674 [TN_EVENT_TX_OK] = "TN_EVENT_TX_OK",
675 [TN_EVENT_TX_FAIL] = "TN_EVENT_TX_FAIL",
676 [TN_EVENT_TAG_RX_OK] = "TN_EVENT_TAG_RX_OK",
677 [TN_EVENT_TAG_RX_FAIL] = "TN_EVENT_TAG_RX_FAIL",
678 [TN_EVENT_TAG_RX_CANCEL] = "TN_EVENT_TAG_RX_CANCEL",
679 [TN_EVENT_TIMEOUT] = "TN_EVENT_TIMEOUT",
680 [TN_EVENT_RX_HELLO] = "TN_EVENT_RX_HELLO",
681 [TN_EVENT_RX_OK] = "TN_EVENT_RX_OK",
682 [TN_EVENT_RX_FAIL] = "TN_EVENT_RX_FAIL",
683 [TN_EVENT_INIT_TAG_RMA] = "TN_EVENT_INIT_TAG_RMA",
684 [TN_EVENT_SKIP_TAG_RMA] = "TN_EVENT_SKIP_TAG_RMA",
685 [TN_EVENT_TAG_TX_FAIL] = "TN_EVENT_TAG_TX_FAIL",
691 struct kfilnd_transaction_msg {
692 struct kfilnd_msg *msg;
/* NOTE(review): line-sampled listing -- struct kfilnd_transaction is
 * missing several members here (payload byte count, sink/source flag,
 * memory region key, immediate data, transaction deadline, closing brace,
 * ...); only their descriptive comments survive. Lines kept byte-identical.
 */
696 /* Initiator and target transaction structure. */
697 struct kfilnd_transaction {
698 /* Endpoint list transaction lives on. */
699 struct list_head tn_entry;
700 struct mutex tn_lock; /* to serialize events */
701 int tn_status; /* return code from ops */
702 struct kfilnd_ep *tn_ep; /* endpoint we operate under */
703 enum tn_states tn_state; /* current state of Tn */
704 struct lnet_msg *tn_lntmsg; /* LNet msg to finalize */
705 struct lnet_msg *tn_getreply; /* GET LNet msg to finalize */
707 bool is_initiator; /* Initiated LNet transfer. */
709 /* Transaction send message and target address. */
710 kfi_addr_t tn_target_addr;
711 struct kfilnd_peer *tn_kp;
712 struct kfilnd_transaction_msg tn_tx_msg;
714 /* Transaction multi-receive buffer and associated receive message. */
715 struct kfilnd_immediate_buffer *tn_posted_buf;
716 struct kfilnd_transaction_msg tn_rx_msg;
718 /* LNet buffer used to register a memory region or perform a RMA
721 struct bio_vec tn_kiov[LNET_MAX_IOV];
722 unsigned int tn_num_iovec;
724 /* LNet transaction payload byte count. */
727 /* Bulk transaction buffer is sink or source buffer. */
730 /* Memory region and remote key used to cover initiator's buffer. */
733 /* RX context used to perform response operations to a Put/Get
734 * request. This is required since the request initiator locks in a
735 * transactions to a specific RX context.
737 u16 tn_response_mr_key;
740 /* Immediate data used to convey transaction state from LNet target to
745 /* Bulk operation timeout timer. */
746 struct timer_list timeout_timer;
747 struct work_struct timeout_work;
749 /* Transaction health status. */
750 enum lnet_msg_hstatus hstatus;
752 /* Transaction deadline. */
754 /* Transaction replay deadline. */
755 ktime_t tn_replay_deadline;
761 /* Fields used to replay transaction. */
762 struct list_head replay_entry;
763 enum tn_events replay_event;
766 enum kfilnd_msg_type msg_type;
/* Send a hello handshake request to peer @kp using CPT @cpt on @dev. */
int kfilnd_send_hello_request(struct kfilnd_dev *dev, int cpt,
			      struct kfilnd_peer *kp);
772 #endif /* _KFILND_ */