4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright 2022 Hewlett Packard Enterprise Development LP
26 * This file is part of Lustre, http://www.lustre.org/
29 * kfilnd main interface.
35 #include <linux/version.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/kthread.h>
40 #include <linux/string.h>
41 #include <linux/stat.h>
42 #include <linux/errno.h>
43 #include <linux/unistd.h>
44 #include <linux/uio.h>
45 #include <linux/rwsem.h>
46 #include <linux/mutex.h>
47 #include <linux/rhashtable.h>
48 #include <linux/workqueue.h>
49 #include <linux/debugfs.h>
50 #include <linux/seq_file.h>
51 #include <linux/ktime.h>
53 #include <asm/uaccess.h>
56 #include <linux/init.h>
58 #include <linux/file.h>
59 #include <linux/stat.h>
60 #include <linux/list.h>
61 #include <linux/kmod.h>
62 #include <linux/sysctl.h>
63 #include <linux/pci.h>
68 #define KFILND_VERSION "0.2.0"
70 #define DEBUG_SUBSYSTEM S_LND
72 #include <libcfs/libcfs.h>
73 #include <lnet/lib-lnet.h>
74 #include "kfi_endpoint.h"
75 #include "kfi_errno.h"
77 #include "kfi_tagged.h"
78 #include "kfi_cxi_ext.h"
80 /* KFILND CFS fail range 0xF100 - 0xF1FF. */
82 #define CFS_KFI_FAIL_SEND_EVENT 0xF100
83 #define CFS_KFI_FAIL_READ_EVENT 0xF101
84 #define CFS_KFI_FAIL_WRITE_EVENT 0xF102
85 #define CFS_KFI_FAIL_TAGGED_SEND_EVENT 0xF103
86 #define CFS_KFI_FAIL_TAGGED_RECV_EVENT 0xF104
87 #define CFS_KFI_FAIL_BULK_TIMEOUT 0xF105
88 #define CFS_KFI_FAIL_SEND 0xF106
89 #define CFS_KFI_FAIL_READ 0xF107
90 #define CFS_KFI_FAIL_WRITE 0xF108
91 #define CFS_KFI_FAIL_TAGGED_SEND 0xF109
92 #define CFS_KFI_FAIL_TAGGED_RECV 0xF10A
93 #define CFS_KFI_FAIL_SEND_EAGAIN 0xF10B
94 #define CFS_KFI_FAIL_READ_EAGAIN 0xF10C
95 #define CFS_KFI_FAIL_WRITE_EAGAIN 0xF10D
96 #define CFS_KFI_FAIL_TAGGED_SEND_EAGAIN 0xF10E
97 #define CFS_KFI_FAIL_TAGGED_RECV_EAGAIN 0xF10F
98 #define CFS_KFI_FAIL_TAGGED_RECV_CANCEL_EAGAIN 0xF110
99 #define CFS_KFI_FAIL_RECV_EAGAIN 0xF111
100 #define CFS_KFI_FAIL_RECV 0xF112
101 #define CFS_KFI_FAIL_MSG_UNPACK 0xF113
102 #define CFS_KFI_FAIL_MSG_TYPE 0xF114
104 /* Maximum number of transaction keys supported. */
105 #define KFILND_EP_KEY_BITS 16U
106 #define KFILND_EP_KEY_MAX (BIT(KFILND_EP_KEY_BITS) - 1)
108 /* Some constants which should be turned into tunables */
109 #define KFILND_IMMEDIATE_MSG_SIZE 4096
111 #define KFILND_MY_PROCID 49152
113 /* 256 Rx contexts max */
114 #define KFILND_FAB_RX_CTX_BITS 8
/* Get the KFI base address from a KFI RX address. RX context information is
 * stored in the MSBs of the KFI address.
 */
119 #define KFILND_BASE_ADDR(addr) \
120 ((addr) & ((1UL << (64 - KFILND_FAB_RX_CTX_BITS)) - 1))
122 /* States used by all kfilnd structures */
123 enum kfilnd_object_states {
124 KFILND_STATE_UNINITIALIZED,
125 KFILND_STATE_INITIALIZED,
126 KFILND_STATE_SHUTTING_DOWN
129 enum kfilnd_ni_lnd_tunables_attr {
130 LNET_NET_KFILND_TUNABLES_ATTR_UNSPEC = 0,
132 LNET_NET_KFILND_TUNABLES_ATTR_PROV_MAJOR,
133 LNET_NET_KFILND_TUNABLES_ATTR_PROV_MINOR,
134 LNET_NET_KFILND_TUNABLES_ATTR_AUTH_KEY,
135 __LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE,
138 #define LNET_NET_KFILND_TUNABLES_ATTR_MAX (__LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
140 extern struct dentry *kfilnd_debug_dir;
141 extern const struct file_operations kfilnd_initiator_state_stats_file_ops;
142 extern const struct file_operations kfilnd_target_state_stats_file_ops;
143 extern const struct file_operations kfilnd_target_stats_file_ops;
144 extern const struct file_operations kfilnd_initiator_stats_file_ops;
145 extern const struct file_operations kfilnd_reset_stats_file_ops;
147 extern struct workqueue_struct *kfilnd_wq;
149 extern unsigned int cksum;
150 extern unsigned int tx_scale_factor;
151 extern unsigned int rx_cq_scale_factor;
152 extern unsigned int tx_cq_scale_factor;
153 extern unsigned int eq_size;
154 extern unsigned int immediate_rx_buf_count;
156 int kfilnd_tunables_setup(struct lnet_ni *ni);
157 int kfilnd_tunables_init(void);
159 struct kfilnd_transaction;
163 /* Multi-receive buffers for immediate receives */
164 struct kfilnd_immediate_buffer {
166 size_t immed_buf_size;
167 struct page *immed_buf_page;
169 bool immed_no_repost;
170 struct list_head replay_entry;
171 struct kfilnd_ep *immed_end;
174 extern atomic_t kfilnd_rx_count;
178 struct kfilnd_cq_work {
179 struct kfilnd_cq *cq;
180 unsigned int work_cpu;
181 struct work_struct work;
185 struct kfilnd_ep *ep;
187 unsigned int cq_work_count;
188 struct kfilnd_cq_work cq_works[];
192 /* The contexts for this CPT */
193 struct kfid_ep *end_tx;
194 struct kfid_ep *end_rx;
196 /* Corresponding CQs */
197 struct kfilnd_cq *end_tx_cq;
198 struct kfilnd_cq *end_rx_cq;
200 /* Specific config values for this endpoint */
201 struct kfilnd_dev *end_dev;
205 /* List of transactions. */
206 struct list_head tn_list;
207 spinlock_t tn_list_lock;
210 struct list_head tn_replay;
211 struct list_head imm_buffer_replay;
212 spinlock_t replay_lock;
213 struct timer_list replay_timer;
214 struct work_struct replay_work;
215 atomic_t replay_count;
217 /* Key used to build the tag for tagged buffers. */
220 /* Pre-posted immediate buffers */
221 struct kfilnd_immediate_buffer end_immed_bufs[];
224 /* Newly allocated peer */
225 #define KP_STATE_NEW 0x1
226 /* Peer after successful hello handshake */
227 #define KP_STATE_UPTODATE 0x2
228 /* Peer experienced some sort of network failure */
229 #define KP_STATE_STALE 0x3
230 /* We suspect this peer is actually down or otherwise unreachable */
231 #define KP_STATE_DOWN 0x4
234 struct rhash_head kp_node;
235 struct rcu_head kp_rcu_head;
236 struct kfilnd_dev *kp_dev;
240 atomic_t kp_remove_peer;
242 time64_t kp_last_alive;
244 u32 kp_local_session_key;
245 u32 kp_remote_session_key;
246 atomic_t kp_hello_pending;
247 time64_t kp_hello_ts;
251 static inline bool kfilnd_peer_deleted(struct kfilnd_peer *kp)
253 return atomic_read(&kp->kp_remove_peer) > 0;
256 /* Sets kp_hello_sending
257 * Returns true if it was already set
258 * Returns false otherwise
260 static inline bool kfilnd_peer_set_check_hello_pending(struct kfilnd_peer *kp)
262 return (atomic_cmpxchg(&kp->kp_hello_pending, 0, 1) == 1);
265 static inline void kfilnd_peer_clear_hello_pending(struct kfilnd_peer *kp)
267 atomic_set(&kp->kp_hello_pending, 0);
270 static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *kp)
272 return atomic_read(&kp->kp_state) == KP_STATE_NEW;
275 static inline bool kfilnd_peer_needs_throttle(struct kfilnd_peer *kp)
277 unsigned int kp_state = atomic_read(&kp->kp_state);
279 return (kp_state == KP_STATE_NEW || kp_state == KP_STATE_DOWN);
282 /* Peer needs hello if it is not up to date and there is not already a hello
285 * Called from the send path and the receive path. When called from send path
286 * we additionally consider the peer's last alive value, and proactively
287 * handshake peers that we haven't talked to in a while.
289 * If hello was sent more than LND timeout seconds ago, and we never received a
290 * response, then send another one.
292 static inline bool kfilnd_peer_needs_hello(struct kfilnd_peer *kp,
293 bool proactive_handshake)
295 if (atomic_read(&kp->kp_hello_pending) == 0) {
296 if (atomic_read(&kp->kp_state) != KP_STATE_UPTODATE)
298 else if (proactive_handshake &&
299 ktime_before(kp->kp_last_alive +
300 lnet_get_lnd_timeout() * 2,
301 ktime_get_seconds()))
303 } else if (ktime_before(kp->kp_hello_ts + lnet_get_lnd_timeout(),
304 ktime_get_seconds())) {
305 /* Sent hello but never received reply */
307 "No response from %s(%p):0x%llx after %lld\n",
308 libcfs_nid2str(kp->kp_nid), kp, kp->kp_addr,
309 ktime_sub(ktime_get_seconds(), kp->kp_hello_ts));
311 kfilnd_peer_clear_hello_pending(kp);
319 struct list_head entry;
320 struct list_head dom_list;
321 struct mutex dom_list_lock;
322 struct kfid_fabric *fabric;
327 struct list_head entry;
328 struct list_head dev_list;
330 struct kfilnd_fab *fab;
331 struct kfid_domain *domain;
335 /* Transaction States */
339 /* Shared initiator and target states. */
341 TN_STATE_WAIT_TAG_COMP,
343 /* Initiator immediate states. */
346 /* Initiator bulk states. */
347 TN_STATE_TAGGED_RECV_POSTED,
348 TN_STATE_SEND_FAILED,
350 TN_STATE_WAIT_TIMEOUT_COMP,
351 TN_STATE_WAIT_SEND_COMP,
352 TN_STATE_WAIT_TIMEOUT_TAG_COMP,
357 TN_STATE_WAIT_TAG_RMA_COMP,
359 /* Invalid max value. */
363 /* Base duration state stats. */
364 struct kfilnd_tn_duration_stat {
365 atomic64_t accumulated_duration;
366 atomic_t accumulated_count;
369 /* Transaction state stats group into 22 buckets. Bucket zero corresponds to
370 * LNet message size of 0 bytes and buckets 1 through 21 correspond to LNet
371 * message sizes of 1 to 1048576 bytes increasing by a power of 2. LNet message
 * sizes are rounded up to the nearest power of 2.
 */
374 #define KFILND_DATA_SIZE_BUCKETS 22U
375 #define KFILND_DATA_SIZE_MAX_SIZE (1U << (KFILND_DATA_SIZE_BUCKETS - 2))
376 struct kfilnd_tn_data_size_duration_stats {
377 struct kfilnd_tn_duration_stat data_size[KFILND_DATA_SIZE_BUCKETS];
380 static inline unsigned int kfilnd_msg_len_to_data_size_bucket(size_t size)
386 if (size >= KFILND_DATA_SIZE_MAX_SIZE)
387 return KFILND_DATA_SIZE_BUCKETS - 1;
389 /* Round size up to the nearest power of 2. */
394 return (unsigned int)bit;
/* One data size duration stat bucket for each transaction state. */
398 struct kfilnd_tn_state_data_size_duration_stats {
399 struct kfilnd_tn_data_size_duration_stats state[TN_STATE_MAX];
403 struct list_head kfd_list; /* chain on kfid_devs */
404 struct lnet_ni *kfd_ni;
405 enum kfilnd_object_states kfd_state;
407 /* KFI LND domain the device is associated with. */
408 struct kfilnd_dom *dom;
410 /* Fields specific to kfabric operation */
412 struct kfid_ep *kfd_sep;
413 struct kfid_av *kfd_av;
414 struct kfilnd_ep **kfd_endpoints;
416 /* Map of LNet NI CPTs to endpoints. */
417 struct kfilnd_ep **cpt_to_endpoint;
419 /* Hash of LNet NIDs to KFI addresses. */
420 struct rhashtable peer_cache;
422 /* Per LNet NI states. */
423 struct kfilnd_tn_state_data_size_duration_stats initiator_state_stats;
424 struct kfilnd_tn_state_data_size_duration_stats target_state_stats;
425 struct kfilnd_tn_data_size_duration_stats initiator_stats;
426 struct kfilnd_tn_data_size_duration_stats target_stats;
428 /* Per LNet NI debugfs stats. */
429 struct dentry *dev_dir;
430 struct dentry *initiator_state_stats_file;
431 struct dentry *initiator_stats_file;
432 struct dentry *target_state_stats_file;
433 struct dentry *target_stats_file;
434 struct dentry *reset_stats_file;
436 /* Physical NIC address. */
437 unsigned int nic_addr;
438 atomic_t session_keys;
441 /* Invalid checksum value is treated as no checksum. */
442 /* TODO: Module parameter to disable checksum? */
443 #define NO_CHECKSUM 0x0
445 /* Hello message header. */
446 struct kfilnd_hello_msg {
447 /* Support kfilnd version. */
450 /* Base RX context peer should used. */
453 /* Session key used by peer. */
456 /* RX context count peer can target. */
460 /* Immediate message header. */
461 struct kfilnd_immed_msg {
462 /* Entire LNet header needed by the destination to match incoming
465 struct lnet_hdr_nid4 hdr;
467 /* Entire LNet message payload. */
471 /* Bulk request message header. */
472 struct kfilnd_bulk_req_msg {
473 /* Entire LNet header needed by the destination to match incoming
476 struct lnet_hdr_nid4 hdr;
478 /* Specific RX context the target must target to push/pull LNet
483 /* Memory key needed by the target to push/pull LNet payload. */
487 /* Kfilnd message. Includes base transport header plus embedded protocol
491 /* Unique kfilnd magic. */
494 /* Version of the kfilnd protocol. */
497 /* Specific kfilnd protocol type. */
503 /* Number of bytes in message. */
506 /* Checksum of entire message. 0 is checksum disabled. */
509 /* Message LNet source NID. */
512 /* Message LNet target NID. */
515 /* Embedded protocol headers. Must remain at bottom. */
517 struct kfilnd_immed_msg immed;
518 struct kfilnd_bulk_req_msg bulk_req;
519 struct kfilnd_hello_msg hello;
523 #define KFILND_MSG_MAGIC LNET_PROTO_KFI_MAGIC /* unique magic */
525 #define KFILND_MSG_VERSION_1 0x1
526 #define KFILND_MSG_VERSION KFILND_MSG_VERSION_1
/* Get the KFI RX context from a KFI RX address. RX context information is
 * stored in the MSBs of the KFI address.
 */
531 #define KFILND_RX_CONTEXT(addr) ((addr) >> (64 - KFILND_FAB_RX_CTX_BITS))
533 #define KFILND_EP_DEBUG(ep, fmt, ...) \
534 CDEBUG(D_NET, "%s:%d " fmt "\n", \
535 libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
536 (ep)->end_context_id, ##__VA_ARGS__)
538 #define KFILND_EP_ERROR(ep, fmt, ...) \
539 CNETERR("%s:%d " fmt "\n", \
540 libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
541 (ep)->end_context_id, ##__VA_ARGS__)
543 #define KFILND_TN_PEER_VALID(tn) \
544 !IS_ERR_OR_NULL((tn)->tn_kp)
546 #define KFILND_TN_DIR_DEBUG(tn, fmt, dir, ...) \
547 CDEBUG(D_NET, "%s Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
548 msg_type_to_str(tn->msg_type), \
550 libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
551 (tn)->tn_ep->end_context_id, dir, \
552 libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
553 KFILND_TN_PEER_VALID(tn) ? \
554 KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
557 #define KFILND_TN_DEBUG(tn, fmt, ...) \
559 if ((tn)->is_initiator) \
560 KFILND_TN_DIR_DEBUG(tn, fmt, "->", ##__VA_ARGS__); \
562 KFILND_TN_DIR_DEBUG(tn, fmt, "<-", ##__VA_ARGS__); \
565 #define KFILND_TN_DIR_ERROR(tn, fmt, dir, ...) \
566 CNETERR("Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
568 libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
569 (tn)->tn_ep->end_context_id, dir, \
570 libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
571 KFILND_TN_PEER_VALID(tn) ? \
572 KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
575 #define KFILND_TN_ERROR(tn, fmt, ...) \
577 if ((tn)->is_initiator) \
578 KFILND_TN_DIR_ERROR(tn, fmt, "->", ##__VA_ARGS__); \
580 KFILND_TN_DIR_ERROR(tn, fmt, "<-", ##__VA_ARGS__); \
583 /* TODO: Support NOOPs? */
584 enum kfilnd_msg_type {
585 /* Valid message types start at 1. */
588 /* Valid message types. */
589 KFILND_MSG_IMMEDIATE,
590 KFILND_MSG_BULK_PUT_REQ,
591 KFILND_MSG_BULK_GET_REQ,
592 KFILND_MSG_HELLO_REQ,
593 KFILND_MSG_HELLO_RSP,
595 /* Invalid max value. */
599 static inline const char *msg_type_to_str(enum kfilnd_msg_type type)
601 static const char *str[KFILND_MSG_MAX] = {
602 [KFILND_MSG_INVALID] = "KFILND_MSG_INVALID",
603 [KFILND_MSG_IMMEDIATE] = "KFILND_MSG_IMMEDIATE",
604 [KFILND_MSG_BULK_PUT_REQ] = "KFILND_MSG_BULK_PUT_REQ",
605 [KFILND_MSG_BULK_GET_REQ] = "KFILND_MSG_BULK_GET_REQ",
606 [KFILND_MSG_HELLO_REQ] = "KFILND_MSG_HELLO_REQ",
607 [KFILND_MSG_HELLO_RSP] = "KFILND_MSG_HELLO_RSP",
610 if (type >= KFILND_MSG_MAX)
611 return "KFILND_MSG_INVALID";
616 static inline const char *tn_state_to_str(enum tn_states type)
618 static const char *str[TN_STATE_MAX] = {
619 [TN_STATE_INVALID] = "TN_STATE_INVALID",
620 [TN_STATE_IDLE] = "TN_STATE_IDLE",
621 [TN_STATE_WAIT_TAG_COMP] = "TN_STATE_WAIT_TAG_COMP",
622 [TN_STATE_IMM_SEND] = "TN_STATE_IMM_SEND",
623 [TN_STATE_TAGGED_RECV_POSTED] = "TN_STATE_TAGGED_RECV_POSTED",
624 [TN_STATE_SEND_FAILED] = "TN_STATE_SEND_FAILED",
625 [TN_STATE_WAIT_COMP] = "TN_STATE_WAIT_COMP",
626 [TN_STATE_WAIT_TIMEOUT_COMP] = "TN_STATE_WAIT_TIMEOUT_COMP",
627 [TN_STATE_WAIT_SEND_COMP] = "TN_STATE_WAIT_SEND_COMP",
628 [TN_STATE_WAIT_TIMEOUT_TAG_COMP] = "TN_STATE_WAIT_TIMEOUT_TAG_COMP",
629 [TN_STATE_FAIL] = "TN_STATE_FAIL",
630 [TN_STATE_IMM_RECV] = "TN_STATE_IMM_RECV",
631 [TN_STATE_WAIT_TAG_RMA_COMP] = "TN_STATE_WAIT_TAG_RMA_COMP",
637 /* Transaction Events */
641 /* Initiator events. */
642 TN_EVENT_INIT_IMMEDIATE,
648 TN_EVENT_TAG_RX_FAIL,
649 TN_EVENT_TAG_RX_CANCEL,
656 TN_EVENT_INIT_TAG_RMA,
657 TN_EVENT_SKIP_TAG_RMA,
659 TN_EVENT_TAG_TX_FAIL,
661 /* Invalid max value. */
665 static inline const char *tn_event_to_str(enum tn_events type)
667 static const char *str[TN_EVENT_MAX] = {
668 [TN_EVENT_INVALID] = "TN_EVENT_INVALID",
669 [TN_EVENT_INIT_IMMEDIATE] = "TN_EVENT_INIT_IMMEDIATE",
670 [TN_EVENT_INIT_BULK] = "TN_EVENT_INIT_BULK",
671 [TN_EVENT_TX_HELLO] = "TN_EVENT_TX_HELLO",
672 [TN_EVENT_TX_OK] = "TN_EVENT_TX_OK",
673 [TN_EVENT_TX_FAIL] = "TN_EVENT_TX_FAIL",
674 [TN_EVENT_TAG_RX_OK] = "TN_EVENT_TAG_RX_OK",
675 [TN_EVENT_TAG_RX_FAIL] = "TN_EVENT_TAG_RX_FAIL",
676 [TN_EVENT_TAG_RX_CANCEL] = "TN_EVENT_TAG_RX_CANCEL",
677 [TN_EVENT_TIMEOUT] = "TN_EVENT_TIMEOUT",
678 [TN_EVENT_RX_HELLO] = "TN_EVENT_RX_HELLO",
679 [TN_EVENT_RX_OK] = "TN_EVENT_RX_OK",
680 [TN_EVENT_RX_FAIL] = "TN_EVENT_RX_FAIL",
681 [TN_EVENT_INIT_TAG_RMA] = "TN_EVENT_INIT_TAG_RMA",
682 [TN_EVENT_SKIP_TAG_RMA] = "TN_EVENT_SKIP_TAG_RMA",
683 [TN_EVENT_TAG_TX_FAIL] = "TN_EVENT_TAG_TX_FAIL",
689 struct kfilnd_transaction_msg {
690 struct kfilnd_msg *msg;
694 /* Initiator and target transaction structure. */
695 struct kfilnd_transaction {
696 /* Endpoint list transaction lives on. */
697 struct list_head tn_entry;
698 struct mutex tn_lock; /* to serialize events */
699 int tn_status; /* return code from ops */
700 struct kfilnd_ep *tn_ep; /* endpoint we operate under */
701 enum tn_states tn_state; /* current state of Tn */
702 struct lnet_msg *tn_lntmsg; /* LNet msg to finalize */
703 struct lnet_msg *tn_getreply; /* GET LNet msg to finalize */
705 bool is_initiator; /* Initiated LNet transfer. */
707 /* Transaction send message and target address. */
708 kfi_addr_t tn_target_addr;
709 struct kfilnd_peer *tn_kp;
710 struct kfilnd_transaction_msg tn_tx_msg;
712 /* Transaction multi-receive buffer and associated receive message. */
713 struct kfilnd_immediate_buffer *tn_posted_buf;
714 struct kfilnd_transaction_msg tn_rx_msg;
716 /* LNet buffer used to register a memory region or perform a RMA
719 struct bio_vec tn_kiov[LNET_MAX_IOV];
720 unsigned int tn_num_iovec;
722 /* LNet transaction payload byte count. */
725 /* Bulk transaction buffer is sink or source buffer. */
728 /* Memory region and remote key used to cover initiator's buffer. */
731 /* RX context used to perform response operations to a Put/Get
732 * request. This is required since the request initiator locks in a
733 * transactions to a specific RX context.
735 u16 tn_response_mr_key;
738 /* Immediate data used to convey transaction state from LNet target to
743 /* Bulk operation timeout timer. */
744 struct timer_list timeout_timer;
745 struct work_struct timeout_work;
747 /* Transaction health status. */
748 enum lnet_msg_hstatus hstatus;
750 /* Transaction deadline. */
752 /* Transaction replay deadline. */
753 ktime_t tn_replay_deadline;
759 /* Fields used to replay transaction. */
760 struct list_head replay_entry;
761 enum tn_events replay_event;
764 enum kfilnd_msg_type msg_type;
767 int kfilnd_send_hello_request(struct kfilnd_dev *dev, int cpt,
768 struct kfilnd_peer *kp);
770 #endif /* _KFILND_ */