4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright 2022 Hewlett Packard Enterprise Development LP
26 * This file is part of Lustre, http://www.lustre.org/
29 * kfilnd main interface.
35 #include <linux/version.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/kthread.h>
40 #include <linux/string.h>
41 #include <linux/stat.h>
42 #include <linux/errno.h>
43 #include <linux/unistd.h>
44 #include <linux/uio.h>
45 #include <linux/rwsem.h>
46 #include <linux/mutex.h>
47 #include <linux/rhashtable.h>
48 #include <linux/workqueue.h>
49 #include <linux/debugfs.h>
50 #include <linux/seq_file.h>
51 #include <linux/ktime.h>
53 #include <asm/uaccess.h>
56 #include <linux/init.h>
58 #include <linux/file.h>
59 #include <linux/stat.h>
60 #include <linux/list.h>
61 #include <linux/kmod.h>
62 #include <linux/sysctl.h>
63 #include <linux/pci.h>
68 #define KFILND_VERSION "0.2.0"
70 #define DEBUG_SUBSYSTEM S_LND
72 #include <libcfs/libcfs.h>
73 #include <lnet/lib-lnet.h>
74 #include "kfi_endpoint.h"
75 #include "kfi_errno.h"
77 #include "kfi_tagged.h"
78 #include "kfi_cxi_ext.h"
80 /* KFILND CFS fail range 0xF100 - 0xF1FF. */
/* *_EVENT values presumably inject a failed completion event for the
 * corresponding operation (CFS fault injection) — confirm against the
 * event handlers.
 */
82 #define CFS_KFI_FAIL_SEND_EVENT 0xF100
83 #define CFS_KFI_FAIL_READ_EVENT 0xF101
84 #define CFS_KFI_FAIL_WRITE_EVENT 0xF102
85 #define CFS_KFI_FAIL_TAGGED_SEND_EVENT 0xF103
86 #define CFS_KFI_FAIL_TAGGED_RECV_EVENT 0xF104
87 #define CFS_KFI_FAIL_BULK_TIMEOUT 0xF105
/* Plain values presumably fail the operation at submission time. */
88 #define CFS_KFI_FAIL_SEND 0xF106
89 #define CFS_KFI_FAIL_READ 0xF107
90 #define CFS_KFI_FAIL_WRITE 0xF108
91 #define CFS_KFI_FAIL_TAGGED_SEND 0xF109
92 #define CFS_KFI_FAIL_TAGGED_RECV 0xF10A
/* *_EAGAIN values presumably force an -EAGAIN so the replay path is
 * exercised — TODO confirm.
 */
93 #define CFS_KFI_FAIL_SEND_EAGAIN 0xF10B
94 #define CFS_KFI_FAIL_READ_EAGAIN 0xF10C
95 #define CFS_KFI_FAIL_WRITE_EAGAIN 0xF10D
96 #define CFS_KFI_FAIL_TAGGED_SEND_EAGAIN 0xF10E
97 #define CFS_KFI_FAIL_TAGGED_RECV_EAGAIN 0xF10F
98 #define CFS_KFI_FAIL_TAGGED_RECV_CANCEL_EAGAIN 0xF110
99 #define CFS_KFI_FAIL_RECV_EAGAIN 0xF111
100 #define CFS_KFI_FAIL_RECV 0xF112
101 #define CFS_KFI_FAIL_MSG_UNPACK 0xF113
102 #define CFS_KFI_FAIL_MSG_TYPE 0xF114
104 /* Maximum number of transaction keys supported. */
105 #define KFILND_EP_KEY_BITS 16U
106 #define KFILND_EP_KEY_MAX (BIT(KFILND_EP_KEY_BITS) - 1)
108 /* Some constants which should be turned into tunables */
109 #define KFILND_IMMEDIATE_MSG_SIZE 4096
111 #define KFILND_MY_PROCID 49152
113 /* 256 Rx contexts max */
114 #define KFILND_FAB_RX_CTX_BITS 8
116 /* Get the KFI base address from a KFI RX address. RX context information is
117 * stored in the MSBs of the KFI address.
 */
/* Masks off the top KFILND_FAB_RX_CTX_BITS bits of a 64-bit KFI address. */
119 #define KFILND_BASE_ADDR(addr) \
120 ((addr) & ((1UL << (64 - KFILND_FAB_RX_CTX_BITS)) - 1))
122 /* States used by all kfilnd structures */
123 enum kfilnd_object_states {
124 KFILND_STATE_UNINITIALIZED,
125 KFILND_STATE_INITIALIZED,
126 KFILND_STATE_SHUTTING_DOWN
/* Presumably netlink attribute IDs for kfilnd-specific LND tunables
 * (LNet tunables handshake) — TODO confirm against the tunables code.
 */
129 enum kfilnd_ni_lnd_tunables_attr {
130 LNET_NET_KFILND_TUNABLES_ATTR_UNSPEC = 0,
132 LNET_NET_KFILND_TUNABLES_ATTR_PROV_MAJOR,
133 LNET_NET_KFILND_TUNABLES_ATTR_PROV_MINOR,
134 LNET_NET_KFILND_TUNABLES_ATTR_AUTH_KEY,
/* Sentinel; keep last. */
135 __LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE,
138 #define LNET_NET_KFILND_TUNABLES_ATTR_MAX (__LNET_NET_KFILND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
140 extern struct dentry *kfilnd_debug_dir;
/* debugfs file_operations backing the per-device stats files declared
 * in struct kfilnd_dev below.
 */
141 extern const struct file_operations kfilnd_initiator_state_stats_file_ops;
142 extern const struct file_operations kfilnd_target_state_stats_file_ops;
143 extern const struct file_operations kfilnd_target_stats_file_ops;
144 extern const struct file_operations kfilnd_initiator_stats_file_ops;
145 extern const struct file_operations kfilnd_reset_stats_file_ops;
147 extern struct workqueue_struct *kfilnd_wq;
/* Presumably module parameters defined in the module init code —
 * TODO confirm where they are defined.
 */
149 extern unsigned int cksum;
150 extern unsigned int tx_scale_factor;
151 extern unsigned int rx_cq_scale_factor;
152 extern unsigned int tx_cq_scale_factor;
153 extern unsigned int eq_size;
154 extern unsigned int immediate_rx_buf_count;
/* Per-NI tunable setup and one-time module tunable initialization. */
156 int kfilnd_tunables_setup(struct lnet_ni *ni);
157 int kfilnd_tunables_init(void);
159 struct kfilnd_transaction;
163 /* Multi-receive buffers for immediate receives */
164 struct kfilnd_immediate_buffer {
/* Size in bytes of the posted buffer. */
166 size_t immed_buf_size;
/* Backing page(s) of the buffer. */
167 struct page *immed_buf_page;
/* When true, do not repost this buffer after a completion. */
169 bool immed_no_repost;
/* Linkage — presumably onto the endpoint's imm_buffer_replay list. */
170 struct list_head replay_entry;
/* Endpoint this buffer is associated with. */
171 struct kfilnd_ep *immed_end;
174 extern atomic_t kfilnd_rx_count;
/* Per-CPU work item used to process completion-queue events. */
178 struct kfilnd_cq_work {
/* CQ this work item drains. */
179 struct kfilnd_cq *cq;
/* CPU the work is scheduled on. */
180 unsigned int work_cpu;
181 struct work_struct work;
/* NOTE(review): lines appear to be missing from this copy here — the
 * fields below presumably belong to a CQ wrapper struct whose opening
 * line is not visible.
 */
185 struct kfilnd_ep *ep;
187 unsigned int cq_work_count;
/* Flexible array of per-CPU work items, cq_work_count entries. */
188 struct kfilnd_cq_work cq_works[];
/* NOTE(review): the opening of the endpoint structure appears to be
 * missing from this copy; the fields below are its members.
 */
192 /* The contexts for this CPT */
193 struct kfid_ep *end_tx;
194 struct kfid_ep *end_rx;
196 /* Corresponding CQs */
197 struct kfilnd_cq *end_tx_cq;
198 struct kfilnd_cq *end_rx_cq;
200 /* Specific config values for this endpoint */
201 struct kfilnd_dev *end_dev;
205 /* List of transactions. */
206 struct list_head tn_list;
207 spinlock_t tn_list_lock;
/* Replay machinery: transactions and immediate buffers queued here are
 * presumably re-posted from replay_work, paced by replay_timer — TODO
 * confirm against the replay implementation.
 */
210 struct list_head tn_replay;
211 struct list_head imm_buffer_replay;
212 spinlock_t replay_lock;
213 struct timer_list replay_timer;
214 struct work_struct replay_work;
215 atomic_t replay_count;
217 /* Key used to build the tag for tagged buffers. */
220 /* Pre-posted immediate buffers */
221 struct kfilnd_immediate_buffer end_immed_bufs[];
/* NOTE(review): the opening of the peer structure appears to be missing
 * from this copy; the fields below are its members.
 */
225 struct rhash_head kp_node;
226 struct rcu_head kp_rcu_head;
227 struct kfilnd_dev *kp_dev;
/* Non-zero once the peer has been flagged for removal (see
 * kfilnd_peer_deleted() below).
 */
231 atomic_t kp_remove_peer;
233 time64_t kp_last_alive;
/* Session keys: locally allocated key and the one reported by the peer. */
235 u32 kp_local_session_key;
236 u32 kp_remote_session_key;
/* Non-zero while a hello request is outstanding to this peer. */
237 atomic_t kp_hello_pending;
/* Timestamp (seconds) of the last hello sent. */
238 time64_t kp_hello_ts;
/* Return true if the peer has been marked for removal. */
241 static inline bool kfilnd_peer_deleted(struct kfilnd_peer *kp)
243 return atomic_read(&kp->kp_remove_peer) > 0;
246 /* Sets kp_hello_pending
247 * Returns true if it was already set
248 * Returns false otherwise
 */
250 static inline bool kfilnd_peer_set_check_hello_pending(struct kfilnd_peer *kp)
/* atomic_cmpxchg() returns the old value, so "== 1" is true only when
 * the flag was already set by another path.
 */
252 return (atomic_cmpxchg(&kp->kp_hello_pending, 0, 1) == 1);
/* Clear the hello-pending flag (hello answered, failed, or timed out). */
255 static inline void kfilnd_peer_clear_hello_pending(struct kfilnd_peer *kp)
257 atomic_set(&kp->kp_hello_pending, 0);
/* A peer is "new" while kp_version is 0 — presumably until a hello
 * response sets the negotiated protocol version; TODO confirm.
 */
260 static inline bool kfilnd_peer_is_new_peer(struct kfilnd_peer *kp)
262 return kp->kp_version == 0;
265 /* Peer needs hello if it is not up to date and there is not already a hello
268 * If hello was sent more than LND timeout seconds ago, and we never received a
269 * response, then send another one.
 */
271 static inline bool kfilnd_peer_needs_hello(struct kfilnd_peer *kp)
273 if (atomic_read(&kp->kp_hello_pending) == 0) {
274 if (kfilnd_peer_is_new_peer(kp))
/* NOTE(review): the "return true" for the new-peer case appears to be
 * missing from this copy — verify against upstream.
 */
/* NOTE(review): both ktime_before() operands here are plain second
 * counts (time64_t), not ktime_t ns values; consistent, but confirm
 * the type mixing is intentional.
 */
276 } else if (ktime_before(kp->kp_hello_ts + lnet_get_lnd_timeout(),
277 ktime_get_seconds())) {
278 /* Sent hello but never received reply */
280 "No response from %s(%p):0x%llx after %lld\n",
281 libcfs_nid2str(kp->kp_nid), kp, kp->kp_addr,
282 ktime_sub(ktime_get_seconds(), kp->kp_hello_ts));
/* Clear the stale pending flag so the caller can re-send a hello. */
284 kfilnd_peer_clear_hello_pending(kp);
/* NOTE(review): struct opening lines appear to be missing from this
 * copy. The first group of members presumably belongs to the fabric
 * wrapper (kfilnd_fab), the second to the domain wrapper (kfilnd_dom).
 */
292 struct list_head entry;
293 struct list_head dom_list;
294 struct mutex dom_list_lock;
295 struct kfid_fabric *fabric;
/* Domain wrapper members: back pointer to the fabric plus the KFI
 * domain handle.
 */
300 struct list_head entry;
301 struct list_head dev_list;
303 struct kfilnd_fab *fab;
304 struct kfid_domain *domain;
308 /* Transaction States */
/* NOTE(review): several enumerators referenced later in this file
 * (TN_STATE_INVALID, TN_STATE_IDLE, TN_STATE_IMM_SEND, TN_STATE_WAIT_COMP,
 * TN_STATE_FAIL, TN_STATE_IMM_RECV, TN_STATE_MAX) appear to be missing
 * from this copy of the enum.
 */
312 /* Shared initiator and target states. */
314 TN_STATE_WAIT_TAG_COMP,
316 /* Initiator immediate states. */
319 /* Initiator bulk states. */
320 TN_STATE_TAGGED_RECV_POSTED,
321 TN_STATE_SEND_FAILED,
323 TN_STATE_WAIT_TIMEOUT_COMP,
324 TN_STATE_WAIT_SEND_COMP,
325 TN_STATE_WAIT_TIMEOUT_TAG_COMP,
330 TN_STATE_WAIT_TAG_RMA_COMP,
332 /* Invalid max value. */
336 /* Base duration state stats. */
337 struct kfilnd_tn_duration_stat {
/* Total accumulated duration and the number of samples folded into it. */
338 atomic64_t accumulated_duration;
339 atomic_t accumulated_count;
342 /* Transaction state stats group into 22 buckets. Bucket zero corresponds to
343 * LNet message size of 0 bytes and buckets 1 through 21 correspond to LNet
344 * message sizes of 1 to 1048576 bytes increasing by a power of 2. LNet message
345 * sizes are rounded up to the nearest power of 2.
 */
347 #define KFILND_DATA_SIZE_BUCKETS 22U
348 #define KFILND_DATA_SIZE_MAX_SIZE (1U << (KFILND_DATA_SIZE_BUCKETS - 2))
349 struct kfilnd_tn_data_size_duration_stats {
350 struct kfilnd_tn_duration_stat data_size[KFILND_DATA_SIZE_BUCKETS];
/* Map an LNet message length to one of the stats buckets above. */
353 static inline unsigned int kfilnd_msg_len_to_data_size_bucket(size_t size)
/* Sizes >= 1 MiB are clamped into the last bucket. */
359 if (size >= KFILND_DATA_SIZE_MAX_SIZE)
360 return KFILND_DATA_SIZE_BUCKETS - 1;
362 /* Round size up to the nearest power of 2. */
/* NOTE(review): the computation of "bit" appears to be missing from
 * this copy — verify against upstream.
 */
367 return (unsigned int)bit;
370 /* One data size duration state bucket for each transaction state. */
371 struct kfilnd_tn_state_data_size_duration_stats {
372 struct kfilnd_tn_data_size_duration_stats state[TN_STATE_MAX];
/* NOTE(review): the opening of the per-NI device structure
 * (struct kfilnd_dev) appears to be missing from this copy; the fields
 * below are its members.
 */
376 struct list_head kfd_list; /* chain on kfid_devs */
377 struct lnet_ni *kfd_ni;
378 enum kfilnd_object_states kfd_state;
380 /* KFI LND domain the device is associated with. */
381 struct kfilnd_dom *dom;
383 /* Fields specific to kfabric operation */
385 struct kfid_ep *kfd_sep;
386 struct kfid_av *kfd_av;
387 struct kfilnd_ep **kfd_endpoints;
389 /* Map of LNet NI CPTs to endpoints. */
390 struct kfilnd_ep **cpt_to_endpoint;
392 /* Hash of LNet NIDs to KFI addresses. */
393 struct rhashtable peer_cache;
395 /* Per LNet NI states. */
396 struct kfilnd_tn_state_data_size_duration_stats initiator_state_stats;
397 struct kfilnd_tn_state_data_size_duration_stats target_state_stats;
398 struct kfilnd_tn_data_size_duration_stats initiator_stats;
399 struct kfilnd_tn_data_size_duration_stats target_stats;
401 /* Per LNet NI debugfs stats. */
402 struct dentry *dev_dir;
403 struct dentry *initiator_state_stats_file;
404 struct dentry *initiator_stats_file;
405 struct dentry *target_state_stats_file;
406 struct dentry *target_stats_file;
407 struct dentry *reset_stats_file;
409 /* Physical NIC address. */
410 unsigned int nic_addr;
/* Presumably a counter used to allocate local session keys — confirm. */
411 atomic_t session_keys;
414 /* Invalid checksum value is treated as no checksum. */
415 /* TODO: Module parameter to disable checksum? */
416 #define NO_CHECKSUM 0x0
418 /* Hello message header. */
419 struct kfilnd_hello_msg {
420 /* Support kfilnd version. */
423 /* Base RX context peer should use. */
426 /* Session key used by peer. */
429 /* RX context count peer can target. */
/* NOTE(review): the field declarations of this struct appear to be
 * missing from this copy; only the field comments survive.
 */
433 /* Immediate message header. */
434 struct kfilnd_immed_msg {
435 /* Entire LNet header needed by the destination to match incoming
 * message.
 */
438 struct lnet_hdr_nid4 hdr;
440 /* Entire LNet message payload. */
444 /* Bulk request message header. */
445 struct kfilnd_bulk_req_msg {
446 /* Entire LNet header needed by the destination to match incoming
 * message.
 */
449 struct lnet_hdr_nid4 hdr;
451 /* Specific RX context the target must target to push/pull LNet
 * payload.
 */
456 /* Memory key needed by the target to push/pull LNet payload. */
460 /* Kfilnd message. Includes base transport header plus embedded protocol
 * headers.
 */
/* NOTE(review): the transport header field declarations (magic, version,
 * type, length, checksum, source/target NIDs) appear to be missing from
 * this copy; only their comments survive.
 */
464 /* Unique kfilnd magic. */
467 /* Version of the kfilnd protocol. */
470 /* Specific kfilnd protocol type. */
476 /* Number of bytes in message. */
479 /* Checksum of entire message. 0 is checksum disabled. */
482 /* Message LNet source NID. */
485 /* Message LNet target NID. */
488 /* Embedded protocol headers. Must remain at bottom. */
490 struct kfilnd_immed_msg immed;
491 struct kfilnd_bulk_req_msg bulk_req;
492 struct kfilnd_hello_msg hello;
496 #define KFILND_MSG_MAGIC LNET_PROTO_KFI_MAGIC /* unique magic */
498 #define KFILND_MSG_VERSION_1 0x1
499 #define KFILND_MSG_VERSION KFILND_MSG_VERSION_1
501 /* Get the KFI RX context from a KFI RX address. RX context information is
502 * stored in the MSBs of the KFI address.
 */
504 #define KFILND_RX_CONTEXT(addr) ((addr) >> (64 - KFILND_FAB_RX_CTX_BITS))
/* Endpoint logging helpers; output is prefixed "<nid>:<context id>". */
506 #define KFILND_EP_DEBUG(ep, fmt, ...) \
507 CDEBUG(D_NET, "%s:%d " fmt "\n", \
508 libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
509 (ep)->end_context_id, ##__VA_ARGS__)
511 #define KFILND_EP_ERROR(ep, fmt, ...) \
512 CNETERR("%s:%d " fmt "\n", \
513 libcfs_nidstr(&(ep)->end_dev->kfd_ni->ni_nid), \
514 (ep)->end_context_id, ##__VA_ARGS__)
/* True when the transaction's peer pointer is usable. */
516 #define KFILND_TN_PEER_VALID(tn) \
517 !IS_ERR_OR_NULL((tn)->tn_kp)
/* Transaction logging; "dir" is "->" for initiator, "<-" for target.
 * NOTE(review): trailing continuation lines of the macro bodies below
 * appear to be missing from this copy — verify against upstream before
 * editing them.
 */
519 #define KFILND_TN_DIR_DEBUG(tn, fmt, dir, ...) \
520 CDEBUG(D_NET, "%s Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
521 msg_type_to_str(tn->msg_type), \
523 libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
524 (tn)->tn_ep->end_context_id, dir, \
525 libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
526 KFILND_TN_PEER_VALID(tn) ? \
527 KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
530 #define KFILND_TN_DEBUG(tn, fmt, ...) \
532 if ((tn)->is_initiator) \
533 KFILND_TN_DIR_DEBUG(tn, fmt, "->", ##__VA_ARGS__); \
535 KFILND_TN_DIR_DEBUG(tn, fmt, "<-", ##__VA_ARGS__); \
538 #define KFILND_TN_DIR_ERROR(tn, fmt, dir, ...) \
539 CNETERR("Transaction ID %p: %s:%u %s %s(%p):0x%llx " fmt "\n", \
541 libcfs_nidstr(&(tn)->tn_ep->end_dev->kfd_ni->ni_nid), \
542 (tn)->tn_ep->end_context_id, dir, \
543 libcfs_nid2str((tn)->tn_kp->kp_nid), tn->tn_kp, \
544 KFILND_TN_PEER_VALID(tn) ? \
545 KFILND_RX_CONTEXT((tn)->tn_kp->kp_addr) : 0, \
548 #define KFILND_TN_ERROR(tn, fmt, ...) \
550 if ((tn)->is_initiator) \
551 KFILND_TN_DIR_ERROR(tn, fmt, "->", ##__VA_ARGS__); \
553 KFILND_TN_DIR_ERROR(tn, fmt, "<-", ##__VA_ARGS__); \
556 /* TODO: Support NOOPs? */
557 enum kfilnd_msg_type {
558 /* Valid message types start at 1. */
/* NOTE(review): KFILND_MSG_INVALID (= 0) and KFILND_MSG_MAX are
 * referenced below but their enumerator lines appear to be missing from
 * this copy.
 */
561 /* Valid message types. */
562 KFILND_MSG_IMMEDIATE,
563 KFILND_MSG_BULK_PUT_REQ,
564 KFILND_MSG_BULK_GET_REQ,
565 KFILND_MSG_HELLO_REQ,
566 KFILND_MSG_HELLO_RSP,
568 /* Invalid max value. */
/* Map a message type to its printable name; out-of-range values map to
 * "KFILND_MSG_INVALID".
 */
572 static inline const char *msg_type_to_str(enum kfilnd_msg_type type)
574 static const char *str[KFILND_MSG_MAX] = {
575 [KFILND_MSG_INVALID] = "KFILND_MSG_INVALID",
576 [KFILND_MSG_IMMEDIATE] = "KFILND_MSG_IMMEDIATE",
577 [KFILND_MSG_BULK_PUT_REQ] = "KFILND_MSG_BULK_PUT_REQ",
578 [KFILND_MSG_BULK_GET_REQ] = "KFILND_MSG_BULK_GET_REQ",
579 [KFILND_MSG_HELLO_REQ] = "KFILND_MSG_HELLO_REQ",
580 [KFILND_MSG_HELLO_RSP] = "KFILND_MSG_HELLO_RSP",
583 if (type >= KFILND_MSG_MAX)
584 return "KFILND_MSG_INVALID";
/* NOTE(review): the final "return str[type];" appears to be missing
 * from this copy — verify against upstream.
 */
/* Map a transaction state to its printable name. */
589 static inline const char *tn_state_to_str(enum tn_states type)
591 static const char *str[TN_STATE_MAX] = {
592 [TN_STATE_INVALID] = "TN_STATE_INVALID",
593 [TN_STATE_IDLE] = "TN_STATE_IDLE",
594 [TN_STATE_WAIT_TAG_COMP] = "TN_STATE_WAIT_TAG_COMP",
595 [TN_STATE_IMM_SEND] = "TN_STATE_IMM_SEND",
596 [TN_STATE_TAGGED_RECV_POSTED] = "TN_STATE_TAGGED_RECV_POSTED",
597 [TN_STATE_SEND_FAILED] = "TN_STATE_SEND_FAILED",
598 [TN_STATE_WAIT_COMP] = "TN_STATE_WAIT_COMP",
599 [TN_STATE_WAIT_TIMEOUT_COMP] = "TN_STATE_WAIT_TIMEOUT_COMP",
600 [TN_STATE_WAIT_SEND_COMP] = "TN_STATE_WAIT_SEND_COMP",
601 [TN_STATE_WAIT_TIMEOUT_TAG_COMP] = "TN_STATE_WAIT_TIMEOUT_TAG_COMP",
602 [TN_STATE_FAIL] = "TN_STATE_FAIL",
603 [TN_STATE_IMM_RECV] = "TN_STATE_IMM_RECV",
604 [TN_STATE_WAIT_TAG_RMA_COMP] = "TN_STATE_WAIT_TAG_RMA_COMP",
/* NOTE(review): the closing of the table and the return statement appear
 * to be missing from this copy — verify against upstream.
 */
610 /* Transaction Events */
/* NOTE(review): several enumerators referenced by tn_event_to_str()
 * below (TN_EVENT_INVALID, TN_EVENT_INIT_BULK, TN_EVENT_TX_HELLO,
 * TN_EVENT_TX_OK, TN_EVENT_TX_FAIL, TN_EVENT_TAG_RX_OK, TN_EVENT_TIMEOUT,
 * TN_EVENT_RX_HELLO, TN_EVENT_RX_OK, TN_EVENT_RX_FAIL, TN_EVENT_MAX)
 * appear to be missing from this copy of the enum.
 */
614 /* Initiator events. */
615 TN_EVENT_INIT_IMMEDIATE,
621 TN_EVENT_TAG_RX_FAIL,
622 TN_EVENT_TAG_RX_CANCEL,
629 TN_EVENT_INIT_TAG_RMA,
630 TN_EVENT_SKIP_TAG_RMA,
632 TN_EVENT_TAG_TX_FAIL,
634 /* Invalid max value. */
/* Map a transaction event to its printable name. */
638 static inline const char *tn_event_to_str(enum tn_events type)
640 static const char *str[TN_EVENT_MAX] = {
641 [TN_EVENT_INVALID] = "TN_EVENT_INVALID",
642 [TN_EVENT_INIT_IMMEDIATE] = "TN_EVENT_INIT_IMMEDIATE",
643 [TN_EVENT_INIT_BULK] = "TN_EVENT_INIT_BULK",
644 [TN_EVENT_TX_HELLO] = "TN_EVENT_TX_HELLO",
645 [TN_EVENT_TX_OK] = "TN_EVENT_TX_OK",
646 [TN_EVENT_TX_FAIL] = "TN_EVENT_TX_FAIL",
647 [TN_EVENT_TAG_RX_OK] = "TN_EVENT_TAG_RX_OK",
648 [TN_EVENT_TAG_RX_FAIL] = "TN_EVENT_TAG_RX_FAIL",
649 [TN_EVENT_TAG_RX_CANCEL] = "TN_EVENT_TAG_RX_CANCEL",
650 [TN_EVENT_TIMEOUT] = "TN_EVENT_TIMEOUT",
651 [TN_EVENT_RX_HELLO] = "TN_EVENT_RX_HELLO",
652 [TN_EVENT_RX_OK] = "TN_EVENT_RX_OK",
653 [TN_EVENT_RX_FAIL] = "TN_EVENT_RX_FAIL",
654 [TN_EVENT_INIT_TAG_RMA] = "TN_EVENT_INIT_TAG_RMA",
655 [TN_EVENT_SKIP_TAG_RMA] = "TN_EVENT_SKIP_TAG_RMA",
656 [TN_EVENT_TAG_TX_FAIL] = "TN_EVENT_TAG_TX_FAIL",
/* NOTE(review): the closing of the table and the return statement appear
 * to be missing from this copy — verify against upstream.
 */
/* Wrapper pairing a wire message with (presumably) its length —
 * NOTE(review): additional members appear to be missing from this copy.
 */
662 struct kfilnd_transaction_msg {
663 struct kfilnd_msg *msg;
667 /* Initiator and target transaction structure. */
668 struct kfilnd_transaction {
669 /* Endpoint list transaction lives on. */
670 struct list_head tn_entry;
671 struct mutex tn_lock; /* to serialize events */
672 int tn_status; /* return code from ops */
673 struct kfilnd_ep *tn_ep; /* endpoint we operate under */
674 enum tn_states tn_state; /* current state of Tn */
675 struct lnet_msg *tn_lntmsg; /* LNet msg to finalize */
676 struct lnet_msg *tn_getreply; /* GET LNet msg to finalize */
678 bool is_initiator; /* Initiated LNet transfer. */
680 /* Transaction send message and target address. */
681 kfi_addr_t tn_target_addr;
682 struct kfilnd_peer *tn_kp;
683 struct kfilnd_transaction_msg tn_tx_msg;
685 /* Transaction multi-receive buffer and associated receive message. */
686 struct kfilnd_immediate_buffer *tn_posted_buf;
687 struct kfilnd_transaction_msg tn_rx_msg;
689 /* LNet buffer used to register a memory region or perform a RMA
 * operation.
 */
692 struct bio_vec tn_kiov[LNET_MAX_IOV];
693 unsigned int tn_num_iovec;
695 /* LNet transaction payload byte count. */
698 /* Bulk transaction buffer is sink or source buffer. */
701 /* Memory region and remote key used to cover initiator's buffer. */
704 /* RX context used to perform response operations to a Put/Get
705 * request. This is required since the request initiator locks in a
706 * transaction to a specific RX context.
 */
708 u16 tn_response_mr_key;
711 /* Immediate data used to convey transaction state from LNet target to
 * LNet initiator.
 */
716 /* Bulk operation timeout timer. */
717 struct timer_list timeout_timer;
718 struct work_struct timeout_work;
720 /* Transaction health status. */
721 enum lnet_msg_hstatus hstatus;
723 /* Transaction deadline. */
730 /* Fields used to replay transaction. */
731 struct list_head replay_entry;
732 enum tn_events replay_event;
735 enum kfilnd_msg_type msg_type;
/* NOTE(review): several field declarations (payload byte count,
 * sink/source flag, MR fields, immediate data, deadline) appear to be
 * missing from this copy; only their comments survive.
 */
/* Send a hello request to peer @kp on the endpoint associated with
 * CPT @cpt of device @dev; returns 0 or a negative errno.
 */
738 int kfilnd_send_hello_request(struct kfilnd_dev *dev, int cpt,
739 struct kfilnd_peer *kp);
741 #endif /* _KFILND_ */