#include <uapi/linux/lnet/lnet-dlc.h>
#include <uapi/linux/lnet/lnetctl.h>
+#include <uapi/linux/lnet/nidstr.h>
/* Max payload size */
#define LNET_MAX_PAYLOAD LNET_MTU
-#define LNET_MAX_IOV (LNET_MAX_PAYLOAD >> PAGE_SHIFT)
+/** limit on the number of fragments in discontiguous MDs */
+#define LNET_MAX_IOV 256
/*
* This is the maximum health value.
unsigned int msg_wanted;
unsigned int msg_offset;
unsigned int msg_niov;
- struct kvec *msg_iov;
- lnet_kiov_t *msg_kiov;
+ struct bio_vec *msg_kiov;
struct lnet_event msg_ev;
struct lnet_hdr msg_hdr;
#define lh_entry(ptr, type, member) \
((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
-struct lnet_eq {
- struct list_head eq_list;
- struct lnet_libhandle eq_lh;
- unsigned long eq_enq_seq;
- unsigned long eq_deq_seq;
- unsigned int eq_size;
- lnet_eq_handler_t eq_callback;
- struct lnet_event *eq_events;
- int **eq_refs; /* percpt refcount for EQ */
-};
-
struct lnet_me {
struct list_head me_list;
- struct lnet_libhandle me_lh;
+ int me_cpt;
struct lnet_process_id me_match_id;
unsigned int me_portal;
unsigned int me_pos; /* hash offset in mt_hash */
unsigned int md_niov; /* # frags at end of struct */
void *md_user_ptr;
struct lnet_rsp_tracker *md_rspt_ptr;
- struct lnet_eq *md_eq;
+ lnet_handler_t md_handler;
struct lnet_handle_md md_bulk_handle;
- union {
- struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
- } md_iov;
+ struct bio_vec md_kiov[LNET_MAX_IOV];
};
-#define LNET_MD_FLAG_ZOMBIE (1 << 0)
-#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
-#define LNET_MD_FLAG_ABORTED (1 << 2)
+#define LNET_MD_FLAG_ZOMBIE BIT(0)
+#define LNET_MD_FLAG_AUTO_UNLINK BIT(1)
+#define LNET_MD_FLAG_ABORTED BIT(2)
struct lnet_test_peer {
/* info about peers we are trying to fail */
struct socket;
struct lnet_lnd {
- /* fields managed by portals */
- struct list_head lnd_list; /* stash in the LND table */
-
/* fields initialized by the LND */
__u32 lnd_type;
int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
/* In data movement APIs below, payload buffers are described as a set
- * of 'niov' fragments which are...
- * EITHER
- * in virtual memory (struct kvec *iov != NULL)
- * OR
- * in pages (kernel only: plt_kiov_t *kiov != NULL).
+ * of 'niov' fragments which are in pages.
* The LND may NOT overwrite these fragment descriptors.
 * An 'offset' may specify a byte offset within the set of
* fragments to start from
* credit if the LND does flow control. */
int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
/* lnet_parse() has had to delay processing of this message
/* notification of peer down */
void (*lnd_notify_peer_down)(lnet_nid_t peer);
- /* query of peer aliveness */
- void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, time64_t *when);
-
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
};
bool net_tunables_set;
/* procedural interface */
- struct lnet_lnd *net_lnd;
+ const struct lnet_lnd *net_lnd;
/* list of NIs on this net */
struct list_head net_ni_list;
struct lnet_ping_buffer {
int pb_nnis;
atomic_t pb_refcnt;
+ bool pb_needs_post;
struct lnet_ping_info pb_info;
};
};
/* Preferred path added due to traffic on non-MR peer_ni */
-#define LNET_PEER_NI_NON_MR_PREF (1 << 0)
+#define LNET_PEER_NI_NON_MR_PREF BIT(0)
/* peer is being recovered. */
-#define LNET_PEER_NI_RECOVERY_PENDING (1 << 1)
+#define LNET_PEER_NI_RECOVERY_PENDING BIT(1)
/* recovery ping failed */
-#define LNET_PEER_NI_RECOVERY_FAILED (1 << 2)
+#define LNET_PEER_NI_RECOVERY_FAILED BIT(2)
/* peer is being deleted */
-#define LNET_PEER_NI_DELETING (1 << 3)
+#define LNET_PEER_NI_DELETING BIT(3)
struct lnet_peer {
/* chain on pt_peer_list */
/* primary NID of the peer */
lnet_nid_t lp_primary_nid;
+ /* source NID to use during discovery */
+ lnet_nid_t lp_disc_src_nid;
+
/* net to perform discovery on */
__u32 lp_disc_net_id;
/* tasks waiting on discovery of this peer */
wait_queue_head_t lp_dc_waitq;
+
+ /* cached peer aliveness */
+ bool lp_alive;
};
/*
*
* A peer is marked ROUTER if it indicates so in the feature bit.
*/
-#define LNET_PEER_MULTI_RAIL (1 << 0) /* Multi-rail aware */
-#define LNET_PEER_NO_DISCOVERY (1 << 1) /* Peer disabled discovery */
-#define LNET_PEER_ROUTER_ENABLED (1 << 2) /* router feature enabled */
+#define LNET_PEER_MULTI_RAIL BIT(0) /* Multi-rail aware */
+#define LNET_PEER_NO_DISCOVERY BIT(1) /* Peer disabled discovery */
+#define LNET_PEER_ROUTER_ENABLED BIT(2) /* router feature enabled */
/*
* A peer is marked CONFIGURED if it was configured by DLC.
* A peer that was created as the result of inbound traffic will not
* be marked at all.
*/
-#define LNET_PEER_CONFIGURED (1 << 3) /* Configured via DLC */
-#define LNET_PEER_DISCOVERED (1 << 4) /* Peer was discovered */
-#define LNET_PEER_REDISCOVER (1 << 5) /* Discovery was disabled */
+#define LNET_PEER_CONFIGURED BIT(3) /* Configured via DLC */
+#define LNET_PEER_DISCOVERED BIT(4) /* Peer was discovered */
+#define LNET_PEER_REDISCOVER BIT(5) /* Discovery was disabled */
/*
* A peer is marked DISCOVERING when discovery is in progress.
* The other flags below correspond to stages of discovery.
*/
-#define LNET_PEER_DISCOVERING (1 << 6) /* Discovering */
-#define LNET_PEER_DATA_PRESENT (1 << 7) /* Remote peer data present */
-#define LNET_PEER_NIDS_UPTODATE (1 << 8) /* Remote peer info uptodate */
-#define LNET_PEER_PING_SENT (1 << 9) /* Waiting for REPLY to Ping */
-#define LNET_PEER_PUSH_SENT (1 << 10) /* Waiting for ACK of Push */
-#define LNET_PEER_PING_FAILED (1 << 11) /* Ping send failure */
-#define LNET_PEER_PUSH_FAILED (1 << 12) /* Push send failure */
+#define LNET_PEER_DISCOVERING BIT(6) /* Discovering */
+#define LNET_PEER_DATA_PRESENT BIT(7) /* Remote peer data present */
+#define LNET_PEER_NIDS_UPTODATE BIT(8) /* Remote peer info uptodate */
+#define LNET_PEER_PING_SENT BIT(9) /* Waiting for REPLY to Ping */
+#define LNET_PEER_PUSH_SENT BIT(10) /* Waiting for ACK of Push */
+#define LNET_PEER_PING_FAILED BIT(11) /* Ping send failure */
+#define LNET_PEER_PUSH_FAILED BIT(12) /* Push send failure */
/*
* A ping can be forced as a way to fix up state, or as a manual
* intervention by an admin.
* A push can be forced in circumstances that would normally not
* allow for one to happen.
*/
-#define LNET_PEER_FORCE_PING (1 << 13) /* Forced Ping */
-#define LNET_PEER_FORCE_PUSH (1 << 14) /* Forced Push */
+#define LNET_PEER_FORCE_PING BIT(13) /* Forced Ping */
+#define LNET_PEER_FORCE_PUSH BIT(14) /* Forced Push */
/* force delete even if router */
-#define LNET_PEER_RTR_NI_FORCE_DEL (1 << 15)
+#define LNET_PEER_RTR_NI_FORCE_DEL BIT(15)
/* gw undergoing alive discovery */
-#define LNET_PEER_RTR_DISCOVERY (1 << 16)
+#define LNET_PEER_RTR_DISCOVERY BIT(16)
/* gw has undergone discovery (does not indicate success or failure) */
-#define LNET_PEER_RTR_DISCOVERED (1 << 17)
+#define LNET_PEER_RTR_DISCOVERED BIT(17)
+
+/* peer is marked for deletion */
+#define LNET_PEER_MARK_DELETION BIT(18)
struct lnet_peer_net {
/* chain on lp_peer_nets */
/* Net ID */
__u32 lpn_net_id;
+ /* peer net health */
+ int lpn_healthv;
+
/* time of last router net check attempt */
time64_t lpn_rtrcheck_timestamp;
__u32 lr_hops; /* how far I am */
unsigned int lr_priority; /* route priority */
bool lr_alive; /* cached route aliveness */
+ bool lr_single_hop; /* this route is single-hop */
};
#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
struct lnet_rtrbuf {
struct list_head rb_list; /* chain on rbp_bufs */
struct lnet_rtrbufpool *rb_pool; /* owning pool */
- lnet_kiov_t rb_kiov[0]; /* the buffer space */
+ struct bio_vec rb_kiov[0]; /* the buffer space */
};
#define LNET_PEER_HASHSIZE 503 /* prime! */
enum lnet_match_flags {
/* Didn't match anything */
- LNET_MATCHMD_NONE = (1 << 0),
+ LNET_MATCHMD_NONE = BIT(0),
/* Matched OK */
- LNET_MATCHMD_OK = (1 << 1),
+ LNET_MATCHMD_OK = BIT(1),
/* Must be discarded */
- LNET_MATCHMD_DROP = (1 << 2),
+ LNET_MATCHMD_DROP = BIT(2),
/* match and buffer is exhausted */
- LNET_MATCHMD_EXHAUSTED = (1 << 3),
+ LNET_MATCHMD_EXHAUSTED = BIT(3),
/* match or drop */
LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
/* Options for struct lnet_portal::ptl_options */
-#define LNET_PTL_LAZY (1 << 0)
-#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match, request portal */
+#define LNET_PTL_LAZY BIT(0)
+#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */
+#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */
/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
int ln_nportals;
/* the vector of portals */
struct lnet_portal **ln_portals;
- /* percpt ME containers */
- struct lnet_res_container **ln_me_containers;
/* percpt MD container */
struct lnet_res_container **ln_md_containers;
/* Event Queue container */
struct lnet_res_container ln_eq_container;
- wait_queue_head_t ln_eq_waitq;
spinlock_t ln_eq_wait_lock;
unsigned int ln_remote_nets_hbits;
* ln_api_mutex.
*/
struct lnet_handle_md ln_ping_target_md;
- struct lnet_handle_eq ln_ping_target_eq;
+ lnet_handler_t ln_ping_target_handler;
struct lnet_ping_buffer *ln_ping_target;
atomic_t ln_ping_target_seqno;
* buffer may linger a while after it has been unlinked, in
* which case the event handler cleans up.
*/
- struct lnet_handle_eq ln_push_target_eq;
+ lnet_handler_t ln_push_target_handler;
struct lnet_handle_md ln_push_target_md;
struct lnet_ping_buffer *ln_push_target;
int ln_push_target_nnis;
/* discovery event queue handle */
- struct lnet_handle_eq ln_dc_eqh;
+ lnet_handler_t ln_dc_handler;
/* discovery requests */
struct list_head ln_dc_request;
/* discovery working list */
/* uniquely identifies this ni in this epoch */
__u64 ln_interface_cookie;
/* registered LNDs */
- struct list_head ln_lnds;
+ const struct lnet_lnd *ln_lnds[NUM_LNDS];
/* test protocol compatibility flags */
- int ln_testprotocompat;
+ unsigned long ln_testprotocompat;
/* 0 - load the NIs from the mod params
* 1 - do not load the NIs from the mod params
* operations on the MD complete or when LNet has shut down.
*/
struct list_head **ln_mt_zombie_rstqs;
- /* recovery eq handler */
- struct lnet_handle_eq ln_mt_eqh;
+ /* recovery handler */
+ lnet_handler_t ln_mt_handler;
/*
* Completed when the discovery and monitor threads can enter their