LNET_MSG_STATUS_REMOTE_ERROR,
LNET_MSG_STATUS_REMOTE_DROPPED,
LNET_MSG_STATUS_REMOTE_TIMEOUT,
- LNET_MSG_STATUS_NETWORK_TIMEOUT
+ LNET_MSG_STATUS_NETWORK_TIMEOUT,
+ LNET_MSG_STATUS_END,
+};
+
+/*
+ * Tracks an outstanding GET/PUT that expects a REPLY/ACK. The tracker
+ * is tied to its parent MD via rspt_mdh and sits on a per-CPT waiting
+ * list so the message can be failed if no response arrives before
+ * rspt_deadline.
+ */
+struct lnet_rsp_tracker {
+ /* chain on the waiting list */
+ struct list_head rspt_on_list;
+ /* cpt to lock */
+ int rspt_cpt;
+ /* nid of next hop */
+ lnet_nid_t rspt_next_hop_nid;
+ /* deadline of the REPLY/ACK */
+ ktime_t rspt_deadline;
+ /* parent MD */
+ struct lnet_handle_md rspt_mdh;
};
struct lnet_msg {
enum lnet_msg_hstatus msg_health_status;
/* This is a recovery message */
bool msg_recovery;
+ /* the number of times a transmission has been retried */
+ int msg_retry_count;
/* flag to indicate that we do not want to resend this message */
bool msg_no_resend;
};
struct lnet_libmd {
- struct list_head md_list;
- struct lnet_libhandle md_lh;
- struct lnet_me *md_me;
- char *md_start;
- unsigned int md_offset;
- unsigned int md_length;
- unsigned int md_max_size;
- int md_threshold;
- int md_refcount;
- unsigned int md_options;
- unsigned int md_flags;
- unsigned int md_niov; /* # frags at end of struct */
- void *md_user_ptr;
- struct lnet_eq *md_eq;
- struct lnet_handle_md md_bulk_handle;
+ struct list_head md_list;
+ struct lnet_libhandle md_lh;
+ struct lnet_me *md_me;
+ char *md_start;
+ unsigned int md_offset;
+ unsigned int md_length;
+ unsigned int md_max_size;
+ int md_threshold;
+ int md_refcount;
+ unsigned int md_options;
+ unsigned int md_flags;
+ unsigned int md_niov; /* # frags at end of struct */
+ void *md_user_ptr;
+ /* REPLY/ACK response tracker for this MD; presumably NULL when no
+  * response is expected — confirm against lnet_rsp_tracker users */
+ struct lnet_rsp_tracker *md_rspt_ptr;
+ struct lnet_eq *md_eq;
+ struct lnet_handle_md md_bulk_handle;
union {
- struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
+ struct kvec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
} md_iov;
};
LNET_NET_STATE_DELETING
};
-#define LNET_NI_STATE_INIT (1 << 0)
-#define LNET_NI_STATE_ACTIVE (1 << 1)
-#define LNET_NI_STATE_FAILED (1 << 2)
-#define LNET_NI_STATE_RECOVERY_PENDING (1 << 3)
-#define LNET_NI_STATE_DELETING (1 << 4)
+enum lnet_ni_state {
+ /* initial state when NI is created */
+ LNET_NI_STATE_INIT = 0,
+ /* set when NI is brought up */
+ LNET_NI_STATE_ACTIVE,
+ /* set when NI is being shutdown */
+ LNET_NI_STATE_DELETING,
+};
+
+/* flags stored in ni_recovery_state (protected by lnet_ni_lock()) */
+/* NI recovery is in progress */
+#define LNET_NI_RECOVERY_PENDING BIT(0)
+/* NI recovery attempt failed */
+#define LNET_NI_RECOVERY_FAILED BIT(1)
enum lnet_stats_type {
LNET_STATS_TYPE_SEND = 0,
struct lnet_comm_count el_drop_stats;
};
+/*
+ * Counters of health failures detected locally, embedded per-NI as
+ * ni_hstats. Field names appear to correspond to the local
+ * lnet_msg_hstatus values — TODO confirm against the full enum.
+ */
+struct lnet_health_local_stats {
+ atomic_t hlt_local_interrupt;
+ atomic_t hlt_local_dropped;
+ atomic_t hlt_local_aborted;
+ atomic_t hlt_local_no_route;
+ atomic_t hlt_local_timeout;
+ atomic_t hlt_local_error;
+};
+
+/*
+ * Counters of health failures attributed to the remote side or the
+ * network (mirrors LNET_MSG_STATUS_REMOTE_* and NETWORK_TIMEOUT),
+ * embedded per-peer-NI as lpni_hstats.
+ */
+struct lnet_health_remote_stats {
+ atomic_t hlt_remote_dropped;
+ atomic_t hlt_remote_timeout;
+ atomic_t hlt_remote_error;
+ atomic_t hlt_network_timeout;
+};
+
struct lnet_net {
/* chain on the ln_nets */
struct list_head net_list;
/* chain on the lnet_net structure */
struct list_head ni_netlist;
- /* chain on net_ni_cpt */
- struct list_head ni_cptlist;
-
/* chain on the recovery queue */
struct list_head ni_recovery;
/* my health status */
struct lnet_ni_status *ni_status;
- /* NI FSM */
- __u32 ni_state;
+ /* NI FSM. Protected by lnet_ni_lock() */
+ enum lnet_ni_state ni_state;
+
+ /* Recovery state. Protected by lnet_ni_lock() */
+ __u32 ni_recovery_state;
/* per NI LND tunables */
struct lnet_lnd_tunables ni_lnd_tunables;
/* NI statistics */
struct lnet_element_stats ni_stats;
+ struct lnet_health_local_stats ni_hstats;
/* physical device CPT */
int ni_dev_cpt;
atomic_t ni_healthv;
/*
+ * Set to 1 by the LND when it receives an event telling it the device
+ * has gone into a fatal state. Set to 0 when the LND receives an
+ * event telling it the device is back online.
+ */
+ atomic_t ni_fatal_error_on;
+
+ /*
* equivalent interfaces to use
* This is an array because socklnd bonding can still be configured
*/
struct lnet_peer_net *lpni_peer_net;
/* statistics kept on each peer NI */
struct lnet_element_stats lpni_stats;
+ struct lnet_health_remote_stats lpni_hstats;
/* spin lock protecting credits and lpni_txq / lpni_rtrq */
spinlock_t lpni_lock;
/* # tx credits available */
#define LNET_PEER_NI_NON_MR_PREF (1 << 0)
/* peer is being recovered. */
#define LNET_PEER_NI_RECOVERY_PENDING (1 << 1)
+/* recovery ping failed */
+#define LNET_PEER_NI_RECOVERY_FAILED (1 << 2)
/* peer is being deleted */
-#define LNET_PEER_NI_DELETING (1 << 2)
+#define LNET_PEER_NI_DELETING (1 << 3)
struct lnet_peer {
/* chain on pt_peer_list */
struct list_head ln_mt_localNIRecovq;
/* local NIs to recover */
struct list_head ln_mt_peerNIRecovq;
+ /*
+ * An array of queues for GET/PUT waiting for REPLY/ACK respectively.
+ * There are CPT number of queues. Since response trackers will be
+ * added on the fast path we can't afford to grab the exclusive
+ * net lock to protect these queues. The CPT will be calculated
+ * based on the mdh cookie.
+ */
+ struct list_head **ln_mt_rstq;
/* recovery eq handler */
struct lnet_handle_eq ln_mt_eqh;