*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/include/lnet/lib-types.h
*
#include <linux/uio.h>
#include <linux/semaphore.h>
#include <linux/types.h>
+#include <linux/kref.h>
+#include <net/genetlink.h>
+#include <uapi/linux/lnet/lnet-nl.h>
#include <uapi/linux/lnet/lnet-dlc.h>
#include <uapi/linux/lnet/lnetctl.h>
#include <uapi/linux/lnet/nidstr.h>
enum lnet_msg_hstatus msg_health_status;
/* This is a recovery message */
bool msg_recovery;
+ /* force an RDMA even if the message size is < 4K */
+ bool msg_rdma_force;
/* the number of times a transmission has been retried */
int msg_retry_count;
/* flag to indicate that we do not want to resend this message */
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
+
+ /* get dma_dev priority */
+ unsigned int (*lnd_get_dev_prio)(struct lnet_ni *ni,
+ unsigned int dev_idx);
};
struct lnet_tx_queue {
/* Recovery state. Protected by lnet_ni_lock() */
__u32 ni_recovery_state;
+ /* When to send the next recovery ping */
+ time64_t ni_next_ping;
+ /* How many pings sent during current recovery period did not receive
+ * a reply. NB: reset whenever _any_ message arrives on this NI
+ */
+ unsigned int ni_ping_count;
+
/* per NI LND tunables */
struct lnet_lnd_tunables ni_lnd_tunables;
__u32 ni_sel_priority;
/*
- * equivalent interfaces to use
- * This is an array because socklnd bonding can still be configured
+ * equivalent interface to use
*/
- char *ni_interfaces[LNET_INTERFACES_NUM];
+ char *ni_interface;
struct net *ni_net_ns; /* original net namespace */
};
/* peer's NID */
lnet_nid_t lpni_nid;
/* # refs */
- atomic_t lpni_refcount;
+ struct kref lpni_kref;
/* health value for the peer */
atomic_t lpni_healthv;
/* recovery ping mdh */
/* source NID to use during discovery */
lnet_nid_t lp_disc_src_nid;
+ /* destination NID to use during discovery */
+ lnet_nid_t lp_disc_dst_nid;
/* net to perform discovery on */
__u32 lp_disc_net_id;
/* peer net health */
int lpn_healthv;
- /* time of last router net check attempt */
- time64_t lpn_rtrcheck_timestamp;
+ /* time of next router ping on this net */
+ time64_t lpn_next_ping;
/* selection sequence number */
__u32 lpn_seq;
int lr_seq; /* sequence for round-robin */
__u32 lr_hops; /* how far I am */
unsigned int lr_priority; /* route priority */
- bool lr_alive; /* cached route aliveness */
+ atomic_t lr_alive; /* cached route aliveness */
bool lr_single_hop; /* this route is single-hop */
};
struct list_head ln_udsp_list;
};
+/* Netlink attribute policy used to validate incoming LNet "scalar"
+ * attributes (LN_SCALAR_ATTR_* — presumably declared in
+ * <uapi/linux/lnet/lnet-nl.h>, included above; confirm against that
+ * header): a nested list container, u16 metadata fields describing
+ * size/index/type/key-format, and a string-typed value.
+ */
+static const struct nla_policy scalar_attr_policy[LN_SCALAR_CNT + 1] = {
+	[LN_SCALAR_ATTR_LIST]		= { .type = NLA_NESTED },
+	[LN_SCALAR_ATTR_LIST_SIZE]	= { .type = NLA_U16 },
+	[LN_SCALAR_ATTR_INDEX]		= { .type = NLA_U16 },
+	[LN_SCALAR_ATTR_NLA_TYPE]	= { .type = NLA_U16 },
+	[LN_SCALAR_ATTR_VALUE]		= { .type = NLA_STRING },
+	[LN_SCALAR_ATTR_KEY_FORMAT]	= { .type = NLA_U16 },
+};
+
+/* Emit @data — a NULL-terminated(?) array of struct ln_key_list — into
+ * @msg as generic-netlink scalar attributes for @family, stamped with
+ * @portid/@seq/@flags/@cmd.
+ * NOTE(review): return convention (0 or negative errno) and the exact
+ * termination rule for @data are inferred from kernel idiom — confirm
+ * against the function's definition before relying on them.
+ */
+int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
+			       const struct genl_family *family, int flags,
+			       u8 cmd, const struct ln_key_list *data[]);
+
+/* Special workaround for pre-4.19 kernels to send error messages
+ * from dumpit routines. Newer kernels will send message with
+ * NL_SET_ERR_MSG information by default if NETLINK_EXT_ACK is set.
+ *
+ * Returns 0 when @error is 0, the length of the appended error
+ * message on the legacy path, -ENOMEM if the skb has no room, or
+ * @error unchanged when the kernel handles extended ACKs itself.
+ */
+static inline int lnet_nl_send_error(struct sk_buff *msg, int portid, int seq,
+				     int error)
+{
+#ifndef HAVE_NL_DUMP_WITH_EXT_ACK
+	struct nlmsghdr *nlh;
+
+	/* Success — nothing to report back to userland */
+	if (!error)
+		return 0;
+
+	/* Append an NLMSG_ERROR header carrying the error payload */
+	nlh = nlmsg_put(msg, portid, seq, NLMSG_ERROR, sizeof(error), 0);
+	if (!nlh)
+		return -ENOMEM;
+#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
+	/* netlink_ack() grew an extack parameter on newer kernels */
+	netlink_ack(msg, nlh, error, NULL);
+#else
+	netlink_ack(msg, nlh, error);
+#endif
+	return nlmsg_len(nlh);
+#else
+	/* Kernel reports dumpit errors natively via NETLINK_EXT_ACK */
+	return error;
+#endif
+}
+
#endif