#define LNET_ROUTE_ATTR_MAX (__LNET_ROUTE_ATTR_MAX_PLUS_ONE - 1)
+/** enum lnet_peer_ni_attrs - LNet peer NI netlink properties
+ * attributes that describe LNet peer 'NI'.
+ * These values are used to piece together
+ * messages for sending and receiving.
+ *
+ * @LNET_PEER_NI_ATTR_UNSPEC: unspecified attribute to catch errors
+ *
+ * @LNET_PEER_NI_ATTR_HDR: grouping for LNet peer data
+ * (NLA_NUL_STRING)
+ * @LNET_PEER_NI_ATTR_PRIMARY_NID: primary NID of this peer (NLA_STRING)
+ * @LNET_PEER_NI_ATTR_MULTIRAIL: Do we support MultiRail ? (NLA_FLAG)
+ * @LNET_PEER_NI_ATTR_STATE: Bitfields of the peer state (NLA_U32)
+ * @LNET_PEER_NI_ATTR_PEER_NI_LIST: List of remote peers we can reach
+ * (NLA_NESTED)
+ */
+enum lnet_peer_ni_attrs {
+ LNET_PEER_NI_ATTR_UNSPEC = 0,
+
+ LNET_PEER_NI_ATTR_HDR,
+ LNET_PEER_NI_ATTR_PRIMARY_NID,
+ LNET_PEER_NI_ATTR_MULTIRAIL,
+ LNET_PEER_NI_ATTR_STATE,
+ LNET_PEER_NI_ATTR_PEER_NI_LIST,
+ __LNET_PEER_NI_ATTR_MAX_PLUS_ONE,
+};
+
+#define LNET_PEER_NI_ATTR_MAX (__LNET_PEER_NI_ATTR_MAX_PLUS_ONE - 1)
+
+/** enum lnet_peer_ni_list_attrs - LNet remote peer netlink
+ * properties attributes that
+ * describe remote LNet peer 'NI'.
+ * These values are used to piece
+ * together messages for sending
+ * and receiving.
+ *
+ * @LNET_PEER_NI_LIST_ATTR_UNSPEC: unspecified attribute to catch
+ * errors
+ *
+ * @LNET_PEER_NI_LIST_ATTR_NID: remote peer's NID (NLA_STRING)
+ * @LNET_PEER_NI_LIST_ATTR_UDSP_INFO: remote peer's UDSP info
+ * (NLA_NESTED)
+ * @LNET_PEER_NI_LIST_ATTR_STATE: state of remote peer
+ * (NLA_STRING)
+ *
+ * @LNET_PEER_NI_LIST_ATTR_MAX_TX_CREDITS: Maximum TX credits for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_CUR_TX_CREDITS: Current TX credits for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_MIN_TX_CREDITS: Minimum TX credits for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_QUEUE_BUF_COUNT: Size of TX queue buffer
+ * (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_CUR_RTR_CREDITS: Current router credits for
+ * remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_MIN_RTR_CREDITS: Minimum router credits for
+ * remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_REFCOUNT: Remote peer reference count
+ * (NLA_U32)
+ * @LNET_PEER_NI_LIST_ATTR_STATS_COUNT: Remote peer general stats,
+ * reports sent, recieved, and
+ * dropped packets. (NLA_NESTED)
+ *
+ * @LNET_PEER_NI_LIST_ATTR_SENT_STATS: Remote peer sent stats,
+ * reports gets, puts, acks, and
+ * hello packets. (NLA_NESTED)
+ * @LNET_PEER_NI_LIST_ATTR_RECV_STATS: Remote peer recieved stats,
+ * reports gets, puts, acks, and
+ * hello packets. (NLA_NESTED)
+ * @LNET_PEER_NI_LIST_ATTR_DROP_STATS: Remote peer dropped stats,
+ * reports gets, puts, acks, and
+ * hello packets. (NLA_NESTED)
+ * @LNET_PEER_NI_LIST_ATTR_HEALTH_STATS: Report the stats about the
+ * health of the remote peer.
+ * (NLA_NESTED)
+ */
+enum lnet_peer_ni_list_attr {
+ LNET_PEER_NI_LIST_ATTR_UNSPEC = 0,
+
+ LNET_PEER_NI_LIST_ATTR_NID,
+ LNET_PEER_NI_LIST_ATTR_UDSP_INFO,
+ LNET_PEER_NI_LIST_ATTR_STATE,
+
+ LNET_PEER_NI_LIST_ATTR_MAX_TX_CREDITS,
+ LNET_PEER_NI_LIST_ATTR_CUR_TX_CREDITS,
+ LNET_PEER_NI_LIST_ATTR_MIN_TX_CREDITS,
+ LNET_PEER_NI_LIST_ATTR_QUEUE_BUF_COUNT,
+ LNET_PEER_NI_LIST_ATTR_CUR_RTR_CREDITS,
+ LNET_PEER_NI_LIST_ATTR_MIN_RTR_CREDITS,
+ LNET_PEER_NI_LIST_ATTR_REFCOUNT,
+ LNET_PEER_NI_LIST_ATTR_STATS_COUNT,
+
+ LNET_PEER_NI_LIST_ATTR_SENT_STATS,
+ LNET_PEER_NI_LIST_ATTR_RECV_STATS,
+ LNET_PEER_NI_LIST_ATTR_DROP_STATS,
+ LNET_PEER_NI_LIST_ATTR_HEALTH_STATS,
+
+ __LNET_PEER_NI_LIST_ATTR_MAX_PLUS_ONE,
+};
+
+#define LNET_PEER_NI_LIST_ATTR_MAX (__LNET_PEER_NI_LIST_ATTR_MAX_PLUS_ONE - 1)
+
+/** enum lnet_peer_ni_list_stats_count - LNet remote peer traffic
+ * stats netlink properties
+ * attributes that provide
+ * traffic stats on the
+ * remote LNet peer 'NI'.
+ * These values are used to
+ * piece together messages
+ * for sending and receiving.
+ *
+ * @LNET_PEER_NI_LIST_STATS_COUNT_ATTR_UNSPEC: unspecified attribute to
+ * catch errors
+ *
+ * @LNET_PEER_NI_LIST_STATS_COUNT_ATTR_SEND_COUNT: Number of sent packets for
+ * remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_STATS_COUNT_ATTR_RECV_COUNT: Numebr of recieved packets
+ * for remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_STATS_COUNT_ATTR_DROP_COUNT: Number of dropped packets
+ * for remote peer (NLA_U32)
+ */
+enum lnet_peer_ni_list_stats_count {
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_UNSPEC = 0,
+
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_SEND_COUNT,
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_RECV_COUNT,
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_DROP_COUNT,
+ __LNET_PEER_NI_LIST_STATS_COUNT_ATTR_MAX_PLUS_ONE,
+};
+
+#define LNET_PEER_NI_LIST_STATS_COUNT_ATTR_MAX (__LNET_PEER_NI_LIST_STATS_COUNT_ATTR_MAX_PLUS_ONE - 1)
+
+/** enum lnet_peer_ni_list_stats - LNet remote peer stats netlink
+ * properties attributes that
+ * provide stats on the remote
+ * LNet peer 'NI'. These values are
+ * used to piece together messages
+ * for sending and receiving.
+ *
+ * @LNET_PEER_NI_LIST_STATS_ATTR_UNSPEC: unspecified attribute to catch
+ * errors
+ *
+ * @LNET_PEER_NI_LIST_STATS_ATTR_PUT: PUT message count for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_STATS_ATTR_GET: GET message count for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_STATS_ATTR_REPLY: REPLY message count for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_STATS_ATTR_ACK: ACK message count for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_STATS_ATTR_HEALTH: HELLO message count for remote
+ * peer (NLA_U32)
+ */
+enum lnet_peer_ni_list_stats {
+ LNET_PEER_NI_LIST_STATS_ATTR_UNSPEC = 0,
+
+ LNET_PEER_NI_LIST_STATS_ATTR_PUT,
+ LNET_PEER_NI_LIST_STATS_ATTR_GET,
+ LNET_PEER_NI_LIST_STATS_ATTR_REPLY,
+ LNET_PEER_NI_LIST_STATS_ATTR_ACK,
+ LNET_PEER_NI_LIST_STATS_ATTR_HELLO,
+ __LNET_PEER_NI_LIST_STATS_ATTR_MAX_PLUS_ONE,
+};
+
+#define LNET_PEER_NI_LIST_STATS_ATTR_MAX (__LNET_PEER_NI_LIST_STATS_ATTR_MAX_PLUS_ONE - 1)
+
+/** enum lnet_peer_ni_list_health_stats - LNet remote peer health
+ * stats netlink properties
+ * attributes that provide
+ * stats on the health of a
+ * remote LNet peer 'NI'.
+ * These values are used to
+ * piece together messages
+ * for sending and receiving.
+ *
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_UNSPEC: unspecified attribute to
+ * catch errors
+ *
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_VALUE: Health level of remote
+ * peer (NLA_S32)
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_DROP: drop message state for
+ * remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_TIMEOUT: timeout set for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_ERROR: total errors for remote
+ * peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NETWORK_TIMEOUT: network timeout for
+ * remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_PING_COUNT: number of pings for
+ * remote peer (NLA_U32)
+ * @LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NEXT_PING: timestamp for next ping
+ * sent by remote peer
+ * (NLA_S64)
+ */
+enum lnet_peer_ni_list_health_stats {
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_UNSPEC = 0,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_PAD = LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_UNSPEC,
+
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_VALUE,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_DROPPED,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_TIMEOUT,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_ERROR,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NETWORK_TIMEOUT,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_PING_COUNT,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NEXT_PING,
+
+ __LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_MAX_PLUS_ONE,
+};
+
+#define LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_MAX (__LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_MAX_PLUS_ONE - 1)
+
/** LNet netlink ping API */
-/** enum lnet_ping_atts - LNet ping netlink properties
+/** enum lnet_ping_attr - LNet ping netlink properties
* attributes to describe ping format
* These values are used to piece together
* messages for sending and receiving.
#define LNET_PING_ATTR_MAX (__LNET_PING_ATTR_MAX_PLUS_ONE - 1)
-/** enium lnet_ping_peer_ni_attr - LNet peer ni information reported by
+/** enum lnet_ping_peer_ni_attr - LNet peer ni information reported by
* ping command. A list of these are
* returned with a ping request.
*
}
#endif /* !HAVE_NETLINK_CALLBACK_START */
+/** LNet peer handling */
+struct lnet_genl_processid_list {
+ unsigned int lgpl_index;
+ unsigned int lgpl_count;
+ GENRADIX(struct lnet_processid) lgpl_list;
+};
+
+static inline struct lnet_genl_processid_list *
+lnet_peer_dump_ctx(struct netlink_callback *cb)
+{
+ return (struct lnet_genl_processid_list *)cb->args[0];
+}
+
+static int lnet_peer_ni_show_done(struct netlink_callback *cb)
+{
+ struct lnet_genl_processid_list *plist = lnet_peer_dump_ctx(cb);
+
+ if (plist) {
+ genradix_free(&plist->lgpl_list);
+ CFS_FREE_PTR(plist);
+ }
+ cb->args[0] = 0;
+
+ return 0;
+}
+
+/* LNet peer ->start() handler for GET requests */
+static int lnet_peer_ni_show_start(struct netlink_callback *cb)
+{
+ struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
+#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
+ struct netlink_ext_ack *extack = NULL;
+#endif
+ struct lnet_genl_processid_list *plist;
+ int msg_len = genlmsg_len(gnlh);
+ int rc = 0;
+
+#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
+ extack = cb->extack;
+#endif
+ mutex_lock(&the_lnet.ln_api_mutex);
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ NL_SET_ERR_MSG(extack, "Network is down");
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return -ENETDOWN;
+ }
+
+ CFS_ALLOC_PTR(plist);
+ if (!plist) {
+ NL_SET_ERR_MSG(extack, "No memory for peer list");
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return -ENOMEM;
+ }
+
+ genradix_init(&plist->lgpl_list);
+ plist->lgpl_count = 0;
+ plist->lgpl_index = 0;
+ cb->args[0] = (long)plist;
+
+ if (!msg_len) {
+ struct lnet_peer_table *ptable;
+ int cpt;
+
+ cfs_percpt_for_each(ptable, cpt, the_lnet.ln_peer_tables) {
+ struct lnet_peer *lp;
+
+ list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
+ struct lnet_processid *lpi;
+
+ lpi = genradix_ptr_alloc(&plist->lgpl_list,
+ plist->lgpl_count++,
+ GFP_KERNEL);
+ if (!lpi) {
+ NL_SET_ERR_MSG(extack, "failed to allocate NID");
+ GOTO(report_err, rc = -ENOMEM);
+ }
+
+ lpi->pid = LNET_PID_LUSTRE;
+ lpi->nid = lp->lp_primary_nid;
+ }
+ }
+ } else {
+ struct nlattr *params = genlmsg_data(gnlh);
+ struct nlattr *attr;
+ int rem;
+
+ nla_for_each_nested(attr, params, rem) {
+ struct nlattr *nid;
+ int rem2;
+
+ if (nla_type(attr) != LN_SCALAR_ATTR_LIST)
+ continue;
+
+ nla_for_each_nested(nid, attr, rem2) {
+ char addr[LNET_NIDSTR_SIZE];
+ struct lnet_processid *id;
+
+ if (nla_type(nid) != LN_SCALAR_ATTR_VALUE ||
+ nla_strcmp(nid, "primary nid") != 0)
+ continue;
+
+ nid = nla_next(nid, &rem2);
+ if (nla_type(nid) != LN_SCALAR_ATTR_VALUE) {
+ NL_SET_ERR_MSG(extack,
+ "invalid primary nid param");
+ GOTO(report_err, rc = -EINVAL);
+ }
+
+ rc = nla_strscpy(addr, nid, sizeof(addr));
+ if (rc < 0) {
+ NL_SET_ERR_MSG(extack,
+ "failed to get primary nid param");
+ GOTO(report_err, rc);
+ }
+
+ id = genradix_ptr_alloc(&plist->lgpl_list,
+ plist->lgpl_count++,
+ GFP_KERNEL);
+ if (!id) {
+ NL_SET_ERR_MSG(extack, "failed to allocate NID");
+ GOTO(report_err, rc = -ENOMEM);
+ }
+
+ rc = libcfs_strid(id, strim(addr));
+ if (rc < 0) {
+ NL_SET_ERR_MSG(extack, "invalid NID");
+ GOTO(report_err, rc);
+ }
+ rc = 0;
+ }
+ }
+ }
+report_err:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ if (rc < 0)
+ lnet_peer_ni_show_done(cb);
+
+ return rc;
+}
+
+static const struct ln_key_list lnet_peer_ni_keys = {
+ .lkl_maxattr = LNET_PEER_NI_ATTR_MAX,
+ .lkl_list = {
+ [LNET_PEER_NI_ATTR_HDR] = {
+ .lkp_value = "peer",
+ .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
+ .lkp_data_type = NLA_NUL_STRING,
+ },
+ [LNET_PEER_NI_ATTR_PRIMARY_NID] = {
+ .lkp_value = "primary nid",
+ .lkp_data_type = NLA_STRING,
+ },
+ [LNET_PEER_NI_ATTR_MULTIRAIL] = {
+ .lkp_value = "Multi-Rail",
+ .lkp_data_type = NLA_FLAG
+ },
+ [LNET_PEER_NI_ATTR_STATE] = {
+ .lkp_value = "peer state",
+ .lkp_data_type = NLA_U32
+ },
+ [LNET_PEER_NI_ATTR_PEER_NI_LIST] = {
+ .lkp_value = "peer ni",
+ .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED,
+ },
+ },
+};
+
+static const struct ln_key_list lnet_peer_ni_list = {
+ .lkl_maxattr = LNET_PEER_NI_LIST_ATTR_MAX,
+ .lkl_list = {
+ [LNET_PEER_NI_LIST_ATTR_NID] = {
+ .lkp_value = "nid",
+ .lkp_data_type = NLA_STRING,
+ },
+ [LNET_PEER_NI_LIST_ATTR_UDSP_INFO] = {
+ .lkp_value = "udsp info",
+ .lkp_key_format = LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED,
+ },
+ [LNET_PEER_NI_LIST_ATTR_STATE] = {
+ .lkp_value = "state",
+ .lkp_data_type = NLA_STRING,
+ },
+ [LNET_PEER_NI_LIST_ATTR_MAX_TX_CREDITS] = {
+ .lkp_value = "max_ni_tx_credits",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_CUR_TX_CREDITS] = {
+ .lkp_value = "available_tx_credits",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_MIN_TX_CREDITS] = {
+ .lkp_value = "min_tx_credits",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_QUEUE_BUF_COUNT] = {
+ .lkp_value = "tx_q_num_of_buf",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_CUR_RTR_CREDITS] = {
+ .lkp_value = "available_rtr_credits",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_MIN_RTR_CREDITS] = {
+ .lkp_value = "min_rtr_credits",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_REFCOUNT] = {
+ .lkp_value = "refcount",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_ATTR_STATS_COUNT] = {
+ .lkp_value = "statistics",
+ .lkp_key_format = LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED
+ },
+ [LNET_PEER_NI_LIST_ATTR_SENT_STATS] = {
+ .lkp_value = "sent_stats",
+ .lkp_key_format = LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED
+ },
+ [LNET_PEER_NI_LIST_ATTR_RECV_STATS] = {
+ .lkp_value = "received_stats",
+ .lkp_key_format = LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED
+ },
+ [LNET_PEER_NI_LIST_ATTR_DROP_STATS] = {
+ .lkp_value = "dropped_stats",
+ .lkp_key_format = LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED
+ },
+ [LNET_PEER_NI_LIST_ATTR_HEALTH_STATS] = {
+ .lkp_value = "health stats",
+ .lkp_key_format = LNKF_MAPPING,
+ .lkp_data_type = NLA_NESTED
+ },
+ },
+};
+
+static const struct ln_key_list lnet_peer_ni_list_stats_count = {
+ .lkl_maxattr = LNET_PEER_NI_LIST_STATS_COUNT_ATTR_MAX,
+ .lkl_list = {
+ [LNET_PEER_NI_LIST_STATS_COUNT_ATTR_SEND_COUNT] = {
+ .lkp_value = "send_count",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_STATS_COUNT_ATTR_RECV_COUNT] = {
+ .lkp_value = "recv_count",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_STATS_COUNT_ATTR_DROP_COUNT] = {
+ .lkp_value = "drop_count",
+ .lkp_data_type = NLA_U32,
+ },
+ },
+};
+
+static const struct ln_key_list lnet_peer_ni_list_stats = {
+ .lkl_maxattr = LNET_PEER_NI_LIST_STATS_ATTR_MAX,
+ .lkl_list = {
+ [LNET_PEER_NI_LIST_STATS_ATTR_PUT] = {
+ .lkp_value = "put",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_STATS_ATTR_GET] = {
+ .lkp_value = "get",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_STATS_ATTR_REPLY] = {
+ .lkp_value = "reply",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_STATS_ATTR_ACK] = {
+ .lkp_value = "ack",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_STATS_ATTR_HELLO] = {
+ .lkp_value = "hello",
+ .lkp_data_type = NLA_U32,
+ },
+ },
+};
+
+static const struct ln_key_list lnet_peer_ni_list_health = {
+ .lkl_maxattr = LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_MAX,
+ .lkl_list = {
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_VALUE] = {
+ .lkp_value = "health value",
+ .lkp_data_type = NLA_S32,
+ },
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_DROPPED] = {
+ .lkp_value = "dropped",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_TIMEOUT] = {
+ .lkp_value = "timeout",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_ERROR] = {
+ .lkp_value = "error",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NETWORK_TIMEOUT] = {
+ .lkp_value = "network timeout",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_PING_COUNT] = {
+ .lkp_value = "ping_count",
+ .lkp_data_type = NLA_U32,
+ },
+ [LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NEXT_PING] = {
+ .lkp_value = "next_ping",
+ .lkp_data_type = NLA_S64,
+ },
+ },
+};
+
+static int lnet_peer_ni_show_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct lnet_genl_processid_list *plist = lnet_peer_dump_ctx(cb);
+ struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
+#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
+ struct netlink_ext_ack *extack = NULL;
+#endif
+ int portid = NETLINK_CB(cb->skb).portid;
+ int seq = cb->nlh->nlmsg_seq;
+ int idx = plist->lgpl_index;
+ int rc = 0;
+
+#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
+ extack = cb->extack;
+#endif
+ if (!plist->lgpl_count) {
+ NL_SET_ERR_MSG(extack, "No peers found");
+ GOTO(send_error, rc = -ENOENT);
+ }
+
+ if (!idx) {
+ const struct ln_key_list *all[] = {
+ &lnet_peer_ni_keys, &lnet_peer_ni_list,
+ &udsp_info_list, &udsp_info_pref_nids_list,
+ &udsp_info_pref_nids_list,
+ &lnet_peer_ni_list_stats_count,
+ &lnet_peer_ni_list_stats, /* send_stats */
+ &lnet_peer_ni_list_stats, /* recv_stats */
+ &lnet_peer_ni_list_stats, /* drop stats */
+ &lnet_peer_ni_list_health,
+ NULL
+ };
+
+ rc = lnet_genl_send_scalar_list(msg, portid, seq,
+ &lnet_family,
+ NLM_F_CREATE | NLM_F_MULTI,
+ LNET_CMD_PEERS, all);
+ if (rc < 0) {
+ NL_SET_ERR_MSG(extack, "failed to send key table");
+ GOTO(send_error, rc);
+ }
+ }
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ NL_SET_ERR_MSG(extack, "Network is down");
+ GOTO(unlock_api_mutex, rc = -ENETDOWN);
+ }
+
+ while (idx < plist->lgpl_count) {
+ struct lnet_processid *id;
+ struct lnet_peer_ni *lpni;
+ struct nlattr *nid_list;
+ struct lnet_peer *lp;
+ int count = 1;
+ void *hdr;
+
+ id = genradix_ptr(&plist->lgpl_list, idx++);
+ if (nid_is_lo0(&id->nid))
+ continue;
+
+ hdr = genlmsg_put(msg, portid, seq, &lnet_family,
+ NLM_F_MULTI, LNET_CMD_PEERS);
+ if (!hdr) {
+ NL_SET_ERR_MSG(extack, "failed to send values");
+ genlmsg_cancel(msg, hdr);
+ GOTO(unlock_api_mutex, rc = -EMSGSIZE);
+ }
+
+ lp = lnet_find_peer(&id->nid);
+ if (!lp) {
+ NL_SET_ERR_MSG(extack, "cannot find peer");
+ GOTO(unlock_api_mutex, rc = -ENOENT);
+ }
+
+ if (idx == 1)
+ nla_put_string(msg, LNET_PEER_NI_ATTR_HDR, "");
+
+ nla_put_string(msg, LNET_PEER_NI_ATTR_PRIMARY_NID,
+ libcfs_nidstr(&lp->lp_primary_nid));
+ if (lnet_peer_is_multi_rail(lp))
+ nla_put_flag(msg, LNET_PEER_NI_ATTR_MULTIRAIL);
+
+ if (gnlh->version >= 3)
+ nla_put_u32(msg, LNET_PEER_NI_ATTR_STATE, lp->lp_state);
+
+ nid_list = nla_nest_start(msg, LNET_PEER_NI_ATTR_PEER_NI_LIST);
+ while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
+ struct nlattr *peer_nid = nla_nest_start(msg, count++);
+
+ nla_put_string(msg, LNET_PEER_NI_LIST_ATTR_NID,
+ libcfs_nidstr(&lpni->lpni_nid));
+
+ if (gnlh->version >= 4) {
+ rc = lnet_udsp_info_send(msg,
+ LNET_PEER_NI_LIST_ATTR_UDSP_INFO,
+ &lpni->lpni_nid, true);
+ if (rc < 0) {
+ lnet_peer_decref_locked(lp);
+ NL_SET_ERR_MSG(extack,
+ "failed to get UDSP info");
+ GOTO(unlock_api_mutex, rc);
+ }
+ }
+
+ if (lnet_isrouter(lpni) ||
+ lnet_peer_aliveness_enabled(lpni)) {
+ nla_put_string(msg, LNET_PEER_NI_LIST_ATTR_STATE,
+ lnet_is_peer_ni_alive(lpni) ?
+ "up" : "down");
+ } else {
+ nla_put_string(msg, LNET_PEER_NI_LIST_ATTR_STATE,
+ "NA");
+ }
+
+ if (gnlh->version) {
+ struct lnet_ioctl_element_msg_stats lpni_msg_stats;
+ struct nlattr *send_stats_list, *send_stats;
+ struct nlattr *recv_stats_list, *recv_stats;
+ struct nlattr *drop_stats_list, *drop_stats;
+ struct nlattr *health_list, *health_stats;
+ struct lnet_ioctl_element_stats stats;
+ struct nlattr *stats_attr, *ni_stats;
+
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_MAX_TX_CREDITS,
+ lpni->lpni_net ?
+ lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_CUR_TX_CREDITS,
+ lpni->lpni_txcredits);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_MIN_TX_CREDITS,
+ lpni->lpni_mintxcredits);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_QUEUE_BUF_COUNT,
+ lpni->lpni_txqnob);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_CUR_RTR_CREDITS,
+ lpni->lpni_rtrcredits);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_MIN_RTR_CREDITS,
+ lpni->lpni_minrtrcredits);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_ATTR_REFCOUNT,
+ kref_read(&lpni->lpni_kref));
+
+ memset(&stats, 0, sizeof(stats));
+ stats.iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
+ LNET_STATS_TYPE_SEND);
+ stats.iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
+ LNET_STATS_TYPE_RECV);
+ stats.iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
+ LNET_STATS_TYPE_DROP);
+
+ stats_attr = nla_nest_start(msg,
+ LNET_PEER_NI_LIST_ATTR_STATS_COUNT);
+ ni_stats = nla_nest_start(msg, 0);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_SEND_COUNT,
+ stats.iel_send_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_RECV_COUNT,
+ stats.iel_recv_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_COUNT_ATTR_DROP_COUNT,
+ stats.iel_drop_count);
+ nla_nest_end(msg, ni_stats);
+ nla_nest_end(msg, stats_attr);
+
+ if (gnlh->version < 2)
+ goto skip_msg_stats;
+
+ lnet_usr_translate_stats(&lpni_msg_stats, &lpni->lpni_stats);
+
+ send_stats_list = nla_nest_start(msg,
+ LNET_PEER_NI_LIST_ATTR_SENT_STATS);
+ send_stats = nla_nest_start(msg, 0);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_PUT,
+ lpni_msg_stats.im_send_stats.ico_put_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_GET,
+ lpni_msg_stats.im_send_stats.ico_get_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_REPLY,
+ lpni_msg_stats.im_send_stats.ico_reply_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_ACK,
+ lpni_msg_stats.im_send_stats.ico_ack_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_HELLO,
+ lpni_msg_stats.im_send_stats.ico_hello_count);
+ nla_nest_end(msg, send_stats);
+ nla_nest_end(msg, send_stats_list);
+
+ recv_stats_list = nla_nest_start(msg,
+ LNET_PEER_NI_LIST_ATTR_RECV_STATS);
+ recv_stats = nla_nest_start(msg, 0);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_PUT,
+ lpni_msg_stats.im_recv_stats.ico_put_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_GET,
+ lpni_msg_stats.im_recv_stats.ico_get_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_REPLY,
+ lpni_msg_stats.im_recv_stats.ico_reply_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_ACK,
+ lpni_msg_stats.im_recv_stats.ico_ack_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_HELLO,
+ lpni_msg_stats.im_recv_stats.ico_hello_count);
+ nla_nest_end(msg, recv_stats);
+ nla_nest_end(msg, recv_stats_list);
+
+ drop_stats_list = nla_nest_start(msg,
+ LNET_PEER_NI_LIST_ATTR_DROP_STATS);
+ drop_stats = nla_nest_start(msg, 0);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_PUT,
+ lpni_msg_stats.im_drop_stats.ico_put_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_GET,
+ lpni_msg_stats.im_drop_stats.ico_get_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_REPLY,
+ lpni_msg_stats.im_drop_stats.ico_reply_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_ACK,
+ lpni_msg_stats.im_drop_stats.ico_ack_count);
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_STATS_ATTR_HELLO,
+ lpni_msg_stats.im_drop_stats.ico_hello_count);
+ nla_nest_end(msg, drop_stats);
+ nla_nest_end(msg, drop_stats_list);
+
+ health_list = nla_nest_start(msg,
+ LNET_PEER_NI_LIST_ATTR_HEALTH_STATS);
+ health_stats = nla_nest_start(msg, 0);
+ nla_put_s32(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_VALUE,
+ atomic_read(&lpni->lpni_healthv));
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_DROPPED,
+ atomic_read(&lpni->lpni_hstats.hlt_remote_dropped));
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_TIMEOUT,
+ atomic_read(&lpni->lpni_hstats.hlt_remote_timeout));
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_ERROR,
+ atomic_read(&lpni->lpni_hstats.hlt_remote_error));
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NETWORK_TIMEOUT,
+ atomic_read(&lpni->lpni_hstats.hlt_network_timeout));
+ nla_put_u32(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_PING_COUNT,
+ lpni->lpni_ping_count);
+ nla_put_s64(msg,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_NEXT_PING,
+ lpni->lpni_next_ping,
+ LNET_PEER_NI_LIST_HEALTH_STATS_ATTR_PAD);
+ nla_nest_end(msg, health_stats);
+ nla_nest_end(msg, health_list);
+ }
+skip_msg_stats:
+ nla_nest_end(msg, peer_nid);
+ }
+ nla_nest_end(msg, nid_list);
+
+ genlmsg_end(msg, hdr);
+ lnet_peer_decref_locked(lp);
+ }
+ plist->lgpl_index = idx;
+unlock_api_mutex:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+send_error:
+ return lnet_nl_send_error(cb->skb, portid, seq, rc);
+};
+
+#ifndef HAVE_NETLINK_CALLBACK_START
+static int lnet_old_peer_ni_show_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ if (!cb->args[0]) {
+ int rc = lnet_peer_ni_show_start(cb);
+
+ if (rc < 0)
+ return rc;
+ }
+
+ return lnet_peer_ni_show_dump(msg, cb);
+}
+#endif
+
static inline struct lnet_genl_ping_list *
lnet_ping_dump_ctx(struct netlink_callback *cb)
{
static const struct genl_multicast_group lnet_mcast_grps[] = {
{ .name = "ip2net", },
{ .name = "net", },
+ { .name = "peer", },
{ .name = "route", },
{ .name = "ping", },
};
.doit = lnet_net_cmd,
},
{
+ .cmd = LNET_CMD_PEERS,
+#ifdef HAVE_NETLINK_CALLBACK_START
+ .start = lnet_peer_ni_show_start,
+ .dumpit = lnet_peer_ni_show_dump,
+#else
+ .dumpit = lnet_old_peer_ni_show_dump,
+#endif
+ .done = lnet_peer_ni_show_done,
+ },
+ {
.cmd = LNET_CMD_ROUTES,
#ifdef HAVE_NETLINK_CALLBACK_START
.start = lnet_route_show_start,
return rc;
}
+static int yaml_lnet_peer_display(yaml_parser_t *reply, bool list_only)
+{
+ yaml_emitter_t debug;
+ int rc;
+
+ rc = yaml_emitter_initialize(&debug);
+ if (rc == 0)
+ goto out_err;
+
+ yaml_emitter_set_indent(&debug, 6);
+ yaml_emitter_set_output_file(&debug, stdout);
+
+ if (list_only) {
+ bool done = false;
+
+ while (!done) {
+ yaml_event_t event;
+ char *value;
+
+ rc = yaml_parser_parse(reply, &event);
+ if (rc == 0)
+ goto report_reply_error;
+
+ if (event.type != YAML_SCALAR_EVENT)
+ goto merge_event;
+
+ value = (char *)event.data.scalar.value;
+ if (strcmp(value, "peer") == 0) {
+ yaml_event_delete(&event);
+
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG,
+ (yaml_char_t *)"peer list",
+ strlen("peer list"),
+ 1, 0,
+ YAML_PLAIN_SCALAR_STYLE);
+ } else if (strcmp(value, "primary nid") == 0) {
+ yaml_event_delete(&event);
+
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG,
+ (yaml_char_t *)"nid",
+ strlen("nid"),
+ 1, 0,
+ YAML_PLAIN_SCALAR_STYLE);
+ rc = yaml_emitter_emit(&debug, &event);
+ if (rc == 0)
+ break;
+
+ /* Now print NID address */
+ rc = yaml_parser_parse(reply, &event);
+ if (rc == 0)
+ goto report_reply_error;
+
+ rc = yaml_emitter_emit(&debug, &event);
+ if (rc == 0)
+ break;
+
+ /* skip reset */
+ while (event.type != YAML_MAPPING_END_EVENT) {
+ rc = yaml_parser_parse(reply, &event);
+ if (rc == 0)
+ goto report_reply_error;
+ }
+
+ /* we can have map end, seq end, map end or
+ * just map end event. If we see seq end event
+ * then skip to next mapping end event
+ */
+ rc = yaml_parser_parse(reply, &event);
+ if (rc == 0)
+ goto report_reply_error;
+
+ if (event.type == YAML_SEQUENCE_END_EVENT) {
+ yaml_event_delete(&event);
+
+ rc = yaml_parser_parse(reply, &event);
+ if (rc == 0)
+ goto report_reply_error;
+ }
+ }
+merge_event:
+ rc = yaml_emitter_emit(&debug, &event);
+ if (rc == 0)
+ break;
+
+ done = (event.type == YAML_DOCUMENT_END_EVENT);
+ }
+ } else {
+ yaml_document_t errmsg;
+
+ rc = yaml_parser_load(reply, &errmsg);
+ if (rc == 1)
+ rc = yaml_emitter_dump(&debug, &errmsg);
+ yaml_document_delete(&errmsg);
+ }
+out_err:
+ if (rc == 0)
+ yaml_emitter_log_error(&debug, stderr);
+report_reply_error:
+ yaml_emitter_delete(&debug);
+
+ return rc;
+}
+
+static int yaml_lnet_peer(char *prim_nid, char *nidstr, bool disable_mr,
+ int health_value, int state, bool list_only,
+ int version, int flags)
+{
+ struct nl_sock *sk = NULL;
+ const char *msg = NULL;
+ yaml_emitter_t output;
+ yaml_parser_t reply;
+ yaml_event_t event;
+ int rc;
+
+ /* Create Netlink emitter to send request to kernel */
+ sk = nl_socket_alloc();
+ if (!sk)
+ return -EOPNOTSUPP;
+
+ /* Setup parser to receive Netlink packets */
+ rc = yaml_parser_initialize(&reply);
+ if (rc == 0) {
+ nl_socket_free(sk);
+ return -EOPNOTSUPP;
+ }
+
+ rc = yaml_parser_set_input_netlink(&reply, sk, false);
+ if (rc == 0) {
+ msg = yaml_parser_get_reader_error(&reply);
+ goto free_reply;
+ }
+
+ /* Create Netlink emitter to send request to kernel */
+ rc = yaml_emitter_initialize(&output);
+ if (rc == 0) {
+ msg = "failed to initialize emitter";
+ goto free_reply;
+ }
+
+ rc = yaml_emitter_set_output_netlink(&output, sk, LNET_GENL_NAME,
+ version, LNET_CMD_PEERS, flags);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_emitter_open(&output);
+ yaml_document_start_event_initialize(&event, NULL, NULL, NULL, 0);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_mapping_start_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_MAP_TAG,
+ 1, YAML_ANY_MAPPING_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG,
+ (yaml_char_t *)"peer",
+ strlen("peer"), 1, 0,
+ YAML_PLAIN_SCALAR_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ if (prim_nid) {
+ yaml_sequence_start_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_SEQ_TAG,
+ 1,
+ YAML_BLOCK_SEQUENCE_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_mapping_start_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_MAP_TAG,
+ 1,
+ YAML_BLOCK_MAPPING_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG,
+ (yaml_char_t *)"primary nid",
+ strlen("primary nid"), 1, 0,
+ YAML_PLAIN_SCALAR_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG,
+ (yaml_char_t *)prim_nid,
+ strlen(prim_nid), 1, 0,
+ YAML_PLAIN_SCALAR_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_mapping_end_event_initialize(&event);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_sequence_end_event_initialize(&event);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+ } else {
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG,
+ (yaml_char_t *)"",
+ strlen(""), 1, 0,
+ YAML_PLAIN_SCALAR_STYLE);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+ }
+
+ yaml_mapping_end_event_initialize(&event);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ yaml_document_end_event_initialize(&event, 0);
+ rc = yaml_emitter_emit(&output, &event);
+ if (rc == 0)
+ goto emitter_error;
+
+ rc = yaml_emitter_close(&output);
+emitter_error:
+ if (rc == 0) {
+ yaml_emitter_log_error(&output, stderr);
+ rc = -EINVAL;
+ } else {
+ rc = yaml_lnet_peer_display(&reply, list_only);
+ if (rc == 0) {
+ msg = yaml_parser_get_reader_error(&reply);
+ /* If we didn't find any peers just be silent */
+ if (msg && strcmp(msg, "No peers found") == 0)
+ rc = 1;
+ }
+
+ }
+ yaml_emitter_delete(&output);
+free_reply:
+ if (rc == 0) {
+ yaml_lnet_print_error(flags, "peer", msg);
+ rc = -EINVAL;
+ }
+ yaml_parser_delete(&reply);
+ nl_socket_free(sk);
+
+ return rc == 1 ? 0 : rc;
+}
+
static int jt_set_peer_ni_value(int argc, char **argv)
{
return set_value_helper(argc, argv, lustre_lnet_config_peer_ni_healthv);
int rc, opt;
struct cYAML *err_rc = NULL, *show_rc = NULL;
long int detail = 0;
-
const char *const short_opts = "hn:v::";
const struct option long_opts[] = {
- { .name = "help", .has_arg = no_argument, .val = 'h' },
- { .name = "nid", .has_arg = required_argument, .val = 'n' },
- { .name = "verbose", .has_arg = optional_argument, .val = 'v' },
- { .name = NULL } };
+ { .name = "help", .has_arg = no_argument, .val = 'h' },
+ { .name = "nid", .has_arg = required_argument, .val = 'n' },
+ { .name = "verbose", .has_arg = optional_argument, .val = 'v' },
+ { .name = NULL }
+ };
rc = check_cmd(peer_cmds, "peer", "show", 1, argc, argv);
if (rc)
}
}
+ rc = yaml_lnet_peer(nid, NULL, false, -1, -1, false, detail,
+ NLM_F_DUMP);
+ if (rc <= 0) {
+ if (rc == -EOPNOTSUPP)
+ goto old_api;
+ return rc;
+ }
+old_api:
rc = lustre_lnet_show_peer(nid, (int) detail, -1, &show_rc, &err_rc,
false);
if (rc)
return rc;
- rc = lustre_lnet_list_peer(-1, &list_rc, &err_rc);
-
+ rc = yaml_lnet_peer(NULL, NULL, false, -1, -1, true, 0, NLM_F_DUMP);
+ if (rc <= 0) {
+ if (rc == -EOPNOTSUPP)
+ goto old_api;
+ return rc;
+ }
+old_api:
if (rc != LUSTRE_CFG_RC_NO_ERR)
cYAML_print_tree2file(stderr, err_rc);
else if (list_rc)