*
* Copyright (c) 2015, James Simmons
*
- * Copyright (c) 2016, Intel Corporation.
+ * Copyright (c) 2016, 2017, Intel Corporation.
*
* Author:
* James Simmons <jsimmons@infradead.org>
#include <stdio.h>
#include <string.h>
#include <libcfs/util/ioctl.h>
+#include "liblnd.h"
#include "liblnetconfig.h"
-#include "cyaml.h"
static int
lustre_o2iblnd_show_tun(struct cYAML *lndparams,
lnd_cfg->lnd_fmr_cache) == NULL)
return LUSTRE_CFG_RC_OUT_OF_MEM;
+ if (cYAML_create_number(lndparams, "ntx",
+ lnd_cfg->lnd_ntx) == NULL)
+ return LUSTRE_CFG_RC_OUT_OF_MEM;
+
+ if (cYAML_create_number(lndparams, "conns_per_peer",
+ lnd_cfg->lnd_conns_per_peer) == NULL)
+ return LUSTRE_CFG_RC_OUT_OF_MEM;
+
+ return LUSTRE_CFG_RC_NO_ERR;
+}
+
+
+static int
+lustre_socklnd_show_tun(struct cYAML *lndparams,
+ struct lnet_ioctl_config_socklnd_tunables *lnd_cfg)
+{
if (cYAML_create_number(lndparams, "conns_per_peer",
lnd_cfg->lnd_conns_per_peer) == NULL)
return LUSTRE_CFG_RC_OUT_OF_MEM;
lustre_net_show_tunables(struct cYAML *tunables,
struct lnet_ioctl_config_lnd_cmn_tunables *cmn)
{
-
-
if (cYAML_create_number(tunables, "peer_timeout",
cmn->lct_peer_timeout)
== NULL)
__u32 net_type,
struct lnet_lnd_tunables *lnd)
{
- int rc = LUSTRE_CFG_RC_NO_ERR;
+ int rc = LUSTRE_CFG_RC_NO_MATCH;
if (net_type == O2IBLND)
rc = lustre_o2iblnd_show_tun(lnd_tunables,
&lnd->lnd_tun_u.lnd_o2ib);
+ else if (net_type == SOCKLND)
+ rc = lustre_socklnd_show_tun(lnd_tunables,
+ &lnd->lnd_tun_u.lnd_sock);
return rc;
}
struct cYAML *map_on_demand = NULL, *concurrent_sends = NULL;
struct cYAML *fmr_pool_size = NULL, *fmr_cache = NULL;
struct cYAML *fmr_flush_trigger = NULL, *lndparams = NULL;
- struct cYAML *conns_per_peer = NULL;
+ struct cYAML *conns_per_peer = NULL, *ntx = NULL;
lndparams = cYAML_get_object_item(tree, "lnd tunables");
if (!lndparams)
lnd_cfg->lnd_fmr_cache =
(fmr_cache) ? fmr_cache->cy_valueint : 0;
+ ntx = cYAML_get_object_item(lndparams, "ntx");
+ lnd_cfg->lnd_ntx = (ntx) ? ntx->cy_valueint : 0;
+
conns_per_peer = cYAML_get_object_item(lndparams, "conns_per_peer");
lnd_cfg->lnd_conns_per_peer =
(conns_per_peer) ? conns_per_peer->cy_valueint : 1;
+
}
+/*
+ * yaml_extract_sock_tun
+ *
+ * Extract socklnd-specific tunables from a parsed YAML tree into
+ * lnd_cfg.  Looks up the "lnd tunables" object; if it is absent the
+ * function returns without touching lnd_cfg.  Currently only
+ * "conns_per_peer" is parsed; when the key is missing it defaults to 1,
+ * matching the o2iblnd extraction path.
+ */
+static void
+yaml_extract_sock_tun(struct cYAML *tree,
+		      struct lnet_ioctl_config_socklnd_tunables *lnd_cfg)
+{
+	struct cYAML *conns_per_peer = NULL, *lndparams = NULL;
+
+	lndparams = cYAML_get_object_item(tree, "lnd tunables");
+	if (!lndparams)
+		return;
+
+	/* default of 1 connection per peer when the key is not present */
+	conns_per_peer = cYAML_get_object_item(lndparams, "conns_per_peer");
+	lnd_cfg->lnd_conns_per_peer =
+		(conns_per_peer) ? conns_per_peer->cy_valueint : 1;
+}
+
void
lustre_yaml_extract_lnd_tunables(struct cYAML *tree,
__u32 net_type,
if (net_type == O2IBLND)
yaml_extract_o2ib_tun(tree,
&tun->lnd_tun_u.lnd_o2ib);
+ else if (net_type == SOCKLND)
+ yaml_extract_sock_tun(tree,
+ &tun->lnd_tun_u.lnd_sock);
}