if (cmd != LNET_CMD_NETS)
return -EOPNOTSUPP;
+ if (!attr)
+ return 0;
+
if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
return -EINVAL;
if (cmd != LNET_CMD_NETS)
return -EOPNOTSUPP;
+ if (!attr)
+ return 0;
+
lnd_kfi = &tunables->lnd_tun_u.lnd_kfi;
switch (type) {
return 0;
}
+/* kiblnd_nl_set_default - reset one o2iblnd tunable to its default
+ *
+ * Copies the module-parameter default (captured in kib_default_tunables
+ * by kiblnd_tunables_init) into the per-NI tunable selected by the
+ * netlink attribute @type.  Invoked for each attribute when userspace
+ * supplied no value (attr == NULL in kiblnd_nl_set).  @cmd is unused
+ * here; the caller has already validated it as LNET_CMD_NETS.
+ * Unknown attribute types are silently ignored.
+ */
+static inline void
+kiblnd_nl_set_default(int cmd, int type, void *data)
+{
+	struct lnet_lnd_tunables *tunables = data;
+	struct lnet_ioctl_config_o2iblnd_tunables *lt;
+	struct lnet_ioctl_config_o2iblnd_tunables *df;
+
+	lt = &tunables->lnd_tun_u.lnd_o2ib;
+	df = &kib_default_tunables;
+	switch (type) {
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS:
+		lt->lnd_peercredits_hiw = df->lnd_peercredits_hiw;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND:
+		lt->lnd_map_on_demand = df->lnd_map_on_demand;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS:
+		lt->lnd_concurrent_sends = df->lnd_concurrent_sends;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE:
+		lt->lnd_fmr_pool_size = df->lnd_fmr_pool_size;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER:
+		lt->lnd_fmr_flush_trigger = df->lnd_fmr_flush_trigger;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE:
+		lt->lnd_fmr_cache = df->lnd_fmr_cache;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_NTX:
+		lt->lnd_ntx = df->lnd_ntx;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT:
+		/* NOTE(review): kiblnd_tunables_init does not appear to set
+		 * kib_default_tunables.lnd_timeout, so this copies 0 (the
+		 * static-storage default) — confirm that is intended.
+		 */
+		lt->lnd_timeout = df->lnd_timeout;
+		break;
+	case LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER:
+		lt->lnd_conns_per_peer = df->lnd_conns_per_peer;
+		break;
+	default:
+		break;
+	}
+}
+
static int
kiblnd_nl_set(int cmd, struct nlattr *attr, int type, void *data)
{
if (cmd != LNET_CMD_NETS)
return -EOPNOTSUPP;
+ if (!attr) {
+ kiblnd_nl_set_default(cmd, type, data);
+ return 0;
+ }
+
if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
return -EINVAL;
};
extern struct kib_tunables kiblnd_tunables;
+extern struct lnet_ioctl_config_o2iblnd_tunables kib_default_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
.kib_use_fastreg_gaps = &use_fastreg_gaps,
};
-static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
+struct lnet_ioctl_config_o2iblnd_tunables kib_default_tunables;
/* # messages/RDMAs in-flight */
int
*/
if (!ni->ni_lnd_tunables_set)
memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
- &default_tunables, sizeof(*tunables));
+ &kib_default_tunables, sizeof(*tunables));
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
int
kiblnd_tunables_init(void)
{
- default_tunables.lnd_version = CURRENT_LND_VERSION;
- default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
- default_tunables.lnd_map_on_demand = map_on_demand;
- default_tunables.lnd_concurrent_sends = concurrent_sends;
- default_tunables.lnd_fmr_pool_size = fmr_pool_size;
- default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
- default_tunables.lnd_fmr_cache = fmr_cache;
- default_tunables.lnd_ntx = ntx;
- default_tunables.lnd_conns_per_peer = conns_per_peer;
+ kib_default_tunables.lnd_version = CURRENT_LND_VERSION;
+ kib_default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
+ kib_default_tunables.lnd_map_on_demand = map_on_demand;
+ kib_default_tunables.lnd_concurrent_sends = concurrent_sends;
+ kib_default_tunables.lnd_fmr_pool_size = fmr_pool_size;
+ kib_default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
+ kib_default_tunables.lnd_fmr_cache = fmr_cache;
+ kib_default_tunables.lnd_ntx = ntx;
+ kib_default_tunables.lnd_conns_per_peer = conns_per_peer;
return 0;
}
return 0;
}
+/* ksocknal_nl_set_default - reset one socklnd tunable to its default
+ *
+ * Copies the module-parameter default (captured in ksock_default_tunables
+ * by ksocknal_tunables_init) into the per-NI tunable selected by the
+ * netlink attribute @type.  Invoked for each attribute when userspace
+ * supplied no value (attr == NULL in ksocknal_nl_set).  @cmd is unused
+ * here; the caller has already validated it as LNET_CMD_NETS.
+ * Unknown attribute types are silently ignored.
+ */
+static inline void
+ksocknal_nl_set_default(int cmd, int type, void *data)
+{
+	struct lnet_lnd_tunables *tunables = data;
+	struct lnet_ioctl_config_socklnd_tunables *lt;
+	struct lnet_ioctl_config_socklnd_tunables *df;
+
+	lt = &tunables->lnd_tun_u.lnd_sock;
+	df = &ksock_default_tunables;
+	switch (type) {
+	case LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER:
+		lt->lnd_conns_per_peer = df->lnd_conns_per_peer;
+		break;
+	case LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TIMEOUT:
+		/* NOTE(review): ksocknal_tunables_init only sets lnd_version
+		 * and lnd_conns_per_peer, so df->lnd_timeout is the zeroed
+		 * static-storage default — confirm that is intended.
+		 */
+		lt->lnd_timeout = df->lnd_timeout;
+		break;
+	default:
+		break;
+	}
+}
+
static int
ksocknal_nl_set(int cmd, struct nlattr *attr, int type, void *data)
{
if (cmd != LNET_CMD_NETS)
return -EOPNOTSUPP;
+ if (!attr) {
+ ksocknal_nl_set_default(cmd, type, data);
+ return 0;
+ }
+
if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
return -EINVAL;
extern struct ksock_nal_data ksocknal_data;
extern struct ksock_tunables ksocknal_tunables;
+extern struct lnet_ioctl_config_socklnd_tunables ksock_default_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
}
struct ksock_tunables ksocknal_tunables;
-static struct lnet_ioctl_config_socklnd_tunables default_tunables;
+struct lnet_ioctl_config_socklnd_tunables ksock_default_tunables;
#ifdef HAVE_ETHTOOL_LINK_SETTINGS
static int ksocklnd_ni_get_eth_intf_speed(struct lnet_ni *ni)
int ksocknal_tunables_init(void)
{
- default_tunables.lnd_version = CURRENT_LND_VERSION;
- default_tunables.lnd_conns_per_peer = conns_per_peer;
+ ksock_default_tunables.lnd_version = CURRENT_LND_VERSION;
+ ksock_default_tunables.lnd_conns_per_peer = conns_per_peer;
/* initialize ksocknal_tunables structure */
ksocknal_tunables.ksnd_timeout = &sock_timeout;
/* If no tunables specified, setup default tunables */
if (!ni->ni_lnd_tunables_set)
memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_sock,
- &default_tunables, sizeof(*tunables));
+ &ksock_default_tunables, sizeof(*tunables));
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_sock;
return rc;
}
+/* lnet_genl_init_tunables - seed tunables before parsing netlink input
+ *
+ * Sets every common tunable to -1 ("use LND default"), then — if the LND
+ * is loaded and provides both a key list and an lnd_nl_set callback —
+ * walks attributes 1..lkl_maxattr, invoking lnd_nl_set with a NULL
+ * nlattr so each LND-specific tunable is filled with its default value.
+ * @lnd may be NULL (net type not yet known); only the common fields are
+ * initialized in that case.
+ */
+static inline void
+lnet_genl_init_tunables(const struct lnet_lnd *lnd,
+			struct lnet_ioctl_config_lnd_tunables *tun)
+{
+	const struct ln_key_list *list = lnd ? lnd->lnd_keys : NULL;
+	int i;
+
+	/* -1 tells the LND to substitute its own default */
+	tun->lt_cmn.lct_peer_timeout = -1;
+	tun->lt_cmn.lct_peer_tx_credits = -1;
+	tun->lt_cmn.lct_peer_rtr_credits = -1;
+	tun->lt_cmn.lct_max_tx_credits = -1;
+
+	if (!list || !lnd->lnd_nl_set || !list->lkl_maxattr)
+		return;
+
+	/* init lnd tunables with default values; attr indices are 1-based,
+	 * and a NULL nlattr selects the LND's set-default path
+	 */
+	for (i = 1; i <= list->lkl_maxattr; i++)
+		lnd->lnd_nl_set(LNET_CMD_NETS, NULL, i, &tun->lt_tun);
+}
+
static int
lnet_genl_parse_local_ni(struct nlattr *entry, struct genl_info *info,
int net_id, struct lnet_ioctl_config_ni *conf,
{
struct lnet_ioctl_config_lnd_tunables *tun;
struct lnet_nid nid = LNET_ANY_NID;
+ const struct lnet_lnd *lnd = NULL;
struct nlattr *settings;
int healthv = -1;
int rem3, rc = 0;
+ if (net_id != LNET_NET_ANY) {
+ lnd = lnet_load_lnd(LNET_NETTYP(net_id));
+ if (IS_ERR(lnd)) {
+ GENL_SET_ERR_MSG(info, "LND type not supported");
+ RETURN(PTR_ERR(lnd));
+ }
+ }
+
LIBCFS_ALLOC(tun, sizeof(struct lnet_ioctl_config_lnd_tunables));
if (!tun) {
GENL_SET_ERR_MSG(info, "cannot allocate memory for tunables");
}
/* Use LND defaults */
- tun->lt_cmn.lct_peer_timeout = -1;
- tun->lt_cmn.lct_peer_tx_credits = -1;
- tun->lt_cmn.lct_peer_rtr_credits = -1;
- tun->lt_cmn.lct_max_tx_credits = -1;
+ lnet_genl_init_tunables(lnd, tun);
conf->lic_ncpts = 0;
nla_for_each_nested(settings, entry, rem3) {
GOTO(out, rc);
}
} else if ((nla_strcmp(settings, "lnd tunables") == 0)) {
- const struct lnet_lnd *lnd;
-
settings = nla_next(settings, &rem3);
if (nla_type(settings) !=
LN_SCALAR_ATTR_LIST) {