__u32 lnd_fmr_flush_trigger;
__u32 lnd_fmr_cache;
__u16 lnd_conns_per_peer;
- __u16 pad;
+ __u16 lnd_ntx;
};
struct lnet_lnd_tunables {
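For context, the hunk above reuses the 16-bit pad slot at the tail of struct lnet_ioctl_config_o2iblnd_tunables, so the structure's size and field offsets are unchanged and the ioctl ABI is preserved. A sketch of the resulting layout, assuming the usual field set for this struct (only the last four fields actually appear in the hunk):

#include <linux/types.h>

struct lnet_ioctl_config_o2iblnd_tunables {
	__u32 lnd_version;
	__u32 lnd_peercredits_hiw;
	__u32 lnd_map_on_demand;
	__u32 lnd_concurrent_sends;
	__u32 lnd_fmr_pool_size;
	__u32 lnd_fmr_flush_trigger;
	__u32 lnd_fmr_cache;
	__u16 lnd_conns_per_peer;
	__u16 lnd_ntx;		/* was "pad": same width, ABI unchanged */
};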
LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
}
-static int kiblnd_tx_pool_size(int ncpts)
+static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts)
{
- int ntx = *kiblnd_tunables.kib_ntx / ncpts;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int ntx;
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ ntx = tunables->lnd_ntx / ncpts;
return max(IBLND_TX_POOL, ntx);
}
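The pool size is now computed from the NI's own lnd_ntx instead of the global kib_ntx module value, divided evenly across CPU partitions and clamped to a floor. A minimal standalone sketch of that sizing math; the IBLND_TX_POOL value of 256 is an assumption for illustration, the real constant lives in o2iblnd.h:

#include <stdio.h>

#define IBLND_TX_POOL 256	/* assumed floor value */

static int tx_pool_size(int ntx, int ncpts)
{
	int per_cpt = ntx / ncpts;

	/* never size a per-CPT pool below the floor, even when ntx is spread thin */
	return per_cpt > IBLND_TX_POOL ? per_cpt : IBLND_TX_POOL;
}

int main(void)
{
	printf("%d\n", tx_pool_size(512, 4));	/* 512/4 = 128, clamped to 256 */
	printf("%d\n", tx_pool_size(4096, 4));	/* 1024 */
	return 0;
}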
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
#endif
- if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < tunables->lnd_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
tunables->lnd_fmr_pool_size,
- *kiblnd_tunables.kib_ntx / 4);
+ tunables->lnd_ntx / 4);
rc = -EINVAL;
goto failed;
}
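The validation now compares against the per-NI lnd_ntx as well: assuming the historical module default of ntx = 512, an fmr_pool_size below 128 is rejected, and an NI configured with a larger ntx raises that minimum proportionally.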
cpt = (cpts == NULL) ? i : cpts[i];
rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
cpt, net, "TX",
- kiblnd_tx_pool_size(ncpts),
+ kiblnd_tx_pool_size(ni, ncpts),
kiblnd_create_tx_pool,
kiblnd_destroy_tx_pool,
kiblnd_tx_init, NULL);
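Threading ni through to kiblnd_tx_pool_size means each CPT's TX poolset is sized from that NI's own tunables, so two o2ib NIs on the same node can now run with different ntx values.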
.kib_cksum = &cksum,
.kib_timeout = &timeout,
.kib_keepalive = &keepalive,
- .kib_ntx = &ntx,
.kib_default_ipif = &ipif_name,
.kib_retry_count = &retry_count,
.kib_rnr_retry_count = &rnr_retry_count,
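The ntx module parameter itself is kept as the global default; only its indirection through kib_tunables is dropped. A sketch of the declaration this initializer used to point at, assuming the conventional form in o2iblnd_modparams.c (the default of 512 is the historical value):

#include <linux/moduleparam.h>

static int ntx = 512;
module_param(ntx, int, 0444);
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");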
tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
if (!tunables->lnd_fmr_cache)
tunables->lnd_fmr_cache = fmr_cache;
+ if (!tunables->lnd_ntx)
+ tunables->lnd_ntx = ntx;
if (!tunables->lnd_conns_per_peer) {
tunables->lnd_conns_per_peer = (conns_per_peer) ?
conns_per_peer : 1;
default_tunables.lnd_fmr_pool_size = fmr_pool_size;
default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
default_tunables.lnd_fmr_cache = fmr_cache;
+ default_tunables.lnd_ntx = ntx;
default_tunables.lnd_conns_per_peer = conns_per_peer;
return 0;
}
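Taken together, the defaulting is two-tiered: default_tunables seeds lnd_ntx from the module parameter, and any NI whose configuration leaves ntx at zero (i.e. unset) inherits that value in the hunk above, while a nonzero per-NI setting, e.g. an ntx key under an NI's "lnd tunables" block in lnet.conf, overrides it for that NI alone.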
list_for_each_entry(intf_descr, intf_list,
intf_on_network) {
- if (i == 0 && tunables != NULL)
+ if (tunables != NULL)
len = sizeof(struct lnet_ioctl_config_ni) +
sizeof(struct lnet_ioctl_config_lnd_tunables);
else
if (!data)
return LUSTRE_CFG_RC_OUT_OF_MEM;
conf = (struct lnet_ioctl_config_ni*) data;
- if (i == 0 && tunables != NULL)
+ if (tunables != NULL)
tun = (struct lnet_ioctl_config_lnd_tunables*)
conf->lic_bulk;
conf->lic_ncpts = count;
- if (i == 0 && tunables != NULL)
- /* TODO put in the LND tunables */
+ if (tunables != NULL)
memcpy(tun, tunables, sizeof(*tunables));
rc = l_ioctl(LNET_DEV_ID, IOC_LIBCFS_ADD_LOCAL_NI, data);
lnd_cfg->lnd_fmr_cache) == NULL)
return LUSTRE_CFG_RC_OUT_OF_MEM;
+ if (cYAML_create_number(lndparams, "ntx",
+ lnd_cfg->lnd_ntx) == NULL)
+ return LUSTRE_CFG_RC_OUT_OF_MEM;
+
if (cYAML_create_number(lndparams, "conns_per_peer",
lnd_cfg->lnd_conns_per_peer) == NULL)
return LUSTRE_CFG_RC_OUT_OF_MEM;
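With this addition, the exported YAML for an o2ib NI gains an ntx key under its "lnd tunables" block, alongside the existing fmr_cache and conns_per_peer keys, so a round trip through lnetctl export / import preserves the per-NI value.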
struct cYAML *map_on_demand = NULL, *concurrent_sends = NULL;
struct cYAML *fmr_pool_size = NULL, *fmr_cache = NULL;
struct cYAML *fmr_flush_trigger = NULL, *lndparams = NULL;
- struct cYAML *conns_per_peer = NULL;
+ struct cYAML *conns_per_peer = NULL, *ntx = NULL;
lndparams = cYAML_get_object_item(tree, "lnd tunables");
if (!lndparams)
lnd_cfg->lnd_fmr_cache =
(fmr_cache) ? fmr_cache->cy_valueint : 0;
+ ntx = cYAML_get_object_item(lndparams, "ntx");
+ lnd_cfg->lnd_ntx = (ntx) ? ntx->cy_valueint : 0;
+
conns_per_peer = cYAML_get_object_item(lndparams, "conns_per_peer");
lnd_cfg->lnd_conns_per_peer =
(conns_per_peer) ? conns_per_peer->cy_valueint : 1;
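Note the deliberate asymmetry on the parse side: a missing ntx key yields 0, which the setup code above treats as "unset, fall back to the module default", whereas a missing conns_per_peer still defaults to 1.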