module_param(cksum, int, 0644);
MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
-static int timeout = 50;
+static int timeout;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
-static int credits = 256;
+static int credits = DEFAULT_CREDITS;
module_param(credits, int, 0444);
MODULE_PARM_DESC(credits, "# concurrent sends");
-static int peer_credits = 8;
+static int peer_credits = DEFAULT_PEER_CREDITS;
module_param(peer_credits, int, 0444);
MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
-MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing (obsolete)");
+MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
+
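+/* Allow fast-reg registrations over discontiguous fragments; off by
+ * default since it can cost performance (see the description below).
+ */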
+static int use_fastreg_gaps;
+module_param(use_fastreg_gaps, int, 0444);
+MODULE_PARM_DESC(use_fastreg_gaps, "Enable discontiguous fastreg fragment support. Expect performance drop");
/*
* map_on_demand is a flag used to determine if we can use FMR or FastReg.
module_param(wrq_sge, uint, 0444);
MODULE_PARM_DESC(wrq_sge, "# scatter/gather element per work request");
-kib_tunables_t kiblnd_tunables = {
+struct kib_tunables kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
.kib_service = &service,
.kib_cksum = &cksum,
.kib_use_priv_port = &use_privileged_port,
.kib_nscheds = &nscheds,
.kib_wrq_sge = &wrq_sge,
+ .kib_use_fastreg_gaps = &use_fastreg_gaps,
};
static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
/* Current API version */
tunables->lnd_version = CURRENT_LND_VERSION;
- if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
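+ /* Validate a non-zero ib_mtu by round-tripping it through the IB MTU
+  * enum; values that do not map back to themselves are rejected.
+  */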
+ if (*kiblnd_tunables.kib_ib_mtu &&
+ ib_mtu_enum_to_int(ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu)) !=
+ *kiblnd_tunables.kib_ib_mtu) {
CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
*kiblnd_tunables.kib_ib_mtu);
return -EINVAL;
if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
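+ /* Default lnd_concurrent_sends to the peer tx credit count when unset,
+  * then clamp it to between half and twice that value.
+  */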
+ if (tunables->lnd_concurrent_sends == 0)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits;
+
+ if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;
+
+ if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;
+
+ if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
+ CWARN("Concurrent sends %d is lower than message "
+ "queue size: %d, performance may drop slightly.\n",
+ tunables->lnd_concurrent_sends,
+ net_tunables->lct_peer_tx_credits);
+ }
+
if (!tunables->lnd_fmr_pool_size)
tunables->lnd_fmr_pool_size = fmr_pool_size;
if (!tunables->lnd_fmr_flush_trigger)
default_tunables.lnd_version = CURRENT_LND_VERSION;
default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
default_tunables.lnd_map_on_demand = map_on_demand;
+ default_tunables.lnd_concurrent_sends = concurrent_sends;
default_tunables.lnd_fmr_pool_size = fmr_pool_size;
default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
default_tunables.lnd_fmr_cache = fmr_cache;