+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
+
+ /*
+ * If no tunables were specified, set them up with the defaults.
+ */
+ if (!ni->ni_lnd_tunables_set)
+ memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
+ &default_tunables, sizeof(*tunables));
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+
+ /* Current API version */
+ tunables->lnd_version = CURRENT_LND_VERSION;
+
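+ /* make sure the configured ib_mtu is a valid InfiniBand MTU size */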
+ if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
+ CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
+ *kiblnd_tunables.kib_ib_mtu);
+ return -EINVAL;
+ }
+
+ net_tunables = &ni->ni_net->net_tunables;
+
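+ /*
+ * A value of -1 means the tunable was not set through configuration,
+ * so fall back to the corresponding module parameter.
+ */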
+ if (net_tunables->lct_peer_timeout == -1)
+ net_tunables->lct_peer_timeout = peer_timeout;
+
+ if (net_tunables->lct_max_tx_credits == -1)
+ net_tunables->lct_max_tx_credits = credits;
+
+ if (net_tunables->lct_peer_tx_credits == -1)
+ net_tunables->lct_peer_tx_credits = peer_credits;
+
+ if (net_tunables->lct_peer_rtr_credits == -1)
+ net_tunables->lct_peer_rtr_credits = peer_buffer_credits;
+
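+ /*
+ * Clamp the per-peer send queue size to the supported range and
+ * never let it exceed the global credit limit.
+ */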
+ if (net_tunables->lct_peer_tx_credits < IBLND_CREDITS_DEFAULT)
+ net_tunables->lct_peer_tx_credits = IBLND_CREDITS_DEFAULT;
+
+ if (net_tunables->lct_peer_tx_credits > IBLND_CREDITS_MAX)
+ net_tunables->lct_peer_tx_credits = IBLND_CREDITS_MAX;
+
+ if (net_tunables->lct_peer_tx_credits >
+ net_tunables->lct_max_tx_credits)
+ net_tunables->lct_peer_tx_credits =
+ net_tunables->lct_max_tx_credits;
+
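+ /*
+ * Keep the credits high-water mark within
+ * [peer_tx_credits / 2, peer_tx_credits - 1].
+ */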
+ if (!tunables->lnd_peercredits_hiw)
+ tunables->lnd_peercredits_hiw = peer_credits_hiw;
+
+ if (tunables->lnd_peercredits_hiw < net_tunables->lct_peer_tx_credits / 2)
+ tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits / 2;
+
+ if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
+ tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
+
+ if (tunables->lnd_map_on_demand < IBLND_MIN_MAP_ON_DEMAND ||
+ tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
+ /* Use the default */
+ CWARN("Invalid map_on_demand (%d), expects %d - %d. Using default of %d\n",
+ tunables->lnd_map_on_demand, IBLND_MIN_MAP_ON_DEMAND,
+ IBLND_MAX_RDMA_FRAGS, IBLND_DEFAULT_MAP_ON_DEMAND);
+ tunables->lnd_map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
+ }
+
+ if (tunables->lnd_map_on_demand == 1) {
+ /* it makes no sense to create a map for only a single fragment */
+ tunables->lnd_map_on_demand = 2;
+ }
+
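+ /*
+ * If concurrent_sends was not set, derive it from the per-peer queue
+ * size, then clamp it to [peer_tx_credits / 2, peer_tx_credits * 2].
+ */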
+ if (tunables->lnd_concurrent_sends == 0) {
+ if (tunables->lnd_map_on_demand > 0 &&
+ tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
+ tunables->lnd_concurrent_sends =
+ net_tunables->lct_peer_tx_credits * 2;
+ } else {
+ tunables->lnd_concurrent_sends =
+ net_tunables->lct_peer_tx_credits;
+ }
+ }
+
+ if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;
+
+ if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;
+
+ if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
+ CWARN("Concurrent sends %d is lower than message "
+ "queue size: %d, performance may drop slightly.\n",
+ tunables->lnd_concurrent_sends,
+ net_tunables->lct_peer_tx_credits);
+ }
+
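+ /* FMR tunables left at zero fall back to the module parameter defaults */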
+ if (!tunables->lnd_fmr_pool_size)
+ tunables->lnd_fmr_pool_size = fmr_pool_size;
+ if (!tunables->lnd_fmr_flush_trigger)
+ tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
+ if (!tunables->lnd_fmr_cache)
+ tunables->lnd_fmr_cache = fmr_cache;
+ if (!tunables->lnd_conns_per_peer) {
+ tunables->lnd_conns_per_peer = conns_per_peer ?
+ conns_per_peer : 1;
+ }
+
+ return 0;