LU-7101 lnet: per NI map-on-demand value
[fs/lustre-release.git] lnet/klnds/o2iblnd/o2iblnd_modparams.c
index cdddcd7..61254c0 100644
@@ -135,7 +135,6 @@ static int dev_failover = 0;
 CFS_MODULE_PARM(dev_failover, "i", int, 0444,
                "HCA failover for bonding (0 off, 1 on, other values reserved)");
 
-
 static int require_privileged_port = 0;
 CFS_MODULE_PARM(require_privileged_port, "i", int, 0644,
                 "require privileged port when accepting connection");
@@ -151,25 +150,17 @@ kib_tunables_t kiblnd_tunables = {
         .kib_timeout                = &timeout,
         .kib_keepalive              = &keepalive,
         .kib_ntx                    = &ntx,
-        .kib_credits                = &credits,
-        .kib_peertxcredits          = &peer_credits,
-        .kib_peercredits_hiw        = &peer_credits_hiw,
-        .kib_peerrtrcredits         = &peer_buffer_credits,
-        .kib_peertimeout            = &peer_timeout,
         .kib_default_ipif           = &ipif_name,
         .kib_retry_count            = &retry_count,
         .kib_rnr_retry_count        = &rnr_retry_count,
-        .kib_concurrent_sends       = &concurrent_sends,
         .kib_ib_mtu                 = &ib_mtu,
-        .kib_map_on_demand          = &map_on_demand,
-        .kib_fmr_pool_size          = &fmr_pool_size,
-        .kib_fmr_flush_trigger      = &fmr_flush_trigger,
-        .kib_fmr_cache              = &fmr_cache,
         .kib_require_priv_port      = &require_privileged_port,
        .kib_use_priv_port          = &use_privileged_port,
        .kib_nscheds                = &nscheds
 };
 
+static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
+
 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
 
 static char ipif_basename_space[32];
@@ -388,66 +379,139 @@ static void
 kiblnd_sysctl_fini (void)
 {
 }
-
 #endif
 
+/* # messages/RDMAs in-flight */
 int
-kiblnd_tunables_init (void)
+kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
 {
-        if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
-                CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
-                       *kiblnd_tunables.kib_ib_mtu);
-                return -EINVAL;
-        }
-
-        if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
-                *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
-
-        if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
-                *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
-
-        if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
-                *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
-
-        if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
-                *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
-
-        if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
-                *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
-
-        if (*kiblnd_tunables.kib_map_on_demand < 0 ||
-            *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
-                *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
-
-        if (*kiblnd_tunables.kib_map_on_demand == 1)
-                *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
-
-        if (*kiblnd_tunables.kib_concurrent_sends == 0) {
-                if (*kiblnd_tunables.kib_map_on_demand > 0 &&
-                    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
-                        *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
-                else
-                        *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
-        }
-
-        if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
-                *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
-
-        if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
-                *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
+       if (version == IBLND_MSG_VERSION_1)
+               return IBLND_MSG_QUEUE_SIZE_V1;
+       else if (ni)
+               return ni->ni_peertxcredits;
+       else
+               return peer_credits;
+}
 
-        if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
-                CWARN("Concurrent sends %d is lower than message queue size: %d, "
-                      "performance may drop slightly.\n",
-                      *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
-        }
+int
+kiblnd_tunables_setup(lnet_ni_t *ni)
+{
+       struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+       /*
+        * If no tunables were specified, fall back to the module-wide
+        * default values.
+        */
+       if (!ni->ni_lnd_tunables) {
+               LIBCFS_ALLOC(ni->ni_lnd_tunables,
+                            sizeof(*ni->ni_lnd_tunables));
+               if (!ni->ni_lnd_tunables)
+                       return -ENOMEM;
+
+               memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib,
+                      &default_tunables, sizeof(*tunables));
+       }
+       tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
+       /* Current API version */
+       tunables->lnd_version = 0;
+
+       if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
+               CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
+                      *kiblnd_tunables.kib_ib_mtu);
+               return -EINVAL;
+       }
+
+       if (!ni->ni_peertimeout)
+               ni->ni_peertimeout = peer_timeout;
+
+       if (!ni->ni_maxtxcredits)
+               ni->ni_maxtxcredits = credits;
+
+       if (!ni->ni_peertxcredits)
+               ni->ni_peertxcredits = peer_credits;
+
+       if (!ni->ni_peerrtrcredits)
+               ni->ni_peerrtrcredits = peer_buffer_credits;
+
+       if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT)
+               ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT;
+
+       if (ni->ni_peertxcredits > IBLND_CREDITS_MAX)
+               ni->ni_peertxcredits = IBLND_CREDITS_MAX;
+
+       if (ni->ni_peertxcredits > credits)
+               ni->ni_peertxcredits = credits;
+
+       if (!tunables->lnd_peercredits_hiw)
+               tunables->lnd_peercredits_hiw = peer_credits_hiw;
+
+       if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2)
+               tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2;
+
+       if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
+               tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
+
+       if (tunables->lnd_map_on_demand < 0 ||
+           tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
+               /* disable map-on-demand */
+               tunables->lnd_map_on_demand = 0;
+       }
+
+       if (tunables->lnd_map_on_demand == 1) {
+               /* it makes no sense to create a map for a single fragment */
+               tunables->lnd_map_on_demand = 2;
+       }
+
+       if (tunables->lnd_concurrent_sends == 0) {
+               if (tunables->lnd_map_on_demand > 0 &&
+                   tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
+                       tunables->lnd_concurrent_sends =
+                                               ni->ni_peertxcredits * 2;
+               } else {
+                       tunables->lnd_concurrent_sends = ni->ni_peertxcredits;
+               }
+       }
+
+       if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2)
+               tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2;
+
+       if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2)
+               tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2;
+
+       if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) {
+               CWARN("Concurrent sends %d is lower than message "
+                     "queue size: %d, performance may drop slightly.\n",
+                     tunables->lnd_concurrent_sends, ni->ni_peertxcredits);
+       }
+
+       if (!tunables->lnd_fmr_pool_size)
+               tunables->lnd_fmr_pool_size = fmr_pool_size;
+       if (!tunables->lnd_fmr_flush_trigger)
+               tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
+       if (!tunables->lnd_fmr_cache)
+               tunables->lnd_fmr_cache = fmr_cache;
+
+       return 0;
+}
 
-        kiblnd_sysctl_init();
-        return 0;
+int
+kiblnd_tunables_init(void)
+{
+       default_tunables.lnd_version = 0;
+       default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
+       default_tunables.lnd_map_on_demand = map_on_demand;
+       default_tunables.lnd_concurrent_sends = concurrent_sends;
+       default_tunables.lnd_fmr_pool_size = fmr_pool_size;
+       default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
+       default_tunables.lnd_fmr_cache = fmr_cache;
+
+       kiblnd_sysctl_init();
+       return 0;
 }
 
 void
-kiblnd_tunables_fini (void)
+kiblnd_tunables_fini(void)
 {
-        kiblnd_sysctl_fini();
+       kiblnd_sysctl_fini();
 }