* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd_modparams.c
*
module_param(cksum, int, 0644);
MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
-static int timeout = 50;
+static int timeout;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
-static int credits = 256;
+static int credits = DEFAULT_CREDITS;
module_param(credits, int, 0444);
MODULE_PARM_DESC(credits, "# concurrent sends");
-static int peer_credits = 8;
+static int peer_credits = DEFAULT_PEER_CREDITS;
module_param(peer_credits, int, 0444);
MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int retry_count = 5;
module_param(retry_count, int, 0644);
-MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
+MODULE_PARM_DESC(retry_count, "Number of times to retry connection operations");
static int rnr_retry_count = 6;
module_param(rnr_retry_count, int, 0644);
static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
-MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing (obsolete)");
+MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
+
+static int use_fastreg_gaps;
+module_param(use_fastreg_gaps, int, 0444);
+MODULE_PARM_DESC(use_fastreg_gaps, "Enable discontiguous fastreg fragment support. Expect performance drop");
/*
* map_on_demand is a flag used to determine if we can use FMR or FastReg.
* 4. Look at the comments in kiblnd_fmr_map_tx() for an explanation of
* the behavior when transmitting with GAPS versus contiguous.
*/
-#ifdef HAVE_IB_GET_DMA_MR
-#define IBLND_DEFAULT_MAP_ON_DEMAND 0
+
+#ifdef HAVE_OFED_IB_GET_DMA_MR
#define MOD_STR "map on demand"
#else
-#define IBLND_DEFAULT_MAP_ON_DEMAND 1
#define MOD_STR "map on demand (obsolete)"
#endif
-static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
+static int map_on_demand = 1;
module_param(map_on_demand, int, 0444);
MODULE_PARM_DESC(map_on_demand, MOD_STR);
module_param(wrq_sge, uint, 0444);
MODULE_PARM_DESC(wrq_sge, "# scatter/gather element per work request");
-kib_tunables_t kiblnd_tunables = {
+/*
+ * IP type-of-service value applied to o2iblnd connections; -1 (the
+ * default) disables it.  Writes go through param_set_tos() below so
+ * only values in [-1, 255] can ever be stored.
+ */
+static int tos = -1;
+static int param_set_tos(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+/* Custom ops so "tos" is range-checked on set; reads use the stock int getter. */
+static const struct kernel_param_ops param_ops_tos = {
+ .set = param_set_tos,
+ .get = param_get_int,
+};
+
+/* Required companion to param_ops_tos: type-checks the backing variable. */
+#define param_check_tos(name, p) \
+ __param_check(name, p, int)
+module_param(tos, tos, 0444);
+#else
+/* Older kernels without kernel_param_ops: wire the callbacks directly. */
+module_param_call(tos, param_set_tos, param_get_int, &tos, 0444);
+#endif
+MODULE_PARM_DESC(tos, "Set the type of service (=-1 to disable)");
+
+/*
+ * Table of pointers to the module parameters above, shared by all NIs.
+ * (Renamed from the old kib_tunables_t typedef to plain struct style.)
+ */
+struct kib_tunables kiblnd_tunables = {
 .kib_dev_failover = &dev_failover,
 .kib_service = &service,
 .kib_cksum = &cksum,
 .kib_use_priv_port = &use_privileged_port,
 .kib_nscheds = &nscheds,
 .kib_wrq_sge = &wrq_sge,
+ .kib_use_fastreg_gaps = &use_fastreg_gaps,
};
-static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
+/*
+ * NOTE(review): the rename also drops "static" -- presumably
+ * kib_default_tunables is now referenced from other files; confirm a
+ * matching extern declaration exists in the header.
+ */
+struct lnet_ioctl_config_o2iblnd_tunables kib_default_tunables;
+
+/*
+ * param_set_tos() - module-parameter setter for "tos".
+ *
+ * Parses @val as an integer (any base accepted by kstrtoint() with
+ * base 0) and stores it through @kp->arg after range checking.
+ *
+ * Return: 0 on success; -EINVAL if @val is NULL; the kstrtoint()
+ * error on a malformed string; -ERANGE if the value is outside
+ * [-1, 255] (-1 meaning "ToS disabled").
+ */
+static int param_set_tos(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc, t;
+
+ if (!val)
+ return -EINVAL;
+
+ rc = kstrtoint(val, 0, &t);
+ if (rc)
+ return rc;
+
+ if (t < -1 || t > 0xff)
+ return -ERANGE;
+
+ *((int *)kp->arg) = t;
+
+ return 0;
+}
/* # messages/RDMAs in-flight */
int
*/
if (!ni->ni_lnd_tunables_set)
memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
- &default_tunables, sizeof(*tunables));
+ &kib_default_tunables, sizeof(*tunables));
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
/* Current API version */
tunables->lnd_version = CURRENT_LND_VERSION;
- if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
+ if (*kiblnd_tunables.kib_ib_mtu &&
+ ib_mtu_enum_to_int(ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu)) !=
+ *kiblnd_tunables.kib_ib_mtu) {
CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
*kiblnd_tunables.kib_ib_mtu);
return -EINVAL;
net_tunables->lct_peer_tx_credits =
net_tunables->lct_max_tx_credits;
-#ifndef HAVE_IB_GET_DMA_MR
+ if (tunables->lnd_map_on_demand == UINT_MAX)
+ tunables->lnd_map_on_demand = map_on_demand;
+
+#ifndef HAVE_OFED_IB_GET_DMA_MR
/*
* For kernels which do not support global memory regions, always
* enable map_on_demand
if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
+ if (tunables->lnd_concurrent_sends == 0)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits;
+
+ if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;
+
+ if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
+ tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;
+
+ if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
+ CWARN("Concurrent sends %d is lower than message "
+ "queue size: %d, performance may drop slightly.\n",
+ tunables->lnd_concurrent_sends,
+ net_tunables->lct_peer_tx_credits);
+ }
+
if (!tunables->lnd_fmr_pool_size)
tunables->lnd_fmr_pool_size = fmr_pool_size;
if (!tunables->lnd_fmr_flush_trigger)
tunables->lnd_fmr_cache = fmr_cache;
if (!tunables->lnd_ntx)
tunables->lnd_ntx = ntx;
- if (!tunables->lnd_conns_per_peer) {
+ if (!tunables->lnd_conns_per_peer)
tunables->lnd_conns_per_peer = (conns_per_peer) ?
conns_per_peer : 1;
- }
+ if (tunables->lnd_tos < 0)
+ tunables->lnd_tos = tos;
+
+ tunables->lnd_timeout = kiblnd_timeout();
return 0;
}
int
kiblnd_tunables_init(void)
{
- default_tunables.lnd_version = CURRENT_LND_VERSION;
- default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
- default_tunables.lnd_map_on_demand = map_on_demand;
- default_tunables.lnd_fmr_pool_size = fmr_pool_size;
- default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
- default_tunables.lnd_fmr_cache = fmr_cache;
- default_tunables.lnd_ntx = ntx;
- default_tunables.lnd_conns_per_peer = conns_per_peer;
+ /*
+ * Seed the global defaults from the module parameters; these are
+ * memcpy()'d into each NI's lnd_tun_u.lnd_o2ib when that NI did not
+ * set its own tunables (see the copy in the setup path above).
+ */
+ kib_default_tunables.lnd_version = CURRENT_LND_VERSION;
+ kib_default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
+ kib_default_tunables.lnd_map_on_demand = map_on_demand;
+ kib_default_tunables.lnd_concurrent_sends = concurrent_sends;
+ kib_default_tunables.lnd_fmr_pool_size = fmr_pool_size;
+ kib_default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
+ kib_default_tunables.lnd_fmr_cache = fmr_cache;
+ kib_default_tunables.lnd_ntx = ntx;
+ kib_default_tunables.lnd_conns_per_peer = conns_per_peer;
+ kib_default_tunables.lnd_tos = tos;
 return 0;
}