/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"
#define CURRENT_LND_VERSION 1

static int service = 987;
module_param(service, int, 0444);
MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");

static int cksum = 0;
module_param(cksum, int, 0644);
MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");

static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");
/* Number of threads in each scheduler pool (per CPT); if set to zero,
 * a reasonable value is estimated from the number of CPUs. */
static int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
static unsigned int conns_per_peer = 1;
module_param(conns_per_peer, uint, 0444);
MODULE_PARM_DESC(conns_per_peer, "number of connections per peer");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
module_param(ntx, int, 0444);
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
static int credits = DEFAULT_CREDITS;
module_param(credits, int, 0444);
MODULE_PARM_DESC(credits, "# concurrent sends");

static int peer_credits = DEFAULT_PEER_CREDITS;
module_param(peer_credits, int, 0444);
MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int peer_credits_hiw = 0;
module_param(peer_credits_hiw, int, 0444);
MODULE_PARM_DESC(peer_credits_hiw, "high water mark: when to eagerly return credits");
static int peer_buffer_credits = 0;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");

static int peer_timeout = DEFAULT_PEER_TIMEOUT;
module_param(peer_timeout, int, 0444);
MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");

static char *ipif_name = "ib0";
module_param(ipif_name, charp, 0444);
MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
static int retry_count = 5;
module_param(retry_count, int, 0644);
MODULE_PARM_DESC(retry_count, "Number of times to retry connection operations");

static int rnr_retry_count = 6;
module_param(rnr_retry_count, int, 0644);
MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");

static int keepalive = 100;
module_param(keepalive, int, 0644);
MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
static int ib_mtu;
module_param(ib_mtu, int, 0444);
MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");

static int use_fastreg_gaps;
module_param(use_fastreg_gaps, int, 0444);
MODULE_PARM_DESC(use_fastreg_gaps, "Enable discontiguous fastreg fragment support. Expect performance drop");
/*
 * map_on_demand is a flag used to determine if we can use FMR or FastReg.
 * This is applicable for kernels which support global memory regions. For
 * later kernels this flag is always enabled, since we will always either
 * use FMR or FastReg.
 *
 * For kernels which support global memory regions map_on_demand defaults
 * to 0, which means we will be using global memory regions exclusively.
 * If it is set to a value other than 0, then we will behave as follows:
 *  1. Always default the number of fragments to IBLND_MAX_RDMA_FRAGS
 *  2. Create FMR/FastReg pools
 *  3. Negotiate the supported number of fragments per connection
 *  4. Attempt to transmit using global memory regions only if
 *     map-on-demand is not turned on, otherwise use FMR or FastReg
 *  5. In case of transmitting tx with GAPS over FMR we will need to
 *     transmit it with multiple fragments. Look at the comments in
 *     kiblnd_fmr_map_tx() for an explanation of the behavior.
 *
 * For later kernels we default map_on_demand to 1 and do not allow
 * it to be set to 0, since there is no longer support for global memory
 * regions. Behavior:
 *  1. Default the number of fragments to IBLND_MAX_RDMA_FRAGS
 *  2. Create FMR/FastReg pools
 *  3. Negotiate the supported number of fragments per connection
 *  4. Look at the comments in kiblnd_fmr_map_tx() for an explanation of
 *     the behavior when transmitting with GAPS versus contiguous.
 */
#ifdef HAVE_OFED_IB_GET_DMA_MR
#define MOD_STR "map on demand"
#else
#define MOD_STR "map on demand (obsolete)"
#endif
static int map_on_demand = 1;
module_param(map_on_demand, int, 0444);
MODULE_PARM_DESC(map_on_demand, MOD_STR);
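
/*
 * Illustrative usage (not taken from this file): parameters declared with
 * module_param() above are normally set at module load time, e.g. via a
 * modprobe configuration fragment; the path and values below are examples
 * only, not recommended settings:
 *
 *   # /etc/modprobe.d/ko2iblnd.conf
 *   options ko2iblnd map_on_demand=1 peer_credits=32 conns_per_peer=2
 */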
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
module_param(fmr_flush_trigger, int, 0444);
MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");

static int fmr_cache = 1;
module_param(fmr_cache, int, 0444);
MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
/*
 * 0: disable failover
 * 1: enable failover if necessary
 * 2: force to failover (for debug)
 */
static int dev_failover = 0;
module_param(dev_failover, int, 0444);
MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
static int require_privileged_port;
module_param(require_privileged_port, int, 0644);
MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");

static int use_privileged_port = 1;
module_param(use_privileged_port, int, 0644);
MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");

static unsigned int wrq_sge = 2;
module_param(wrq_sge, uint, 0444);
MODULE_PARM_DESC(wrq_sge, "# scatter/gather elements per work request");
static int tos = -1;
static int param_set_tos(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static const struct kernel_param_ops param_ops_tos = {
        .set = param_set_tos,
        .get = param_get_int,
};

#define param_check_tos(name, p) \
        __param_check(name, p, int)
module_param(tos, tos, 0444);
#else
module_param_call(tos, param_set_tos, param_get_int, &tos, 0444);
#endif
MODULE_PARM_DESC(tos, "Set the type of service (=-1 to disable)");
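
/*
 * With HAVE_KERNEL_PARAM_OPS, module_param() resolves its type argument to
 * a param_ops_<type> structure, so declaring the parameter with the custom
 * type "tos" above routes every write through param_set_tos() for
 * validation before the value is stored. For example, loading the module
 * with "options ko2iblnd tos=104" (an illustrative value) would be
 * range-checked by param_set_tos() below.
 */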
struct kib_tunables kiblnd_tunables = {
        .kib_dev_failover = &dev_failover,
        .kib_service = &service,
        .kib_cksum = &cksum,
        .kib_timeout = &timeout,
        .kib_keepalive = &keepalive,
        .kib_default_ipif = &ipif_name,
        .kib_retry_count = &retry_count,
        .kib_rnr_retry_count = &rnr_retry_count,
        .kib_ib_mtu = &ib_mtu,
        .kib_require_priv_port = &require_privileged_port,
        .kib_use_priv_port = &use_privileged_port,
        .kib_nscheds = &nscheds,
        .kib_wrq_sge = &wrq_sge,
        .kib_use_fastreg_gaps = &use_fastreg_gaps,
};

struct lnet_ioctl_config_o2iblnd_tunables kib_default_tunables;
static int param_set_tos(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        int t;

        rc = kstrtoint(val, 0, &t);
        if (rc)
                return rc;

        /* -1 disables ToS; otherwise the value must fit in one byte */
        if (t < -1 || t > 0xff)
                return -ERANGE;

        *((int *)kp->arg) = t;

        return 0;
}
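
/*
 * Note that param_set_tos() calls kstrtoint() with base 0, so the value
 * may be given in decimal, octal ("0...") or hexadecimal ("0x...") form;
 * e.g. "104" and "0x68" (illustrative values) parse to the same ToS.
 */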
/* # messages/RDMAs in-flight */
int
kiblnd_msg_queue_size(int version, struct lnet_ni *ni)
{
        if (version == IBLND_MSG_VERSION_1)
                return IBLND_MSG_QUEUE_SIZE_V1;

        return ni->ni_net->net_tunables.lct_peer_tx_credits;
}
int
kiblnd_tunables_setup(struct lnet_ni *ni)
{
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
        struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;

        /*
         * If no tunables were specified, set them up with the default
         * values.
         */
        if (!ni->ni_lnd_tunables_set)
                memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
                       &kib_default_tunables, sizeof(*tunables));

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /* Current API version */
        tunables->lnd_version = CURRENT_LND_VERSION;
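
        /*
         * Sanity-check ib_mtu by round-tripping it through enum ib_mtu:
         * converting int -> enum -> int reproduces the original value
         * only when it is one of the supported sizes
         * (256/512/1024/2048/4096).
         */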
        if (*kiblnd_tunables.kib_ib_mtu &&
            ib_mtu_enum_to_int(ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu)) !=
            *kiblnd_tunables.kib_ib_mtu) {
                CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
                       *kiblnd_tunables.kib_ib_mtu);
                return -EINVAL;
        }
        net_tunables = &ni->ni_net->net_tunables;

        if (net_tunables->lct_peer_timeout == -1)
                net_tunables->lct_peer_timeout = peer_timeout;

        if (net_tunables->lct_max_tx_credits == -1)
                net_tunables->lct_max_tx_credits = credits;

        if (net_tunables->lct_peer_tx_credits == -1)
                net_tunables->lct_peer_tx_credits = peer_credits;

        if (net_tunables->lct_peer_rtr_credits == -1)
                net_tunables->lct_peer_rtr_credits = peer_buffer_credits;

        if (net_tunables->lct_peer_tx_credits < IBLND_CREDITS_DEFAULT)
                net_tunables->lct_peer_tx_credits = IBLND_CREDITS_DEFAULT;

        if (net_tunables->lct_peer_tx_credits > IBLND_CREDITS_MAX)
                net_tunables->lct_peer_tx_credits = IBLND_CREDITS_MAX;

        if (net_tunables->lct_peer_tx_credits >
            net_tunables->lct_max_tx_credits)
                net_tunables->lct_peer_tx_credits =
                        net_tunables->lct_max_tx_credits;
        if (tunables->lnd_map_on_demand == UINT_MAX)
                tunables->lnd_map_on_demand = map_on_demand;

#ifndef HAVE_OFED_IB_GET_DMA_MR
        /*
         * For kernels which do not support global memory regions, always
         * enable map_on_demand.
         */
        if (tunables->lnd_map_on_demand == 0)
                tunables->lnd_map_on_demand = 1;
#endif
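
        /*
         * Keep the high water mark for eagerly returning credits within
         * [lct_peer_tx_credits / 2, lct_peer_tx_credits - 1].
         */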
        if (!tunables->lnd_peercredits_hiw)
                tunables->lnd_peercredits_hiw = peer_credits_hiw;

        if (tunables->lnd_peercredits_hiw < net_tunables->lct_peer_tx_credits / 2)
                tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits / 2;

        if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
                tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
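
        /*
         * Size the send work queue to between half and twice the message
         * queue size (lct_peer_tx_credits); values outside that window
         * are clamped, with a warning when sends end up below the queue
         * size.
         */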
        if (tunables->lnd_concurrent_sends == 0)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits;

        if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;

        if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;

        if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
                CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
                      tunables->lnd_concurrent_sends,
                      net_tunables->lct_peer_tx_credits);
        }
        if (!tunables->lnd_fmr_pool_size)
                tunables->lnd_fmr_pool_size = fmr_pool_size;
        if (!tunables->lnd_fmr_flush_trigger)
                tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
        if (!tunables->lnd_fmr_cache)
                tunables->lnd_fmr_cache = fmr_cache;
        if (!tunables->lnd_ntx)
                tunables->lnd_ntx = ntx;
        if (!tunables->lnd_conns_per_peer)
                tunables->lnd_conns_per_peer = (conns_per_peer) ?
                        conns_per_peer : 1;
        if (tunables->lnd_tos < 0)
                tunables->lnd_tos = tos;

        tunables->lnd_timeout = kiblnd_timeout();

        return 0;
}
void
kiblnd_tunables_init(void)
{
        kib_default_tunables.lnd_version = CURRENT_LND_VERSION;
        kib_default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
        kib_default_tunables.lnd_map_on_demand = map_on_demand;
        kib_default_tunables.lnd_concurrent_sends = concurrent_sends;
        kib_default_tunables.lnd_fmr_pool_size = fmr_pool_size;
        kib_default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
        kib_default_tunables.lnd_fmr_cache = fmr_cache;
        kib_default_tunables.lnd_ntx = ntx;
        kib_default_tunables.lnd_conns_per_peer = conns_per_peer;
        kib_default_tunables.lnd_tos = tos;
}