/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

#define CURRENT_LND_VERSION 1

static int service = 987;
module_param(service, int, 0444);
MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");

static int cksum;
module_param(cksum, int, 0644);
MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");

static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");

/* Number of threads in each scheduler pool (which is per-CPT); a
 * reasonable value is estimated from the number of CPUs if this is
 * set to zero. */
static int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");

static unsigned int conns_per_peer = 1;
module_param(conns_per_peer, uint, 0444);
MODULE_PARM_DESC(conns_per_peer, "number of connections per peer");
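
/*
 * Illustrative usage (not part of the original source): the parameters
 * in this file are set at module load time, e.g. via a modprobe
 * configuration line such as:
 *
 *   options ko2iblnd timeout=100 peer_credits=16 conns_per_peer=2
 *
 * The values shown are arbitrary examples, not recommendations.
 */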

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
module_param(ntx, int, 0444);
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");

/* NB: this value is shared by all CPTs */
static int credits = DEFAULT_CREDITS;
module_param(credits, int, 0444);
MODULE_PARM_DESC(credits, "# concurrent sends");

static int peer_credits = DEFAULT_PEER_CREDITS;
module_param(peer_credits, int, 0444);
MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");

static int peer_credits_hiw = 0;
module_param(peer_credits_hiw, int, 0444);
MODULE_PARM_DESC(peer_credits_hiw, "when to eagerly return credits");

static int peer_buffer_credits = 0;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");

static int peer_timeout = DEFAULT_PEER_TIMEOUT;
module_param(peer_timeout, int, 0444);
MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");

static char *ipif_name = "ib0";
module_param(ipif_name, charp, 0444);
MODULE_PARM_DESC(ipif_name, "IPoIB interface name");

static int retry_count = 5;
module_param(retry_count, int, 0644);
MODULE_PARM_DESC(retry_count, "Number of times to retry connection operations");

static int rnr_retry_count = 6;
module_param(rnr_retry_count, int, 0644);
MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");

static int keepalive = 100;
module_param(keepalive, int, 0644);
MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");

static int ib_mtu;
module_param(ib_mtu, int, 0444);
MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");

static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");

static int use_fastreg_gaps;
module_param(use_fastreg_gaps, int, 0444);
MODULE_PARM_DESC(use_fastreg_gaps, "Enable discontiguous fastreg fragment support. Expect performance drop");

/*
 * map_on_demand is a flag used to determine if we can use FMR or FastReg.
 * This is applicable for kernels which support global memory regions. For
 * later kernels this flag is always enabled, since we will always either
 * use FMR or FastReg.
 *
 * For kernels which support global memory regions map_on_demand defaults
 * to 0, which means we will be using global memory regions exclusively.
 * If it is set to a value other than 0, then we will behave as follows:
 *  1. Always default the number of fragments to IBLND_MAX_RDMA_FRAGS
 *  2. Create FMR/FastReg pools
 *  3. Negotiate the supported number of fragments per connection
 *  4. Attempt to transmit using global memory regions only if
 *     map-on-demand is not turned on, otherwise use FMR or FastReg
 *  5. In case of transmitting tx with GAPS over FMR we will need to
 *     transmit it with multiple fragments. Look at the comments in
 *     kiblnd_fmr_map_tx() for an explanation of the behavior.
 *
 * For later kernels we default map_on_demand to 1 and do not allow
 * it to be set to 0, since there is no longer support for global memory
 * regions. Behavior:
 *  1. Default the number of fragments to IBLND_MAX_RDMA_FRAGS
 *  2. Create FMR/FastReg pools
 *  3. Negotiate the supported number of fragments per connection
 *  4. Look at the comments in kiblnd_fmr_map_tx() for an explanation of
 *     the behavior when transmitting with GAPS versus contiguous.
 */
#ifdef HAVE_IB_GET_DMA_MR
#define MOD_STR "map on demand"
#else
#define MOD_STR "map on demand (obsolete)"
#endif
static int map_on_demand = 1;
module_param(map_on_demand, int, 0444);
MODULE_PARM_DESC(map_on_demand, MOD_STR);
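
/*
 * A minimal sketch (hypothetical helper, not part of the driver) of how
 * the policy in the comment above collapses: with global MR support,
 * FMR/FastReg is used only when map_on_demand is non-zero; without it,
 * FMR/FastReg is the only choice.
 */
static inline bool kiblnd_would_use_fmr_sketch(int mod)
{
#ifdef HAVE_IB_GET_DMA_MR
        /* Global MRs exist: mod == 0 means global memory regions only. */
        return mod != 0;
#else
        /* No global MR support: always use FMR/FastReg pools. */
        return true;
#endif
}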

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
module_param(fmr_flush_trigger, int, 0444);
MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");

static int fmr_cache = 1;
module_param(fmr_cache, int, 0444);
MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");

/*
 * 0: disable failover
 * 1: enable failover if necessary
 * 2: force to failover (for debug)
 */
static int dev_failover = 0;
module_param(dev_failover, int, 0444);
MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");

static int require_privileged_port;
module_param(require_privileged_port, int, 0644);
MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");

static int use_privileged_port = 1;
module_param(use_privileged_port, int, 0644);
MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");

static unsigned int wrq_sge = 2;
module_param(wrq_sge, uint, 0444);
MODULE_PARM_DESC(wrq_sge, "# scatter/gather elements per work request");

struct kib_tunables kiblnd_tunables = {
        .kib_dev_failover       = &dev_failover,
        .kib_service            = &service,
        .kib_cksum              = &cksum,
        .kib_timeout            = &timeout,
        .kib_keepalive          = &keepalive,
        .kib_default_ipif       = &ipif_name,
        .kib_retry_count        = &retry_count,
        .kib_rnr_retry_count    = &rnr_retry_count,
        .kib_ib_mtu             = &ib_mtu,
        .kib_require_priv_port  = &require_privileged_port,
        .kib_use_priv_port      = &use_privileged_port,
        .kib_nscheds            = &nscheds,
        .kib_wrq_sge            = &wrq_sge,
        .kib_use_fastreg_gaps   = &use_fastreg_gaps,
};

static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;

/* # messages/RDMAs in-flight */
int
kiblnd_msg_queue_size(int version, struct lnet_ni *ni)
{
        if (version == IBLND_MSG_VERSION_1)
                return IBLND_MSG_QUEUE_SIZE_V1;
        else
                return ni->ni_net->net_tunables.lct_peer_tx_credits;
}
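
/* For example, a peer speaking protocol version 1 negotiates at most
 * IBLND_MSG_QUEUE_SIZE_V1 message slots, no matter how large
 * peer_credits is configured. */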

int
kiblnd_tunables_setup(struct lnet_ni *ni)
{
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
        struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;

        /*
         * If no tunables were specified, set them up with the default
         * values.
         */
        if (!ni->ni_lnd_tunables_set)
                memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
                       &default_tunables, sizeof(*tunables));

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /* Current API version */
        tunables->lnd_version = CURRENT_LND_VERSION;

        if (*kiblnd_tunables.kib_ib_mtu &&
            ib_mtu_enum_to_int(ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu)) !=
            *kiblnd_tunables.kib_ib_mtu) {
                CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
                       *kiblnd_tunables.kib_ib_mtu);
                return -EINVAL;
        }

        net_tunables = &ni->ni_net->net_tunables;

        if (net_tunables->lct_peer_timeout == -1)
                net_tunables->lct_peer_timeout = peer_timeout;

        if (net_tunables->lct_max_tx_credits == -1)
                net_tunables->lct_max_tx_credits = credits;

        if (net_tunables->lct_peer_tx_credits == -1)
                net_tunables->lct_peer_tx_credits = peer_credits;

        if (net_tunables->lct_peer_rtr_credits == -1)
                net_tunables->lct_peer_rtr_credits = peer_buffer_credits;

        if (net_tunables->lct_peer_tx_credits < IBLND_CREDITS_DEFAULT)
                net_tunables->lct_peer_tx_credits = IBLND_CREDITS_DEFAULT;

        if (net_tunables->lct_peer_tx_credits > IBLND_CREDITS_MAX)
                net_tunables->lct_peer_tx_credits = IBLND_CREDITS_MAX;

        if (net_tunables->lct_peer_tx_credits >
            net_tunables->lct_max_tx_credits)
                net_tunables->lct_peer_tx_credits =
                        net_tunables->lct_max_tx_credits;

        if (tunables->lnd_map_on_demand == UINT_MAX)
                tunables->lnd_map_on_demand = map_on_demand;

#ifndef HAVE_IB_GET_DMA_MR
        /*
         * For kernels which do not support global memory regions, always
         * enable map_on_demand.
         */
        if (tunables->lnd_map_on_demand == 0)
                tunables->lnd_map_on_demand = 1;
#endif

        if (!tunables->lnd_peercredits_hiw)
                tunables->lnd_peercredits_hiw = peer_credits_hiw;

        if (tunables->lnd_peercredits_hiw < net_tunables->lct_peer_tx_credits / 2)
                tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits / 2;

        if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
                tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
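
        /* Worked example: with lct_peer_tx_credits == 8, the two clamps
         * above force lnd_peercredits_hiw into the range [4, 7]. */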

        if (tunables->lnd_concurrent_sends == 0)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits;

        if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;

        if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;

        if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
                CWARN("Concurrent sends %d is lower than message "
                      "queue size: %d, performance may drop slightly.\n",
                      tunables->lnd_concurrent_sends,
                      net_tunables->lct_peer_tx_credits);
        }
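
        /* Worked example: with lct_peer_tx_credits == 8, the clamps above
         * keep lnd_concurrent_sends within [4, 16]; any final value below
         * 8 triggers the warning. */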

        if (!tunables->lnd_fmr_pool_size)
                tunables->lnd_fmr_pool_size = fmr_pool_size;
        if (!tunables->lnd_fmr_flush_trigger)
                tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
        if (!tunables->lnd_fmr_cache)
                tunables->lnd_fmr_cache = fmr_cache;
        if (!tunables->lnd_ntx)
                tunables->lnd_ntx = ntx;
        if (!tunables->lnd_conns_per_peer) {
                tunables->lnd_conns_per_peer = (conns_per_peer) ?
                        conns_per_peer : 1;
        }

        tunables->lnd_timeout = kiblnd_timeout();

        return 0;
}

void
kiblnd_tunables_init(void)
{
        default_tunables.lnd_version = CURRENT_LND_VERSION;
        default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
        default_tunables.lnd_map_on_demand = map_on_demand;
        default_tunables.lnd_concurrent_sends = concurrent_sends;
        default_tunables.lnd_fmr_pool_size = fmr_pool_size;
        default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
        default_tunables.lnd_fmr_cache = fmr_cache;
        default_tunables.lnd_ntx = ntx;
        default_tunables.lnd_conns_per_peer = conns_per_peer;
}