/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include "o2iblnd.h"

#define CURRENT_LND_VERSION 1
static int service = 987;
module_param(service, int, 0444);
MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");

static int cksum = 0;
module_param(cksum, int, 0644);
MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");

static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");

/* Number of threads in each scheduler pool (one pool per CPT); a
 * reasonable value is estimated from the number of CPUs if this is
 * left at zero. */
static int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");

static unsigned int conns_per_peer = 1;
module_param(conns_per_peer, uint, 0444);
MODULE_PARM_DESC(conns_per_peer, "number of connections per peer");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
module_param(ntx, int, 0444);
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");

/* NB: this value is shared by all CPTs */
static int credits = 256;
module_param(credits, int, 0444);
MODULE_PARM_DESC(credits, "# concurrent sends");

static int peer_credits = 8;
module_param(peer_credits, int, 0444);
MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");

static int peer_credits_hiw = 0;
module_param(peer_credits_hiw, int, 0444);
MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");

static int peer_buffer_credits = 0;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");

static int peer_timeout = DEFAULT_PEER_TIMEOUT;
module_param(peer_timeout, int, 0444);
MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");

static char *ipif_name = "ib0";
module_param(ipif_name, charp, 0444);
MODULE_PARM_DESC(ipif_name, "IPoIB interface name");

static int retry_count = 5;
module_param(retry_count, int, 0644);
MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");

static int rnr_retry_count = 6;
module_param(rnr_retry_count, int, 0644);
MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");

static int keepalive = 100;
module_param(keepalive, int, 0644);
MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");

static int ib_mtu;
module_param(ib_mtu, int, 0444);
MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");

static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");

static int use_fastreg_gaps;
module_param(use_fastreg_gaps, int, 0444);
MODULE_PARM_DESC(use_fastreg_gaps, "Enable discontiguous fastreg fragment support. Expect performance drop");

/*
 * map_on_demand is a flag used to determine if we can use FMR or FastReg.
 * This is applicable for kernels which support global memory regions. For
 * later kernels this flag is always enabled, since we will always either
 * use FMR or FastReg.
 *
 * For kernels which support global memory regions map_on_demand defaults
 * to 0, which means we will be using global memory regions exclusively.
 * If it is set to a value other than 0, then we will behave as follows:
 *  1. Always default the number of fragments to IBLND_MAX_RDMA_FRAGS
 *  2. Create FMR/FastReg pools
 *  3. Negotiate the supported number of fragments per connection
 *  4. Attempt to transmit using global memory regions only if
 *     map-on-demand is not turned on, otherwise use FMR or FastReg
 *  5. In case of transmitting tx with GAPS over FMR we will need to
 *     transmit it with multiple fragments. Look at the comments in
 *     kiblnd_fmr_map_tx() for an explanation of the behavior.
 *
 * For later kernels we default map_on_demand to 1 and do not allow
 * it to be set to 0, since there is no longer support for global memory
 * regions. In that case:
 *  1. Default the number of fragments to IBLND_MAX_RDMA_FRAGS
 *  2. Create FMR/FastReg pools
 *  3. Negotiate the supported number of fragments per connection
 *  4. Look at the comments in kiblnd_fmr_map_tx() for an explanation of
 *     the behavior when transmitting with GAPS versus contiguous buffers.
 */
#ifdef HAVE_IB_GET_DMA_MR
#define IBLND_DEFAULT_MAP_ON_DEMAND 0
#define MOD_STR "map on demand"
#else
#define IBLND_DEFAULT_MAP_ON_DEMAND 1
#define MOD_STR "map on demand (obsolete)"
#endif
static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
module_param(map_on_demand, int, 0444);
MODULE_PARM_DESC(map_on_demand, MOD_STR);
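
/*
 * Read-only parameters declared with mode 0444 above can still be inspected
 * at runtime under /sys/module/ko2iblnd/parameters/ (for example
 * map_on_demand); this is standard Linux module parameter behaviour, noted
 * here for convenience.
 */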

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
module_param(fmr_flush_trigger, int, 0444);
MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");

static int fmr_cache = 1;
module_param(fmr_cache, int, 0444);
MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
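
/*
 * For example, with the default ntx of 512 the "fmr_pool_size >= ntx / 4"
 * guideline above works out to a minimum of 128, so the default pool size
 * of 512 leaves ample headroom.
 */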

/*
 * 0: disable failover
 * 1: enable failover if necessary
 * 2: force to failover (for debug)
 */
static int dev_failover = 0;
module_param(dev_failover, int, 0444);
MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");

static int require_privileged_port;
module_param(require_privileged_port, int, 0644);
MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");

static int use_privileged_port = 1;
module_param(use_privileged_port, int, 0644);
MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");

static unsigned int wrq_sge = 2;
module_param(wrq_sge, uint, 0444);
MODULE_PARM_DESC(wrq_sge, "# scatter/gather element per work request");
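
/*
 * The parameters above are normally set at module load time, e.g. with an
 * options line in a modprobe configuration file (the values below are purely
 * illustrative, not tuning recommendations):
 *
 *   options ko2iblnd peer_credits=32 concurrent_sends=64 conns_per_peer=4
 */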

struct kib_tunables kiblnd_tunables = {
	.kib_dev_failover = &dev_failover,
	.kib_service = &service,
	.kib_cksum = &cksum,
	.kib_timeout = &timeout,
	.kib_keepalive = &keepalive,
	.kib_default_ipif = &ipif_name,
	.kib_retry_count = &retry_count,
	.kib_rnr_retry_count = &rnr_retry_count,
	.kib_ib_mtu = &ib_mtu,
	.kib_require_priv_port = &require_privileged_port,
	.kib_use_priv_port = &use_privileged_port,
	.kib_nscheds = &nscheds,
	.kib_wrq_sge = &wrq_sge,
	.kib_use_fastreg_gaps = &use_fastreg_gaps,
};

static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;

/* # messages/RDMAs in-flight */
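/* V1 peers use the fixed IBLND_MSG_QUEUE_SIZE_V1 queue depth; newer peers
 * take it from the configured peer_tx_credits. */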
static int
kiblnd_msg_queue_size(int version, struct lnet_ni *ni)
{
	if (version == IBLND_MSG_VERSION_1)
		return IBLND_MSG_QUEUE_SIZE_V1;
	else
		return ni->ni_net->net_tunables.lct_peer_tx_credits;
}

int
kiblnd_tunables_setup(struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;

	/*
	 * if no tunables were specified, set them up from the module
	 * defaults
	 */
	if (!ni->ni_lnd_tunables_set)
		memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
		       &default_tunables, sizeof(*tunables));

	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

	/* Current API version */
	tunables->lnd_version = CURRENT_LND_VERSION;

	if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
		CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
		       *kiblnd_tunables.kib_ib_mtu);
		return -EINVAL;
	}

	net_tunables = &ni->ni_net->net_tunables;

	if (net_tunables->lct_peer_timeout == -1)
		net_tunables->lct_peer_timeout = peer_timeout;

	if (net_tunables->lct_max_tx_credits == -1)
		net_tunables->lct_max_tx_credits = credits;

	if (net_tunables->lct_peer_tx_credits == -1)
		net_tunables->lct_peer_tx_credits = peer_credits;

	if (net_tunables->lct_peer_rtr_credits == -1)
		net_tunables->lct_peer_rtr_credits = peer_buffer_credits;

	if (net_tunables->lct_peer_tx_credits < IBLND_CREDITS_DEFAULT)
		net_tunables->lct_peer_tx_credits = IBLND_CREDITS_DEFAULT;

	if (net_tunables->lct_peer_tx_credits > IBLND_CREDITS_MAX)
		net_tunables->lct_peer_tx_credits = IBLND_CREDITS_MAX;

	if (net_tunables->lct_peer_tx_credits >
	    net_tunables->lct_max_tx_credits)
		net_tunables->lct_peer_tx_credits =
			net_tunables->lct_max_tx_credits;

#ifndef HAVE_IB_GET_DMA_MR
	/*
	 * For kernels which do not support global memory regions, always
	 * enable map_on_demand
	 */
	if (tunables->lnd_map_on_demand == 0)
		tunables->lnd_map_on_demand = 1;
#endif

	if (!tunables->lnd_peercredits_hiw)
		tunables->lnd_peercredits_hiw = peer_credits_hiw;

	if (tunables->lnd_peercredits_hiw < net_tunables->lct_peer_tx_credits / 2)
		tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits / 2;

	if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
		tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
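
	/*
	 * For example, with lct_peer_tx_credits at 8 and lnd_peercredits_hiw
	 * still 0, the clamps above raise the high-water mark to 4 (half the
	 * queue depth) and keep it below 8, so credits start being returned
	 * eagerly once about half of them are outstanding.
	 */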

	if (tunables->lnd_concurrent_sends == 0)
		tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits;

	if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
		tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;

	if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
		tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;

	if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
		CWARN("Concurrent sends %d is lower than message "
		      "queue size: %d, performance may drop slightly.\n",
		      tunables->lnd_concurrent_sends,
		      net_tunables->lct_peer_tx_credits);
	}
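
	/*
	 * For example (hypothetical values): with lct_peer_tx_credits at 16
	 * and lnd_concurrent_sends left at 0, concurrent sends default to 16
	 * and would in any case be clamped to the range [8, 32], i.e. between
	 * half and twice the message queue size.
	 */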

	if (!tunables->lnd_fmr_pool_size)
		tunables->lnd_fmr_pool_size = fmr_pool_size;
	if (!tunables->lnd_fmr_flush_trigger)
		tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
	if (!tunables->lnd_fmr_cache)
		tunables->lnd_fmr_cache = fmr_cache;
	if (!tunables->lnd_ntx)
		tunables->lnd_ntx = ntx;
	if (!tunables->lnd_conns_per_peer) {
		tunables->lnd_conns_per_peer = (conns_per_peer) ?
			conns_per_peer : 1;
	}

	return 0;
}

int
kiblnd_tunables_init(void)
{
	default_tunables.lnd_version = CURRENT_LND_VERSION;
	default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
	default_tunables.lnd_map_on_demand = map_on_demand;
	default_tunables.lnd_concurrent_sends = concurrent_sends;
	default_tunables.lnd_fmr_pool_size = fmr_pool_size;
	default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
	default_tunables.lnd_fmr_cache = fmr_cache;
	default_tunables.lnd_ntx = ntx;
	default_tunables.lnd_conns_per_peer = conns_per_peer;