4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
34 * Author: Eric Barton <eric@bartonsoftware.com>
/*
 * Module parameters: load-time (and, for 0644 entries, runtime) tunables
 * for the o2iblnd LND.  0444 parameters are fixed after module load;
 * 0644 parameters may be changed via sysfs while the module is loaded.
 * NOTE(review): several variable definitions (cksum, nscheds, ntx, ib_mtu)
 * are not visible in this excerpt; they are assumed to be defined nearby.
 */
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
39 static int service = 987;
40 module_param(service, int, 0444);
41 MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
44 module_param(cksum, int, 0644);
45 MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
47 static int timeout = 50;
48 module_param(timeout, int, 0644);
49 MODULE_PARM_DESC(timeout, "timeout (seconds)");
51 /* Number of threads in each scheduler pool which is percpt,
52 * we will estimate reasonable value based on CPUs if it's set to zero. */
54 module_param(nscheds, int, 0444);
55 MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
57 /* NB: this value is shared by all CPTs, it can grow at runtime */
59 module_param(ntx, int, 0444);
60 MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
62 /* NB: this value is shared by all CPTs */
63 static int credits = 256;
64 module_param(credits, int, 0444);
65 MODULE_PARM_DESC(credits, "# concurrent sends");
67 static int peer_credits = 8;
68 module_param(peer_credits, int, 0444);
69 MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
/* 0 => derived from the peer queue size in kiblnd_tunables_setup() */
71 static int peer_credits_hiw = 0;
72 module_param(peer_credits_hiw, int, 0444);
73 MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");
75 static int peer_buffer_credits = 0;
76 module_param(peer_buffer_credits, int, 0444);
77 MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
79 static int peer_timeout = 180;
80 module_param(peer_timeout, int, 0444);
81 MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
83 static char *ipif_name = "ib0";
84 module_param(ipif_name, charp, 0444);
85 MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
87 static int retry_count = 5;
88 module_param(retry_count, int, 0644);
89 MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
91 static int rnr_retry_count = 6;
92 module_param(rnr_retry_count, int, 0644);
93 MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
95 static int keepalive = 100;
96 module_param(keepalive, int, 0644);
97 MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
/* validated by kiblnd_translate_mtu() in kiblnd_tunables_setup() */
100 module_param(ib_mtu, int, 0444);
101 MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
/* 0 (implicit) => derived from peer_tx_credits in kiblnd_tunables_setup() */
103 static int concurrent_sends;
104 module_param(concurrent_sends, int, 0444);
105 MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
107 static int map_on_demand;
108 module_param(map_on_demand, int, 0444);
109 MODULE_PARM_DESC(map_on_demand, "map on demand");
111 /* NB: this value is shared by all CPTs, it can grow at runtime */
112 static int fmr_pool_size = 512;
113 module_param(fmr_pool_size, int, 0444);
114 MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
116 /* NB: this value is shared by all CPTs, it can grow at runtime */
117 static int fmr_flush_trigger = 384;
118 module_param(fmr_flush_trigger, int, 0444);
119 MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
121 static int fmr_cache = 1;
122 module_param(fmr_cache, int, 0444);
123 MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
126 * 0: disable failover
127 * 1: enable failover if necessary
128 * 2: force to failover (for debug)
130 static int dev_failover = 0;
131 module_param(dev_failover, int, 0444);
132 MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
134 static int require_privileged_port;
135 module_param(require_privileged_port, int, 0644);
136 MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
138 static int use_privileged_port = 1;
139 module_param(use_privileged_port, int, 0644);
140 MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
/*
 * kiblnd_tunables: exported aggregate of pointers to the module parameters
 * above, so the rest of the o2iblnd code reads them indirectly (and, for the
 * 0644 parameters, sees runtime changes made through sysfs).
 * NOTE(review): the closing brace of this initializer is outside this excerpt.
 */
142 kib_tunables_t kiblnd_tunables = {
143 .kib_dev_failover = &dev_failover,
144 .kib_service = &service,
146 .kib_timeout = &timeout,
147 .kib_keepalive = &keepalive,
149 .kib_default_ipif = &ipif_name,
150 .kib_retry_count = &retry_count,
151 .kib_rnr_retry_count = &rnr_retry_count,
152 .kib_ib_mtu = &ib_mtu,
153 .kib_require_priv_port = &require_privileged_port,
154 .kib_use_priv_port = &use_privileged_port,
155 .kib_nscheds = &nscheds
/* Baseline tunables copied into any NI that was configured without
 * explicit LND tunables; populated by kiblnd_tunables_init(). */
158 static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
160 /* # messages/RDMAs in-flight */
/*
 * kiblnd_msg_queue_size: message queue depth negotiated for a connection.
 * Protocol V1 peers get the fixed V1 queue size; otherwise the NI's
 * per-peer TX credit setting is used.
 */
162 kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
164 if (version == IBLND_MSG_VERSION_1)
165 return IBLND_MSG_QUEUE_SIZE_V1;
167 return ni->ni_net->net_tunables.lct_peer_tx_credits;
/*
 * kiblnd_tunables_setup: validate and normalize the tunables of one NI.
 * Fills unset (-1 or 0) fields from the module-parameter defaults, then
 * clamps interdependent values into consistent ranges.  Rejects an invalid
 * ib_mtu (presumably returning an error to the caller — the return statement
 * is outside this excerpt; TODO confirm).
 */
173 kiblnd_tunables_setup(lnet_ni_t *ni)
175 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
176 struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
179 * if there was no tunables specified, setup the tunables to be
/* No per-NI tunables were given: start from the module-wide defaults. */
182 if (!ni->ni_lnd_tunables_set)
183 memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
184 &default_tunables, sizeof(*tunables));
186 tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
188 /* Current API version */
189 tunables->lnd_version = 0;
/* ib_mtu must map onto one of the discrete IB MTU enum values. */
191 if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
192 CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
193 *kiblnd_tunables.kib_ib_mtu);
197 net_tunables = &ni->ni_net->net_tunables;
/* -1 means "not configured": fall back to the module parameter. */
199 if (net_tunables->lct_peer_timeout == -1)
200 net_tunables->lct_peer_timeout = peer_timeout;
202 if (net_tunables->lct_max_tx_credits == -1)
203 net_tunables->lct_max_tx_credits = credits;
205 if (net_tunables->lct_peer_tx_credits == -1)
206 net_tunables->lct_peer_tx_credits = peer_credits;
208 if (net_tunables->lct_peer_rtr_credits == -1)
209 net_tunables->lct_peer_rtr_credits = peer_buffer_credits;
/* Clamp per-peer TX credits into [IBLND_CREDITS_DEFAULT, IBLND_CREDITS_MAX]
 * and never above the global TX credit pool. */
211 if (net_tunables->lct_peer_tx_credits < IBLND_CREDITS_DEFAULT)
212 net_tunables->lct_peer_tx_credits = IBLND_CREDITS_DEFAULT;
214 if (net_tunables->lct_peer_tx_credits > IBLND_CREDITS_MAX)
215 net_tunables->lct_peer_tx_credits = IBLND_CREDITS_MAX;
217 if (net_tunables->lct_peer_tx_credits >
218 net_tunables->lct_max_tx_credits)
219 net_tunables->lct_peer_tx_credits =
220 net_tunables->lct_max_tx_credits;
/* High-water mark for eager credit return: default from the module
 * parameter, then force into [peer_tx_credits / 2, peer_tx_credits - 1]. */
222 if (!tunables->lnd_peercredits_hiw)
223 tunables->lnd_peercredits_hiw = peer_credits_hiw;
225 if (tunables->lnd_peercredits_hiw < net_tunables->lct_peer_tx_credits / 2)
226 tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits / 2;
228 if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
229 tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;
/* Out-of-range map_on_demand disables the feature entirely. */
231 if (tunables->lnd_map_on_demand < 0 ||
232 tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
233 /* disable map-on-demand */
234 tunables->lnd_map_on_demand = 0;
237 if (tunables->lnd_map_on_demand == 1) {
238 /* don't make sense to create map if only one fragment */
239 tunables->lnd_map_on_demand = 2;
/* 0 => derive concurrent_sends from the peer queue size; small
 * map-on-demand settings get double the depth. */
242 if (tunables->lnd_concurrent_sends == 0) {
243 if (tunables->lnd_map_on_demand > 0 &&
244 tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
245 tunables->lnd_concurrent_sends =
246 net_tunables->lct_peer_tx_credits * 2;
248 tunables->lnd_concurrent_sends =
249 net_tunables->lct_peer_tx_credits;
/* Keep concurrent_sends within [peer_tx_credits / 2, peer_tx_credits * 2]. */
253 if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
254 tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;
256 if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
257 tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;
/* Permitted, but warn: fewer concurrent sends than the queue size can
 * throttle throughput slightly. */
259 if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
260 CWARN("Concurrent sends %d is lower than message "
261 "queue size: %d, performance may drop slightly.\n",
262 tunables->lnd_concurrent_sends,
263 net_tunables->lct_peer_tx_credits);
/* Unset (zero) FMR tunables inherit the module-parameter defaults. */
266 if (!tunables->lnd_fmr_pool_size)
267 tunables->lnd_fmr_pool_size = fmr_pool_size;
268 if (!tunables->lnd_fmr_flush_trigger)
269 tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
270 if (!tunables->lnd_fmr_cache)
271 tunables->lnd_fmr_cache = fmr_cache;
/*
 * kiblnd_tunables_init: snapshot the module parameters into
 * default_tunables, the template copied onto any NI configured without
 * explicit LND tunables (see kiblnd_tunables_setup()).
 */
277 kiblnd_tunables_init(void)
279 default_tunables.lnd_version = 0;
/* NOTE(review): statement below ends with ',' not ';' — behaviorally
 * harmless (comma operator chains it with the next assignment), but it
 * should be a semicolon for consistency. */
280 default_tunables.lnd_peercredits_hiw = peer_credits_hiw,
281 default_tunables.lnd_map_on_demand = map_on_demand;
282 default_tunables.lnd_concurrent_sends = concurrent_sends;
283 default_tunables.lnd_fmr_pool_size = fmr_pool_size;
284 default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
285 default_tunables.lnd_fmr_cache = fmr_cache;