4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
38 * Author: Eric Barton <eric@bartonsoftware.com>
/* RDMA service number this LND listens on (within the RDMA_PS_TCP
 * port space); read-only after module load. */
static int service = 987;
CFS_MODULE_PARM(service, "i", int, 0444,
                "service number (within RDMA_PS_TCP)");
48 CFS_MODULE_PARM(cksum, "i", int, 0644,
49 "set non-zero to enable message (not RDMA) checksums");
51 static int timeout = 50;
52 CFS_MODULE_PARM(timeout, "i", int, 0644,
55 /* Number of threads in each scheduler pool which is percpt,
56 * we will estimate reasonable value based on CPUs if it's set to zero. */
58 CFS_MODULE_PARM(nscheds, "i", int, 0444,
59 "number of threads in each scheduler pool");
61 /* NB: this value is shared by all CPTs, it can grow at runtime */
63 CFS_MODULE_PARM(ntx, "i", int, 0444,
64 "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
/* Total number of concurrent sends allowed across all peers. */
static int credits = 256;
CFS_MODULE_PARM(credits, "i", int, 0444,
                "# concurrent sends");

/* Message-queue depth advertised to (and allowed for) a single peer. */
static int peer_credits = 8;
CFS_MODULE_PARM(peer_credits, "i", int, 0444,
                "# concurrent sends to 1 peer");

/* High-water mark at which consumed credits are eagerly returned to
 * the peer; 0 requests a default derived from peer_credits
 * (NOTE(review): the derivation appears to live in
 * kiblnd_tunables_init — confirm there). */
static int peer_credits_hiw = 0;
CFS_MODULE_PARM(peer_credits_hiw, "i", int, 0444,
                "when eagerly to return credits");

/* Router buffer credits granted per peer when this node routes LNet
 * traffic; 0 means use the default. */
static int peer_buffer_credits = 0;
CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
                "# per-peer router buffer credits");

/* Seconds without aliveness news before a peer is declared dead;
 * values <= 0 disable the check. */
static int peer_timeout = 180;
CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
                "Seconds without aliveness news to declare peer dead (<=0 to disable)");

/* Name of the IPoIB interface used for connection setup. */
static char *ipif_name = "ib0";
CFS_MODULE_PARM(ipif_name, "s", charp, 0444,
                "IPoIB interface name");

/* RDMA CM retransmission count when no ACK is received (runtime
 * writable, mode 0644). */
static int retry_count = 5;
CFS_MODULE_PARM(retry_count, "i", int, 0644,
                "Retransmissions when no ACK received");

/* Receiver-not-ready (RNR) retransmission count (runtime writable). */
static int rnr_retry_count = 6;
CFS_MODULE_PARM(rnr_retry_count, "i", int, 0644,
                "RNR retransmissions");

/* Idle seconds on a connection before a keepalive is sent. */
static int keepalive = 100;
CFS_MODULE_PARM(keepalive, "i", int, 0644,
                "Idle time in seconds before sending a keepalive");

/* Requested IB MTU, one of 256/512/1024/2048/4096; rejected by
 * kiblnd_tunables_init when kiblnd_translate_mtu() returns < 0
 * (NOTE(review): 0 presumably selects the HCA default — confirm
 * against kiblnd_translate_mtu). */
static int ib_mtu = 0;
CFS_MODULE_PARM(ib_mtu, "i", int, 0444,
                "IB MTU 256/512/1024/2048/4096");

/* Send work-queue sizing; 0 lets kiblnd_tunables_init derive a value
 * from peer_credits and map_on_demand. */
static int concurrent_sends = 0;
CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
                "send work-queue sizing");
111 static int map_on_demand = 0;
112 CFS_MODULE_PARM(map_on_demand, "i", int, 0444,
/* NB: this value is shared by all CPTs, it can grow at runtime */
/* Size of the FMR pool on each CPT; should be >= ntx / 4. */
static int fmr_pool_size = 512;
CFS_MODULE_PARM(fmr_pool_size, "i", int, 0444,
                "size of fmr pool on each CPT (>= ntx / 4)");

/* NB: this value is shared by all CPTs, it can grow at runtime */
/* Number of dirty FMRs that triggers a pool flush. */
static int fmr_flush_trigger = 384;
CFS_MODULE_PARM(fmr_flush_trigger, "i", int, 0444,
                "# dirty FMRs that triggers pool flush");

/* Non-zero enables FMR caching. */
static int fmr_cache = 1;
CFS_MODULE_PARM(fmr_cache, "i", int, 0444,
                "non-zero to enable FMR caching");

/* NB: this value is shared by all CPTs, it can grow at runtime */
/* Size of the MR-cache PMR pool on each CPT. */
static int pmr_pool_size = 512;
CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
                "size of MR cache pmr pool on each CPT");
135 * 0: disable failover
136 * 1: enable failover if necessary
137 * 2: force to failover (for debug)
/* HCA failover for bonding: 0 = disabled, 1 = failover if necessary,
 * 2 = force failover (debug); other values reserved. */
static int dev_failover = 0;
CFS_MODULE_PARM(dev_failover, "i", int, 0444,
                "HCA failover for bonding (0 off, 1 on, other values reserved)");
/* When non-zero, reject incoming connections whose source port is not
 * privileged (runtime writable). */
static int require_privileged_port = 0;
CFS_MODULE_PARM(require_privileged_port, "i", int, 0644,
                "require privileged port when accepting connection");

/* When non-zero, bind to a privileged local port when initiating
 * connections (runtime writable). */
static int use_privileged_port = 1;
CFS_MODULE_PARM(use_privileged_port, "i", int, 0644,
                "use privileged port when initiating connection");
/* Tunables exported to the rest of the o2iblnd code. Every field is a
 * pointer to the corresponding module parameter above, so runtime
 * updates to writable parameters are observed immediately by users of
 * this table. */
kib_tunables_t kiblnd_tunables = {
        .kib_dev_failover = &dev_failover,
        .kib_service = &service,
        .kib_timeout = &timeout,
        .kib_keepalive = &keepalive,
        .kib_credits = &credits,
        .kib_peertxcredits = &peer_credits,
        .kib_peercredits_hiw = &peer_credits_hiw,
        .kib_peerrtrcredits = &peer_buffer_credits,
        .kib_peertimeout = &peer_timeout,
        .kib_default_ipif = &ipif_name,
        .kib_retry_count = &retry_count,
        .kib_rnr_retry_count = &rnr_retry_count,
        .kib_concurrent_sends = &concurrent_sends,
        .kib_ib_mtu = &ib_mtu,
        .kib_map_on_demand = &map_on_demand,
        .kib_fmr_pool_size = &fmr_pool_size,
        .kib_fmr_flush_trigger = &fmr_flush_trigger,
        .kib_fmr_cache = &fmr_cache,
        .kib_pmr_pool_size = &pmr_pool_size,
        .kib_require_priv_port = &require_privileged_port,
        .kib_use_priv_port = &use_privileged_port,
        .kib_nscheds = &nscheds
179 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
181 static char ipif_basename_space[32];
183 #ifndef HAVE_SYSCTL_UNNUMBERED
191 O2IBLND_PEER_TXCREDITS,
192 O2IBLND_PEER_CREDITS_HIW,
193 O2IBLND_PEER_RTRCREDITS,
194 O2IBLND_PEER_TIMEOUT,
195 O2IBLND_IPIF_BASENAME,
197 O2IBLND_RNR_RETRY_COUNT,
199 O2IBLND_CONCURRENT_SENDS,
201 O2IBLND_MAP_ON_DEMAND,
202 O2IBLND_FMR_POOL_SIZE,
203 O2IBLND_FMR_FLUSH_TRIGGER,
205 O2IBLND_PMR_POOL_SIZE,
/* On kernels where sysctl entries are unnumbered, every ctl_name gets
 * CTL_UNNUMBERED so entries are matched by procname only and carry no
 * stable binary sysctl number. */
#define O2IBLND_SERVICE CTL_UNNUMBERED
#define O2IBLND_CKSUM CTL_UNNUMBERED
#define O2IBLND_TIMEOUT CTL_UNNUMBERED
#define O2IBLND_NTX CTL_UNNUMBERED
#define O2IBLND_CREDITS CTL_UNNUMBERED
#define O2IBLND_PEER_TXCREDITS CTL_UNNUMBERED
#define O2IBLND_PEER_CREDITS_HIW CTL_UNNUMBERED
#define O2IBLND_PEER_RTRCREDITS CTL_UNNUMBERED
#define O2IBLND_PEER_TIMEOUT CTL_UNNUMBERED
#define O2IBLND_IPIF_BASENAME CTL_UNNUMBERED
#define O2IBLND_RETRY_COUNT CTL_UNNUMBERED
#define O2IBLND_RNR_RETRY_COUNT CTL_UNNUMBERED
#define O2IBLND_KEEPALIVE CTL_UNNUMBERED
#define O2IBLND_CONCURRENT_SENDS CTL_UNNUMBERED
#define O2IBLND_IB_MTU CTL_UNNUMBERED
#define O2IBLND_MAP_ON_DEMAND CTL_UNNUMBERED
#define O2IBLND_FMR_POOL_SIZE CTL_UNNUMBERED
#define O2IBLND_FMR_FLUSH_TRIGGER CTL_UNNUMBERED
#define O2IBLND_FMR_CACHE CTL_UNNUMBERED
#define O2IBLND_PMR_POOL_SIZE CTL_UNNUMBERED
#define O2IBLND_DEV_FAILOVER CTL_UNNUMBERED
234 static struct ctl_table kiblnd_ctl_table[] = {
236 .ctl_name = O2IBLND_SERVICE,
237 .procname = "service",
239 .maxlen = sizeof(int),
241 .proc_handler = &proc_dointvec
244 .ctl_name = O2IBLND_CKSUM,
247 .maxlen = sizeof(int),
249 .proc_handler = &proc_dointvec
252 .ctl_name = O2IBLND_TIMEOUT,
253 .procname = "timeout",
255 .maxlen = sizeof(int),
257 .proc_handler = &proc_dointvec
260 .ctl_name = O2IBLND_NTX,
263 .maxlen = sizeof(int),
265 .proc_handler = &proc_dointvec
268 .ctl_name = O2IBLND_CREDITS,
269 .procname = "credits",
271 .maxlen = sizeof(int),
273 .proc_handler = &proc_dointvec
276 .ctl_name = O2IBLND_PEER_TXCREDITS,
277 .procname = "peer_credits",
278 .data = &peer_credits,
279 .maxlen = sizeof(int),
281 .proc_handler = &proc_dointvec
284 .ctl_name = O2IBLND_PEER_CREDITS_HIW,
285 .procname = "peer_credits_hiw",
286 .data = &peer_credits_hiw,
287 .maxlen = sizeof(int),
289 .proc_handler = &proc_dointvec
292 .ctl_name = O2IBLND_PEER_RTRCREDITS,
293 .procname = "peer_buffer_credits",
294 .data = &peer_buffer_credits,
295 .maxlen = sizeof(int),
297 .proc_handler = &proc_dointvec
300 .ctl_name = O2IBLND_PEER_TIMEOUT,
301 .procname = "peer_timeout",
302 .data = &peer_timeout,
303 .maxlen = sizeof(int),
305 .proc_handler = &proc_dointvec
308 .ctl_name = O2IBLND_IPIF_BASENAME,
309 .procname = "ipif_name",
310 .data = ipif_basename_space,
311 .maxlen = sizeof(ipif_basename_space),
313 .proc_handler = &proc_dostring
316 .ctl_name = O2IBLND_RETRY_COUNT,
317 .procname = "retry_count",
318 .data = &retry_count,
319 .maxlen = sizeof(int),
321 .proc_handler = &proc_dointvec
324 .ctl_name = O2IBLND_RNR_RETRY_COUNT,
325 .procname = "rnr_retry_count",
326 .data = &rnr_retry_count,
327 .maxlen = sizeof(int),
329 .proc_handler = &proc_dointvec
332 .ctl_name = O2IBLND_KEEPALIVE,
333 .procname = "keepalive",
335 .maxlen = sizeof(int),
337 .proc_handler = &proc_dointvec
340 .ctl_name = O2IBLND_CONCURRENT_SENDS,
341 .procname = "concurrent_sends",
342 .data = &concurrent_sends,
343 .maxlen = sizeof(int),
345 .proc_handler = &proc_dointvec
348 .ctl_name = O2IBLND_IB_MTU,
349 .procname = "ib_mtu",
351 .maxlen = sizeof(int),
353 .proc_handler = &proc_dointvec
356 .ctl_name = O2IBLND_MAP_ON_DEMAND,
357 .procname = "map_on_demand",
358 .data = &map_on_demand,
359 .maxlen = sizeof(int),
361 .proc_handler = &proc_dointvec
365 .ctl_name = O2IBLND_FMR_POOL_SIZE,
366 .procname = "fmr_pool_size",
367 .data = &fmr_pool_size,
368 .maxlen = sizeof(int),
370 .proc_handler = &proc_dointvec
373 .ctl_name = O2IBLND_FMR_FLUSH_TRIGGER,
374 .procname = "fmr_flush_trigger",
375 .data = &fmr_flush_trigger,
376 .maxlen = sizeof(int),
378 .proc_handler = &proc_dointvec
381 .ctl_name = O2IBLND_FMR_CACHE,
382 .procname = "fmr_cache",
384 .maxlen = sizeof(int),
386 .proc_handler = &proc_dointvec
389 .ctl_name = O2IBLND_PMR_POOL_SIZE,
390 .procname = "pmr_pool_size",
391 .data = &pmr_pool_size,
392 .maxlen = sizeof(int),
394 .proc_handler = &proc_dointvec
397 .ctl_name = O2IBLND_DEV_FAILOVER,
398 .procname = "dev_failover",
399 .data = &dev_failover,
400 .maxlen = sizeof(int),
402 .proc_handler = &proc_dointvec
407 static struct ctl_table kiblnd_top_ctl_table[] = {
409 .ctl_name = CTL_O2IBLND,
410 .procname = "o2iblnd",
414 .child = kiblnd_ctl_table
420 kiblnd_initstrtunable(char *space, char *str, int size)
422 strncpy(space, str, size);
427 kiblnd_sysctl_init (void)
429 kiblnd_initstrtunable(ipif_basename_space, ipif_name,
430 sizeof(ipif_basename_space));
432 kiblnd_tunables.kib_sysctl =
433 register_sysctl_table(kiblnd_top_ctl_table);
435 if (kiblnd_tunables.kib_sysctl == NULL)
436 CWARN("Can't setup /proc tunables\n");
440 kiblnd_sysctl_fini (void)
442 if (kiblnd_tunables.kib_sysctl != NULL)
443 unregister_sysctl_table(kiblnd_tunables.kib_sysctl);
449 kiblnd_sysctl_init (void)
454 kiblnd_sysctl_fini (void)
461 kiblnd_tunables_init (void)
463 if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
464 CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
465 *kiblnd_tunables.kib_ib_mtu);
469 if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
470 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
472 if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
473 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
475 if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
476 *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
478 if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
479 *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
481 if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
482 *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
484 if (*kiblnd_tunables.kib_map_on_demand < 0 ||
485 *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
486 *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
488 if (*kiblnd_tunables.kib_map_on_demand == 1)
489 *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
491 if (*kiblnd_tunables.kib_concurrent_sends == 0) {
492 if (*kiblnd_tunables.kib_map_on_demand > 0 &&
493 *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
494 *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
496 *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
499 if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
500 *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
502 if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
503 *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
505 if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
506 CWARN("Concurrent sends %d is lower than message queue size: %d, "
507 "performance may drop slightly.\n",
508 *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
511 kiblnd_sysctl_init();
516 kiblnd_tunables_fini (void)
518 kiblnd_sysctl_fini();