LU-5718 o2iblnd: multiple sges for work request
[fs/lustre-release.git] lnet/klnds/o2iblnd/o2iblnd_modparams.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

static int service = 987;
module_param(service, int, 0444);
MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");

static int cksum = 0;
module_param(cksum, int, 0644);
MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");

static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");

/* Number of threads in each scheduler pool, which is per-CPT;
 * if set to zero, a reasonable value is estimated from the number of CPUs. */
static int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
module_param(ntx, int, 0444);
MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");

/* NB: this value is shared by all CPTs */
static int credits = 256;
module_param(credits, int, 0444);
MODULE_PARM_DESC(credits, "# concurrent sends");

static int peer_credits = 8;
module_param(peer_credits, int, 0444);
MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");

static int peer_credits_hiw = 0;
module_param(peer_credits_hiw, int, 0444);
MODULE_PARM_DESC(peer_credits_hiw, "when to eagerly return credits");

static int peer_buffer_credits = 0;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");

static int peer_timeout = 180;
module_param(peer_timeout, int, 0444);
MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");

static char *ipif_name = "ib0";
module_param(ipif_name, charp, 0444);
MODULE_PARM_DESC(ipif_name, "IPoIB interface name");

static int retry_count = 5;
module_param(retry_count, int, 0644);
MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");

static int rnr_retry_count = 6;
module_param(rnr_retry_count, int, 0644);
MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");

static int keepalive = 100;
module_param(keepalive, int, 0644);
MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");

static int ib_mtu;
module_param(ib_mtu, int, 0444);
MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");

static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");

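/* The default for map_on_demand depends on whether the kernel still
 * provides a global DMA memory region (ib_get_dma_mr).  With it, mapping
 * on demand can default to off (0); without it, registration has to go
 * through FMR/FastReg, so map_on_demand may not be disabled and defaults
 * to IBLND_MAX_RDMA_FRAGS. */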
#ifdef HAVE_IB_GET_DMA_MR
#define IBLND_DEFAULT_MAP_ON_DEMAND 0
#define IBLND_MIN_MAP_ON_DEMAND 0
#else
#define IBLND_DEFAULT_MAP_ON_DEMAND IBLND_MAX_RDMA_FRAGS
#define IBLND_MIN_MAP_ON_DEMAND 1
#endif
static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
module_param(map_on_demand, int, 0444);
MODULE_PARM_DESC(map_on_demand, "map on demand");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");

/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
module_param(fmr_flush_trigger, int, 0444);
MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");

static int fmr_cache = 1;
module_param(fmr_cache, int, 0444);
MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");

/*
 * 0: disable failover
 * 1: enable failover if necessary
 * 2: force to failover (for debug)
 */
static int dev_failover = 0;
module_param(dev_failover, int, 0444);
MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");

static int require_privileged_port;
module_param(require_privileged_port, int, 0644);
MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");

static int use_privileged_port = 1;
module_param(use_privileged_port, int, 0644);
MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");

static unsigned int wrq_sge = 1;
module_param(wrq_sge, uint, 0444);
MODULE_PARM_DESC(wrq_sge, "# scatter/gather elements per work request");

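/* The module parameters above are exported to the rest of the LND through
 * this table of pointers rather than by value, so runtime changes to the
 * writable (0644) parameters are seen immediately wherever they are used. */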
kib_tunables_t kiblnd_tunables = {
        .kib_dev_failover           = &dev_failover,
        .kib_service                = &service,
        .kib_cksum                  = &cksum,
        .kib_timeout                = &timeout,
        .kib_keepalive              = &keepalive,
        .kib_ntx                    = &ntx,
        .kib_default_ipif           = &ipif_name,
        .kib_retry_count            = &retry_count,
        .kib_rnr_retry_count        = &rnr_retry_count,
        .kib_ib_mtu                 = &ib_mtu,
        .kib_require_priv_port      = &require_privileged_port,
        .kib_use_priv_port          = &use_privileged_port,
        .kib_nscheds                = &nscheds,
        .kib_wrq_sge                = &wrq_sge,
};

static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;

/* # messages/RDMAs in-flight */
int
kiblnd_msg_queue_size(int version, struct lnet_ni *ni)
{
        if (version == IBLND_MSG_VERSION_1)
                return IBLND_MSG_QUEUE_SIZE_V1;
        else if (ni)
                return ni->ni_net->net_tunables.lct_peer_tx_credits;
        else
                return peer_credits;
}

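/* Fill in and validate the tunables for one NI: anything not set
 * explicitly through LNet configuration falls back to the module
 * parameter defaults, and out-of-range values are clamped or rejected. */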
int
kiblnd_tunables_setup(struct lnet_ni *ni)
{
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
        struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;

        /*
         * If no tunables were specified, fall back to the defaults
         * recorded at module load time.
         */
        if (!ni->ni_lnd_tunables_set)
                memcpy(&ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib,
                       &default_tunables, sizeof(*tunables));

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /* Current API version */
        tunables->lnd_version = 0;

        if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
                CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
                       *kiblnd_tunables.kib_ib_mtu);
                return -EINVAL;
        }

        net_tunables = &ni->ni_net->net_tunables;

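        /* A common network tunable of -1 means "not configured": inherit
         * the value from the corresponding module parameter. */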
        if (net_tunables->lct_peer_timeout == -1)
                net_tunables->lct_peer_timeout = peer_timeout;

        if (net_tunables->lct_max_tx_credits == -1)
                net_tunables->lct_max_tx_credits = credits;

        if (net_tunables->lct_peer_tx_credits == -1)
                net_tunables->lct_peer_tx_credits = peer_credits;

        if (net_tunables->lct_peer_rtr_credits == -1)
                net_tunables->lct_peer_rtr_credits = peer_buffer_credits;

        if (net_tunables->lct_peer_tx_credits < IBLND_CREDITS_DEFAULT)
                net_tunables->lct_peer_tx_credits = IBLND_CREDITS_DEFAULT;

        if (net_tunables->lct_peer_tx_credits > IBLND_CREDITS_MAX)
                net_tunables->lct_peer_tx_credits = IBLND_CREDITS_MAX;

        if (net_tunables->lct_peer_tx_credits >
            net_tunables->lct_max_tx_credits)
                net_tunables->lct_peer_tx_credits =
                        net_tunables->lct_max_tx_credits;

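        /* Clamp the credits high-water mark (the point at which credits
         * are returned eagerly) to at least half of, and strictly less
         * than, the peer's tx credits. */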
        if (!tunables->lnd_peercredits_hiw)
                tunables->lnd_peercredits_hiw = peer_credits_hiw;

        if (tunables->lnd_peercredits_hiw < net_tunables->lct_peer_tx_credits / 2)
                tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits / 2;

        if (tunables->lnd_peercredits_hiw >= net_tunables->lct_peer_tx_credits)
                tunables->lnd_peercredits_hiw = net_tunables->lct_peer_tx_credits - 1;

        if (tunables->lnd_map_on_demand < IBLND_MIN_MAP_ON_DEMAND ||
            tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
                /* Use the default */
                CWARN("Invalid map_on_demand (%d), expects %d - %d. Using default of %d\n",
                      tunables->lnd_map_on_demand, IBLND_MIN_MAP_ON_DEMAND,
                      IBLND_MAX_RDMA_FRAGS, IBLND_DEFAULT_MAP_ON_DEMAND);
                tunables->lnd_map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
        }

        if (tunables->lnd_map_on_demand == 1) {
                /* it makes no sense to create a map for a single fragment */
                tunables->lnd_map_on_demand = 2;
        }

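        /* If concurrent_sends was not set explicitly, derive it from the
         * peer tx credits: use twice the credits when map-on-demand keeps
         * the fragment count small (<= IBLND_MAX_RDMA_FRAGS / 8), otherwise
         * use the credits as-is; the result is then clamped to
         * [credits / 2, credits * 2]. */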
        if (tunables->lnd_concurrent_sends == 0) {
                if (tunables->lnd_map_on_demand > 0 &&
                    tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
                        tunables->lnd_concurrent_sends =
                                        net_tunables->lct_peer_tx_credits * 2;
                } else {
                        tunables->lnd_concurrent_sends =
                                net_tunables->lct_peer_tx_credits;
                }
        }

        if (tunables->lnd_concurrent_sends > net_tunables->lct_peer_tx_credits * 2)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits * 2;

        if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits / 2)
                tunables->lnd_concurrent_sends = net_tunables->lct_peer_tx_credits / 2;

        if (tunables->lnd_concurrent_sends < net_tunables->lct_peer_tx_credits) {
                CWARN("Concurrent sends %d is lower than message "
                      "queue size: %d, performance may drop slightly.\n",
                      tunables->lnd_concurrent_sends,
                      net_tunables->lct_peer_tx_credits);
        }

        if (!tunables->lnd_fmr_pool_size)
                tunables->lnd_fmr_pool_size = fmr_pool_size;
        if (!tunables->lnd_fmr_flush_trigger)
                tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
        if (!tunables->lnd_fmr_cache)
                tunables->lnd_fmr_cache = fmr_cache;

        return 0;
}

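/* Record the module parameter values as the defaults applied to any NI
 * that is brought up without explicit LND tunables. */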
int
kiblnd_tunables_init(void)
{
        default_tunables.lnd_version = 0;
        default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
        default_tunables.lnd_map_on_demand = map_on_demand;
        default_tunables.lnd_concurrent_sends = concurrent_sends;
        default_tunables.lnd_fmr_pool_size = fmr_pool_size;
        default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
        default_tunables.lnd_fmr_cache = fmr_cache;
        return 0;
}