lnet/lnet/api-ni.c (fs/lustre-release.git, commit ad393636449488dc94965e64c9a2fe0b97bfe92b)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 #include <linux/moduleparam.h>
37
38 #include <lnet/lib-lnet.h>
39
40 #define D_LNI D_CONSOLE
41
42 /*
43  * initialize ln_api_mutex statically, since it is used in the
44  * discovery_set() module parameter callback, which can be invoked
45  * before module init completes; the mutex must be ready for use by then.
46  */
47 struct lnet the_lnet = {
48         .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
49 };              /* THE state of the network */
50 EXPORT_SYMBOL(the_lnet);
51
52 static char *ip2nets = "";
53 module_param(ip2nets, charp, 0444);
54 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
55
56 static char *networks = "";
57 module_param(networks, charp, 0444);
58 MODULE_PARM_DESC(networks, "local networks");
59
60 static char *routes = "";
61 module_param(routes, charp, 0444);
62 MODULE_PARM_DESC(routes, "routes to non-local networks");
63
64 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
65 module_param(rnet_htable_size, int, 0444);
66 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
67
68 static int use_tcp_bonding = false;
69 module_param(use_tcp_bonding, int, 0444);
70 MODULE_PARM_DESC(use_tcp_bonding,
71                  "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
72
73 unsigned int lnet_numa_range = 0;
74 module_param(lnet_numa_range, uint, 0444);
75 MODULE_PARM_DESC(lnet_numa_range,
76                 "NUMA range to consider during Multi-Rail selection");
77
78 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
79 static int intf_max_set(const char *val, struct kernel_param *kp);
80 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
81                   &lnet_interfaces_max, S_IRUGO|S_IWUSR);
82 MODULE_PARM_DESC(lnet_interfaces_max,
83                 "Maximum number of interfaces in a node.");
84
85 unsigned lnet_peer_discovery_disabled = 0;
86 static int discovery_set(const char *val, struct kernel_param *kp);
87 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
88                   &lnet_peer_discovery_disabled, S_IRUGO|S_IWUSR);
89 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
90                 "Set to 1 to disable peer discovery on this node.");
91
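/*
 * Note (editorial): the two module_param_call() parameters above,
 * lnet_interfaces_max and lnet_peer_discovery_disabled, are registered
 * with S_IRUGO|S_IWUSR and can therefore be changed at runtime, e.g. by
 * writing to /sys/module/lnet/parameters/lnet_peer_discovery_disabled;
 * the plain module_param() entries (ip2nets, networks, routes, ...) use
 * mode 0444 and are read-only once the module has loaded.
 */
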
92 /*
93  * This sequence number keeps track of how many times DLC was used to
94  * update the local NIs. It is incremented when a NI is added or
95  * removed and checked when sending a message to determine if there is
96  * a need to re-run the selection algorithm. See lnet_select_pathway()
97  * for more details on its usage.
98  */
99 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
100
101 static int lnet_ping(struct lnet_process_id id, signed long timeout,
102                      struct lnet_process_id __user *ids, int n_ids);
103
104 static int lnet_discover(lnet_process_id_t id, __u32 force,
105                          lnet_process_id_t __user *ids, int n_ids);
106
107 static int
108 discovery_set(const char *val, struct kernel_param *kp)
109 {
110         int rc;
111         unsigned *discovery = (unsigned *)kp->arg;
112         unsigned long value;
113         struct lnet_ping_buffer *pbuf;
114
115         rc = kstrtoul(val, 0, &value);
116         if (rc) {
117                 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
118                 return rc;
119         }
120
121         value = (value) ? 1 : 0;
122
123         /*
124          * Hold the api_mutex so the new value is stored, and the ping
125          * target updated, atomically with respect to LNet startup/shutdown.
126          */
127         mutex_lock(&the_lnet.ln_api_mutex);
128
129         if (value == *discovery) {
130                 mutex_unlock(&the_lnet.ln_api_mutex);
131                 return 0;
132         }
133
134         *discovery = value;
135
136         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
137                 mutex_unlock(&the_lnet.ln_api_mutex);
138                 return 0;
139         }
140
141         /* tell peers that discovery setting has changed */
142         lnet_net_lock(LNET_LOCK_EX);
143         pbuf = the_lnet.ln_ping_target;
144         if (value)
145                 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
146         else
147                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
148         lnet_net_unlock(LNET_LOCK_EX);
149
150         lnet_push_update_to_peers(1);
151
152         mutex_unlock(&the_lnet.ln_api_mutex);
153
154         return 0;
155 }
156
157 static int
158 intf_max_set(const char *val, struct kernel_param *kp)
159 {
160         int value, rc;
161
162         rc = kstrtoint(val, 0, &value);
163         if (rc) {
164                 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
165                 return rc;
166         }
167
168         if (value < LNET_INTERFACES_MIN) {
169                 CWARN("max interfaces provided is too small, setting to %d\n",
170                       LNET_INTERFACES_MIN);
171                 value = LNET_INTERFACES_MIN;
172         }
173
174         *(int *)kp->arg = value;
175
176         return 0;
177 }
178
179 static char *
180 lnet_get_routes(void)
181 {
182         return routes;
183 }
184
185 static char *
186 lnet_get_networks(void)
187 {
188         char   *nets;
189         int     rc;
190
191         if (*networks != 0 && *ip2nets != 0) {
192                 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
193                                    "'ip2nets' but not both at once\n");
194                 return NULL;
195         }
196
197         if (*ip2nets != 0) {
198                 rc = lnet_parse_ip2nets(&nets, ip2nets);
199                 return (rc == 0) ? nets : NULL;
200         }
201
202         if (*networks != 0)
203                 return networks;
204
205         return "tcp";
206 }
207
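/*
 * Editorial example (illustrative only): the precedence implemented by
 * lnet_get_networks() means that loading the module with, say,
 *
 *	modprobe lnet networks="tcp0(eth0),o2ib0(ib0)"
 *
 * uses those nets; specifying ip2nets instead derives the nets from the
 * node's IP addresses; supplying both is rejected; and supplying neither
 * falls back to the default "tcp" network.
 */
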
208 static void
209 lnet_init_locks(void)
210 {
211         spin_lock_init(&the_lnet.ln_eq_wait_lock);
212         init_waitqueue_head(&the_lnet.ln_eq_waitq);
213         init_waitqueue_head(&the_lnet.ln_rc_waitq);
214         mutex_init(&the_lnet.ln_lnd_mutex);
215 }
216
217 static void
218 lnet_fini_locks(void)
219 {
220 }
221
222 struct kmem_cache *lnet_mes_cachep;        /* MEs kmem_cache */
223 struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
224                                             *  MDs kmem_cache */
225
226 static int
227 lnet_descriptor_setup(void)
228 {
229         /* create specific kmem_cache for MEs and small MDs (i.e., originally
230          * allocated in <size-xxx> kmem_cache).
231          */
232         lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
233                                             0, 0, NULL);
234         if (!lnet_mes_cachep)
235                 return -ENOMEM;
236
237         lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
238                                                   LNET_SMALL_MD_SIZE, 0, 0,
239                                                   NULL);
240         if (!lnet_small_mds_cachep)
241                 return -ENOMEM;
242
243         return 0;
244 }
245
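/*
 * NB (editorial): if the second kmem_cache_create() above fails,
 * lnet_mes_cachep is left allocated; that is fine because the only
 * caller, lnet_prepare(), invokes lnet_unprepare() on failure, which
 * calls lnet_descriptor_cleanup() below to destroy whichever caches
 * were actually created.
 */
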
246 static void
247 lnet_descriptor_cleanup(void)
248 {
249
250         if (lnet_small_mds_cachep) {
251                 kmem_cache_destroy(lnet_small_mds_cachep);
252                 lnet_small_mds_cachep = NULL;
253         }
254
255         if (lnet_mes_cachep) {
256                 kmem_cache_destroy(lnet_mes_cachep);
257                 lnet_mes_cachep = NULL;
258         }
259 }
260
261 static int
262 lnet_create_remote_nets_table(void)
263 {
264         int               i;
265         struct list_head *hash;
266
267         LASSERT(the_lnet.ln_remote_nets_hash == NULL);
268         LASSERT(the_lnet.ln_remote_nets_hbits > 0);
269         LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
270         if (hash == NULL) {
271                 CERROR("Failed to create remote nets hash table\n");
272                 return -ENOMEM;
273         }
274
275         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
276                 INIT_LIST_HEAD(&hash[i]);
277         the_lnet.ln_remote_nets_hash = hash;
278         return 0;
279 }
280
281 static void
282 lnet_destroy_remote_nets_table(void)
283 {
284         int i;
285
286         if (the_lnet.ln_remote_nets_hash == NULL)
287                 return;
288
289         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
290                 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
291
292         LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
293                     LNET_REMOTE_NETS_HASH_SIZE *
294                     sizeof(the_lnet.ln_remote_nets_hash[0]));
295         the_lnet.ln_remote_nets_hash = NULL;
296 }
297
298 static void
299 lnet_destroy_locks(void)
300 {
301         if (the_lnet.ln_res_lock != NULL) {
302                 cfs_percpt_lock_free(the_lnet.ln_res_lock);
303                 the_lnet.ln_res_lock = NULL;
304         }
305
306         if (the_lnet.ln_net_lock != NULL) {
307                 cfs_percpt_lock_free(the_lnet.ln_net_lock);
308                 the_lnet.ln_net_lock = NULL;
309         }
310
311         lnet_fini_locks();
312 }
313
314 static int
315 lnet_create_locks(void)
316 {
317         lnet_init_locks();
318
319         the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
320         if (the_lnet.ln_res_lock == NULL)
321                 goto failed;
322
323         the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
324         if (the_lnet.ln_net_lock == NULL)
325                 goto failed;
326
327         return 0;
328
329  failed:
330         lnet_destroy_locks();
331         return -ENOMEM;
332 }
333
334 static void lnet_assert_wire_constants(void)
335 {
336         /* Wire protocol assertions generated by 'wirecheck'
337          * running on Linux robert.bartonsoftware.com 2.6.8-1.521
338          * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
339          * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
340
341         /* Constants... */
342         CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
343         CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
344         CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
345         CLASSERT(LNET_MSG_ACK == 0);
346         CLASSERT(LNET_MSG_PUT == 1);
347         CLASSERT(LNET_MSG_GET == 2);
348         CLASSERT(LNET_MSG_REPLY == 3);
349         CLASSERT(LNET_MSG_HELLO == 4);
350
351         /* Checks for struct lnet_handle_wire */
352         CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
353         CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
354         CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
355         CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
356         CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
357
358         /* Checks for struct lnet_magicversion */
359         CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
360         CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
361         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
362         CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
363         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
364         CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
365         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
366
367         /* Checks for struct lnet_hdr */
368         CLASSERT((int)sizeof(struct lnet_hdr) == 72);
369         CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
370         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
371         CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
372         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
373         CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
374         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
375         CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
376         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
377         CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
378         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
379         CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
380         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
381         CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
382         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
383
384         /* Ack */
385         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
386         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
387         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
388         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
389         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
390         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
391
392         /* Put */
393         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
394         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
395         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
396         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
397         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
398         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
399         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
400         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
401         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
402         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
403
404         /* Get */
405         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
406         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
407         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
408         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
409         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
410         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
411         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
412         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
413         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
414         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
415
416         /* Reply */
417         CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
418         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
419
420         /* Hello */
421         CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
422         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
423         CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
424         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
425
426         /* Checks for struct lnet_ni_status and related constants */
427         CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
428         CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
429         CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
430
431         /* Checks for struct lnet_ni_status */
432         CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
433         CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
434         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
435         CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
436         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
437         CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
438         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
439
440         /* Checks for struct lnet_ping_info and related constants */
441         CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
442         CLASSERT(LNET_PING_FEAT_INVAL == 0);
443         CLASSERT(LNET_PING_FEAT_BASE == 1);
444         CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
445         CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
446         CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
447         CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
448         CLASSERT(LNET_PING_FEAT_BITS == 31);
449
450         /* Checks for struct lnet_ping_info */
451         CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
452         CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
453         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
454         CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
455         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
456         CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
457         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
458         CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
459         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
460         CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
461         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
462 }
463
464 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
465 {
466         struct lnet_lnd *lnd;
467         struct list_head *tmp;
468
469         /* holding lnd mutex */
470         list_for_each(tmp, &the_lnet.ln_lnds) {
471                 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
472
473                 if (lnd->lnd_type == type)
474                         return lnd;
475         }
476         return NULL;
477 }
478
479 void
480 lnet_register_lnd(struct lnet_lnd *lnd)
481 {
482         mutex_lock(&the_lnet.ln_lnd_mutex);
483
484         LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
485         LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
486
487         list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
488         lnd->lnd_refcount = 0;
489
490         CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
491
492         mutex_unlock(&the_lnet.ln_lnd_mutex);
493 }
494 EXPORT_SYMBOL(lnet_register_lnd);
495
496 void
497 lnet_unregister_lnd(struct lnet_lnd *lnd)
498 {
499         mutex_lock(&the_lnet.ln_lnd_mutex);
500
501         LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
502         LASSERT(lnd->lnd_refcount == 0);
503
504         list_del(&lnd->lnd_list);
505         CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
506
507         mutex_unlock(&the_lnet.ln_lnd_mutex);
508 }
509 EXPORT_SYMBOL(lnet_unregister_lnd);
510
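/*
 * Editorial sketch (illustrative; names and field values are examples
 * only): an LND module registers itself with LNet from its init path
 * and unregisters on exit, roughly:
 *
 *	static struct lnet_lnd the_xyzlnd = {
 *		.lnd_type     = SOCKLND,
 *		.lnd_startup  = xyznal_startup,
 *		.lnd_shutdown = xyznal_shutdown,
 *	};
 *
 *	lnet_register_lnd(&the_xyzlnd);     (module init)
 *	lnet_unregister_lnd(&the_xyzlnd);   (module exit)
 *
 * lnet_register_lnd() asserts the type is known and not already
 * registered; lnet_unregister_lnd() asserts the refcount has dropped
 * back to zero.
 */
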
511 void
512 lnet_counters_get(struct lnet_counters *counters)
513 {
514         struct lnet_counters *ctr;
515         int             i;
516
517         memset(counters, 0, sizeof(*counters));
518
519         lnet_net_lock(LNET_LOCK_EX);
520
521         cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
522                 counters->msgs_max     += ctr->msgs_max;
523                 counters->msgs_alloc   += ctr->msgs_alloc;
524                 counters->errors       += ctr->errors;
525                 counters->send_count   += ctr->send_count;
526                 counters->recv_count   += ctr->recv_count;
527                 counters->route_count  += ctr->route_count;
528                 counters->drop_count   += ctr->drop_count;
529                 counters->send_length  += ctr->send_length;
530                 counters->recv_length  += ctr->recv_length;
531                 counters->route_length += ctr->route_length;
532                 counters->drop_length  += ctr->drop_length;
533
534         }
535         lnet_net_unlock(LNET_LOCK_EX);
536 }
537 EXPORT_SYMBOL(lnet_counters_get);
538
539 void
540 lnet_counters_reset(void)
541 {
542         struct lnet_counters *counters;
543         int             i;
544
545         lnet_net_lock(LNET_LOCK_EX);
546
547         cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
548                 memset(counters, 0, sizeof(struct lnet_counters));
549
550         lnet_net_unlock(LNET_LOCK_EX);
551 }
552
553 static char *
554 lnet_res_type2str(int type)
555 {
556         switch (type) {
557         default:
558                 LBUG();
559         case LNET_COOKIE_TYPE_MD:
560                 return "MD";
561         case LNET_COOKIE_TYPE_ME:
562                 return "ME";
563         case LNET_COOKIE_TYPE_EQ:
564                 return "EQ";
565         }
566 }
567
568 static void
569 lnet_res_container_cleanup(struct lnet_res_container *rec)
570 {
571         int     count = 0;
572
573         if (rec->rec_type == 0) /* not set yet, it's uninitialized */
574                 return;
575
576         while (!list_empty(&rec->rec_active)) {
577                 struct list_head *e = rec->rec_active.next;
578
579                 list_del_init(e);
580                 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
581                         lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
582
583                 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
584                         lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
585
586                 } else { /* NB: Active MEs should be attached on portals */
587                         LBUG();
588                 }
589                 count++;
590         }
591
592         if (count > 0) {
593                 /* Found live MDs/MEs/EQs; the user really should unlink/free
594                  * all of them before finalizing LNet, but if they didn't,
595                  * we have to recycle the garbage for them */
596                 CERROR("%d active elements on exit of %s container\n",
597                        count, lnet_res_type2str(rec->rec_type));
598         }
599
600         if (rec->rec_lh_hash != NULL) {
601                 LIBCFS_FREE(rec->rec_lh_hash,
602                             LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
603                 rec->rec_lh_hash = NULL;
604         }
605
606         rec->rec_type = 0; /* mark it as finalized */
607 }
608
609 static int
610 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
611 {
612         int     rc = 0;
613         int     i;
614
615         LASSERT(rec->rec_type == 0);
616
617         rec->rec_type = type;
618         INIT_LIST_HEAD(&rec->rec_active);
619
620         rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
621
622         /* Arbitrary choice of hash table size */
623         LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
624                          LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
625         if (rec->rec_lh_hash == NULL) {
626                 rc = -ENOMEM;
627                 goto out;
628         }
629
630         for (i = 0; i < LNET_LH_HASH_SIZE; i++)
631                 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
632
633         return 0;
634
635 out:
636         CERROR("Failed to setup %s resource container\n",
637                lnet_res_type2str(type));
638         lnet_res_container_cleanup(rec);
639         return rc;
640 }
641
642 static void
643 lnet_res_containers_destroy(struct lnet_res_container **recs)
644 {
645         struct lnet_res_container       *rec;
646         int                             i;
647
648         cfs_percpt_for_each(rec, i, recs)
649                 lnet_res_container_cleanup(rec);
650
651         cfs_percpt_free(recs);
652 }
653
654 static struct lnet_res_container **
655 lnet_res_containers_create(int type)
656 {
657         struct lnet_res_container       **recs;
658         struct lnet_res_container       *rec;
659         int                             rc;
660         int                             i;
661
662         recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
663         if (recs == NULL) {
664                 CERROR("Failed to allocate %s resource containers\n",
665                        lnet_res_type2str(type));
666                 return NULL;
667         }
668
669         cfs_percpt_for_each(rec, i, recs) {
670                 rc = lnet_res_container_setup(rec, i, type);
671                 if (rc != 0) {
672                         lnet_res_containers_destroy(recs);
673                         return NULL;
674                 }
675         }
676
677         return recs;
678 }
679
680 struct lnet_libhandle *
681 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
682 {
683         /* ALWAYS called with lnet_res_lock held */
684         struct list_head        *head;
685         struct lnet_libhandle   *lh;
686         unsigned int            hash;
687
688         if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
689                 return NULL;
690
691         hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
692         head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
693
694         list_for_each_entry(lh, head, lh_hash_chain) {
695                 if (lh->lh_cookie == cookie)
696                         return lh;
697         }
698
699         return NULL;
700 }
701
702 void
703 lnet_res_lh_initialize(struct lnet_res_container *rec,
704                        struct lnet_libhandle *lh)
705 {
706         /* ALWAYS called with lnet_res_lock held */
707         unsigned int    ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
708         unsigned int    hash;
709
710         lh->lh_cookie = rec->rec_lh_cookie;
711         rec->rec_lh_cookie += 1 << ibits;
712
713         hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
714
715         list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
716 }
717
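/*
 * Editorial note on the cookie layout used by the two functions above:
 * the low LNET_COOKIE_TYPE_BITS hold the resource type (EQ/MD/ME), the
 * next LNET_CPT_BITS hold the CPT of the owning container, and the
 * remaining high bits are a per-container counter.  lnet_res_lh_lookup()
 * reverses this: it checks the type via LNET_COOKIE_MASK and uses the
 * counter bits (cookie >> (type bits + CPT bits)) to pick a hash chain.
 */
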
718 static int lnet_unprepare(void);
719
720 static int
721 lnet_prepare(lnet_pid_t requested_pid)
722 {
723         /* Prepare to bring up the network */
724         struct lnet_res_container **recs;
725         int                       rc = 0;
726
727         if (requested_pid == LNET_PID_ANY) {
728                 /* Don't instantiate LNET just for me */
729                 return -ENETDOWN;
730         }
731
732         LASSERT(the_lnet.ln_refcount == 0);
733
734         the_lnet.ln_routing = 0;
735
736         LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
737         the_lnet.ln_pid = requested_pid;
738
739         INIT_LIST_HEAD(&the_lnet.ln_test_peers);
740         INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
741         INIT_LIST_HEAD(&the_lnet.ln_nets);
742         INIT_LIST_HEAD(&the_lnet.ln_routers);
743         INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
744         INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
745         INIT_LIST_HEAD(&the_lnet.ln_dc_request);
746         INIT_LIST_HEAD(&the_lnet.ln_dc_working);
747         INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
748         init_waitqueue_head(&the_lnet.ln_dc_waitq);
749
750         rc = lnet_descriptor_setup();
751         if (rc != 0)
752                 goto failed;
753
754         rc = lnet_create_remote_nets_table();
755         if (rc != 0)
756                 goto failed;
757
758         /*
759          * NB the interface cookie in wire handles guards against delayed
760          * replies and ACKs appearing valid after reboot.
761          */
762         the_lnet.ln_interface_cookie = ktime_get_real_ns();
763
764         the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
765                                                 sizeof(struct lnet_counters));
766         if (the_lnet.ln_counters == NULL) {
767                 CERROR("Failed to allocate counters for LNet\n");
768                 rc = -ENOMEM;
769                 goto failed;
770         }
771
772         rc = lnet_peer_tables_create();
773         if (rc != 0)
774                 goto failed;
775
776         rc = lnet_msg_containers_create();
777         if (rc != 0)
778                 goto failed;
779
780         rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
781                                       LNET_COOKIE_TYPE_EQ);
782         if (rc != 0)
783                 goto failed;
784
785         recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
786         if (recs == NULL) {
787                 rc = -ENOMEM;
788                 goto failed;
789         }
790
791         the_lnet.ln_me_containers = recs;
792
793         recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
794         if (recs == NULL) {
795                 rc = -ENOMEM;
796                 goto failed;
797         }
798
799         the_lnet.ln_md_containers = recs;
800
801         rc = lnet_portals_create();
802         if (rc != 0) {
803                 CERROR("Failed to create portals for LNet: %d\n", rc);
804                 goto failed;
805         }
806
807         return 0;
808
809  failed:
810         lnet_unprepare();
811         return rc;
812 }
813
814 static int
815 lnet_unprepare (void)
816 {
817         /* NB no LNET_LOCK since this is the last reference.  All LND instances
818          * have shut down already, so it is safe to unlink and free all
819          * descriptors, even those that appear committed to a network op (eg MD
820          * with non-zero pending count) */
821
822         lnet_fail_nid(LNET_NID_ANY, 0);
823
824         LASSERT(the_lnet.ln_refcount == 0);
825         LASSERT(list_empty(&the_lnet.ln_test_peers));
826         LASSERT(list_empty(&the_lnet.ln_nets));
827
828         lnet_portals_destroy();
829
830         if (the_lnet.ln_md_containers != NULL) {
831                 lnet_res_containers_destroy(the_lnet.ln_md_containers);
832                 the_lnet.ln_md_containers = NULL;
833         }
834
835         if (the_lnet.ln_me_containers != NULL) {
836                 lnet_res_containers_destroy(the_lnet.ln_me_containers);
837                 the_lnet.ln_me_containers = NULL;
838         }
839
840         lnet_res_container_cleanup(&the_lnet.ln_eq_container);
841
842         lnet_msg_containers_destroy();
843         lnet_peer_uninit();
844         lnet_rtrpools_free(0);
845
846         if (the_lnet.ln_counters != NULL) {
847                 cfs_percpt_free(the_lnet.ln_counters);
848                 the_lnet.ln_counters = NULL;
849         }
850         lnet_destroy_remote_nets_table();
851         lnet_descriptor_cleanup();
852
853         return 0;
854 }
855
856 struct lnet_ni  *
857 lnet_net2ni_locked(__u32 net_id, int cpt)
858 {
859         struct lnet_ni   *ni;
860         struct lnet_net  *net;
861
862         LASSERT(cpt != LNET_LOCK_EX);
863
864         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
865                 if (net->net_id == net_id) {
866                         ni = list_entry(net->net_ni_list.next, struct lnet_ni,
867                                         ni_netlist);
868                         return ni;
869                 }
870         }
871
872         return NULL;
873 }
874
875 struct lnet_ni *
876 lnet_net2ni_addref(__u32 net)
877 {
878         struct lnet_ni *ni;
879
880         lnet_net_lock(0);
881         ni = lnet_net2ni_locked(net, 0);
882         if (ni)
883                 lnet_ni_addref_locked(ni, 0);
884         lnet_net_unlock(0);
885
886         return ni;
887 }
888 EXPORT_SYMBOL(lnet_net2ni_addref);
889
890 struct lnet_net *
891 lnet_get_net_locked(__u32 net_id)
892 {
893         struct lnet_net  *net;
894
895         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
896                 if (net->net_id == net_id)
897                         return net;
898         }
899
900         return NULL;
901 }
902
903 unsigned int
904 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
905 {
906         __u64           key = nid;
907         unsigned int    val;
908
909         LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
910
911         if (number == 1)
912                 return 0;
913
914         val = hash_long(key, LNET_CPT_BITS);
915         /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
916         if (val < number)
917                 return val;
918
919         return (unsigned int)(key + val + (val >> 1)) % number;
920 }
921
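/*
 * Editorial note: hash_long() above returns a value in
 * [0, 2^LNET_CPT_BITS), which may exceed LNET_CPT_NUMBER when the CPT
 * count is not a power of 2; in that case the final modulo expression
 * folds the result back into [0, number).  With number == 1 the answer
 * is trivially CPT 0.
 */
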
922 int
923 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
924 {
925         struct lnet_net *net;
926
927         /* must be called while holding lnet_net_lock */
928         if (LNET_CPT_NUMBER == 1)
929                 return 0; /* the only one */
930
931         /*
932          * If NI is provided then use the CPT identified in the NI cpt
933          * list if one exists. If one doesn't exist, then that NI is
934          * associated with all CPTs and it follows that the net it belongs
935          * to is implicitly associated with all CPTs, so just hash the nid
936          * and return that.
937          */
938         if (ni != NULL) {
939                 if (ni->ni_cpts != NULL)
940                         return ni->ni_cpts[lnet_nid_cpt_hash(nid,
941                                                              ni->ni_ncpts)];
942                 else
943                         return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
944         }
945
946         /* no NI provided so look at the net */
947         net = lnet_get_net_locked(LNET_NIDNET(nid));
948
949         if (net != NULL && net->net_cpts != NULL) {
950                 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
951         }
952
953         return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
954 }
955
956 int
957 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
958 {
959         int     cpt;
960         int     cpt2;
961
962         if (LNET_CPT_NUMBER == 1)
963                 return 0; /* the only one */
964
965         cpt = lnet_net_lock_current();
966
967         cpt2 = lnet_cpt_of_nid_locked(nid, ni);
968
969         lnet_net_unlock(cpt);
970
971         return cpt2;
972 }
973 EXPORT_SYMBOL(lnet_cpt_of_nid);
974
975 int
976 lnet_islocalnet(__u32 net_id)
977 {
978         struct lnet_net *net;
979         int             cpt;
980         bool            local;
981
982         cpt = lnet_net_lock_current();
983
984         net = lnet_get_net_locked(net_id);
985
986         local = net != NULL;
987
988         lnet_net_unlock(cpt);
989
990         return local;
991 }
992
993 bool
994 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
995 {
996         if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
997             ni->ni_state == LNET_NI_STATE_DEGRADED)
998                 return true;
999
1000         return false;
1001 }
1002
1003 struct lnet_ni  *
1004 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1005 {
1006         struct lnet_net  *net;
1007         struct lnet_ni   *ni;
1008
1009         LASSERT(cpt != LNET_LOCK_EX);
1010
1011         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1012                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1013                         if (ni->ni_nid == nid)
1014                                 return ni;
1015                 }
1016         }
1017
1018         return NULL;
1019 }
1020
1021 struct lnet_ni *
1022 lnet_nid2ni_addref(lnet_nid_t nid)
1023 {
1024         struct lnet_ni *ni;
1025
1026         lnet_net_lock(0);
1027         ni = lnet_nid2ni_locked(nid, 0);
1028         if (ni)
1029                 lnet_ni_addref_locked(ni, 0);
1030         lnet_net_unlock(0);
1031
1032         return ni;
1033 }
1034 EXPORT_SYMBOL(lnet_nid2ni_addref);
1035
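/*
 * Editorial note: lnet_net2ni_addref() and lnet_nid2ni_addref() return
 * the NI with an extra reference taken under lnet_net_lock(0); callers
 * are expected to drop it when done (assumption: via the usual
 * lnet_ni_decref() helper from lib-lnet.h).
 */
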
1036 int
1037 lnet_islocalnid(lnet_nid_t nid)
1038 {
1039         struct lnet_ni  *ni;
1040         int             cpt;
1041
1042         cpt = lnet_net_lock_current();
1043         ni = lnet_nid2ni_locked(nid, cpt);
1044         lnet_net_unlock(cpt);
1045
1046         return ni != NULL;
1047 }
1048
1049 int
1050 lnet_count_acceptor_nets(void)
1051 {
1052         /* Return the # of NIs that need the acceptor. */
1053         int              count = 0;
1054         struct lnet_net  *net;
1055         int              cpt;
1056
1057         cpt = lnet_net_lock_current();
1058         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1059                 /* all socklnd type networks should have the acceptor
1060                  * thread started */
1061                 if (net->net_lnd->lnd_accept != NULL)
1062                         count++;
1063         }
1064
1065         lnet_net_unlock(cpt);
1066
1067         return count;
1068 }
1069
1070 struct lnet_ping_buffer *
1071 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1072 {
1073         struct lnet_ping_buffer *pbuf;
1074
1075         LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1076         if (pbuf) {
1077                 pbuf->pb_nnis = nnis;
1078                 atomic_set(&pbuf->pb_refcnt, 1);
1079         }
1080
1081         return pbuf;
1082 }
1083
1084 void
1085 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1086 {
1087         LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1088         LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1089 }
1090
1091 static struct lnet_ping_buffer *
1092 lnet_ping_target_create(int nnis)
1093 {
1094         struct lnet_ping_buffer *pbuf;
1095
1096         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1097         if (pbuf == NULL) {
1098                 CERROR("Can't allocate ping source [%d]\n", nnis);
1099                 return NULL;
1100         }
1101
1102         pbuf->pb_info.pi_nnis = nnis;
1103         pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1104         pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1105         pbuf->pb_info.pi_features =
1106                 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1107
1108         return pbuf;
1109 }
1110
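/*
 * Editorial note: the ping buffer built above carries a struct
 * lnet_ping_info on the wire: a 16-byte header (pi_magic, pi_features,
 * pi_pid, pi_nnis) followed by pi_nnis struct lnet_ni_status entries of
 * 16 bytes each, matching the offsets asserted in
 * lnet_assert_wire_constants().
 */
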
1111 static inline int
1112 lnet_get_net_ni_count_locked(struct lnet_net *net)
1113 {
1114         struct lnet_ni  *ni;
1115         int             count = 0;
1116
1117         list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1118                 count++;
1119
1120         return count;
1121 }
1122
1123 static inline int
1124 lnet_get_net_ni_count_pre(struct lnet_net *net)
1125 {
1126         struct lnet_ni  *ni;
1127         int             count = 0;
1128
1129         list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1130                 count++;
1131
1132         return count;
1133 }
1134
1135 static inline int
1136 lnet_get_ni_count(void)
1137 {
1138         struct lnet_ni  *ni;
1139         struct lnet_net *net;
1140         int             count = 0;
1141
1142         lnet_net_lock(0);
1143
1144         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1145                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1146                         count++;
1147         }
1148
1149         lnet_net_unlock(0);
1150
1151         return count;
1152 }
1153
1154 int
1155 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1156 {
1157         if (!pinfo)
1158                 return -EINVAL;
1159         if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1160                 return -EPROTO;
1161         if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1162                 return -EPROTO;
1163         /* Loopback is guaranteed to be present */
1164         if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1165                 return -ERANGE;
1166         if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1167                 return -EPROTO;
1168         return 0;
1169 }
1170
1171 static void
1172 lnet_ping_target_destroy(void)
1173 {
1174         struct lnet_net *net;
1175         struct lnet_ni  *ni;
1176
1177         lnet_net_lock(LNET_LOCK_EX);
1178
1179         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1180                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1181                         lnet_ni_lock(ni);
1182                         ni->ni_status = NULL;
1183                         lnet_ni_unlock(ni);
1184                 }
1185         }
1186
1187         lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1188         the_lnet.ln_ping_target = NULL;
1189
1190         lnet_net_unlock(LNET_LOCK_EX);
1191 }
1192
1193 static void
1194 lnet_ping_target_event_handler(struct lnet_event *event)
1195 {
1196         struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1197
1198         if (event->unlinked)
1199                 lnet_ping_buffer_decref(pbuf);
1200 }
1201
1202 static int
1203 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1204                        struct lnet_handle_md *ping_mdh,
1205                        int ni_count, bool set_eq)
1206 {
1207         struct lnet_process_id id = {
1208                 .nid = LNET_NID_ANY,
1209                 .pid = LNET_PID_ANY
1210         };
1211         struct lnet_handle_me me_handle;
1212         struct lnet_md md = { NULL };
1213         int rc, rc2;
1214
1215         if (set_eq) {
1216                 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1217                                  &the_lnet.ln_ping_target_eq);
1218                 if (rc != 0) {
1219                         CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1220                         return rc;
1221                 }
1222         }
1223
1224         *ppbuf = lnet_ping_target_create(ni_count);
1225         if (*ppbuf == NULL) {
1226                 rc = -ENOMEM;
1227                 goto fail_free_eq;
1228         }
1229
1230         /* Ping target ME/MD */
1231         rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1232                           LNET_PROTO_PING_MATCHBITS, 0,
1233                           LNET_UNLINK, LNET_INS_AFTER,
1234                           &me_handle);
1235         if (rc != 0) {
1236                 CERROR("Can't create ping target ME: %d\n", rc);
1237                 goto fail_decref_ping_buffer;
1238         }
1239
1240         /* initialize md content */
1241         md.start     = &(*ppbuf)->pb_info;
1242         md.length    = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1243         md.threshold = LNET_MD_THRESH_INF;
1244         md.max_size  = 0;
1245         md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1246                        LNET_MD_MANAGE_REMOTE;
1247         md.eq_handle = the_lnet.ln_ping_target_eq;
1248         md.user_ptr  = *ppbuf;
1249
1250         rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1251         if (rc != 0) {
1252                 CERROR("Can't attach ping target MD: %d\n", rc);
1253                 goto fail_unlink_ping_me;
1254         }
1255         lnet_ping_buffer_addref(*ppbuf);
1256
1257         return 0;
1258
1259 fail_unlink_ping_me:
1260         rc2 = LNetMEUnlink(me_handle);
1261         LASSERT(rc2 == 0);
1262 fail_decref_ping_buffer:
1263         LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1264         lnet_ping_buffer_decref(*ppbuf);
1265         *ppbuf = NULL;
1266 fail_free_eq:
1267         if (set_eq) {
1268                 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1269                 LASSERT(rc2 == 0);
1270         }
1271         return rc;
1272 }
1273
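/*
 * Editorial note: the setup above posts an ME/MD pair on
 * LNET_RESERVED_PORTAL with LNET_PROTO_PING_MATCHBITS, matching any
 * peer (LNET_NID_ANY/LNET_PID_ANY) and serving the ping buffer to GETs
 * with an infinite threshold, so remote nodes can read this node's NI
 * status at any time.
 */
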
1274 static void
1275 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1276                     struct lnet_handle_md *ping_mdh)
1277 {
1278         sigset_t        blocked = cfs_block_allsigs();
1279
1280         LNetMDUnlink(*ping_mdh);
1281         LNetInvalidateMDHandle(ping_mdh);
1282
1283         /* NB the MD could be busy; this just starts the unlink */
1284         while (lnet_ping_buffer_numref(pbuf) > 1) {
1285                 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1286                 set_current_state(TASK_UNINTERRUPTIBLE);
1287                 schedule_timeout(cfs_time_seconds(1));
1288         }
1289
1290         cfs_restore_sigs(blocked);
1291 }
1292
1293 static void
1294 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1295 {
1296         struct lnet_ni          *ni;
1297         struct lnet_net         *net;
1298         struct lnet_ni_status *ns;
1299         int                     i;
1300         int                     rc;
1301
1302         i = 0;
1303         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1304                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1305                         LASSERT(i < pbuf->pb_nnis);
1306
1307                         ns = &pbuf->pb_info.pi_ni[i];
1308
1309                         ns->ns_nid = ni->ni_nid;
1310
1311                         lnet_ni_lock(ni);
1312                         ns->ns_status = (ni->ni_status != NULL) ?
1313                                          ni->ni_status->ns_status :
1314                                                 LNET_NI_STATUS_UP;
1315                         ni->ni_status = ns;
1316                         lnet_ni_unlock(ni);
1317
1318                         i++;
1319                 }
1320         }
1321         /*
1322          * We (ab)use the ns_status of the loopback interface to
1323          * transmit the sequence number. The first interface listed
1324          * must be the loopback interface.
1325          */
1326         rc = lnet_ping_info_validate(&pbuf->pb_info);
1327         if (rc) {
1328                 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1329                 LBUG();
1330         }
1331         LNET_PING_BUFFER_SEQNO(pbuf) =
1332                 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1333 }
1334
1335 static void
1336 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1337                         struct lnet_handle_md ping_mdh)
1338 {
1339         struct lnet_ping_buffer *old_pbuf = NULL;
1340         struct lnet_handle_md old_ping_md;
1341
1342         /* switch the NIs to point to the new ping info created */
1343         lnet_net_lock(LNET_LOCK_EX);
1344
1345         if (!the_lnet.ln_routing)
1346                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1347         if (!lnet_peer_discovery_disabled)
1348                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1349
1350         /* Ensure only known feature bits have been set. */
1351         LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1352         LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1353
1354         lnet_ping_target_install_locked(pbuf);
1355
1356         if (the_lnet.ln_ping_target) {
1357                 old_pbuf = the_lnet.ln_ping_target;
1358                 old_ping_md = the_lnet.ln_ping_target_md;
1359         }
1360         the_lnet.ln_ping_target_md = ping_mdh;
1361         the_lnet.ln_ping_target = pbuf;
1362
1363         lnet_net_unlock(LNET_LOCK_EX);
1364
1365         if (old_pbuf) {
1366                 /* unlink and free the old ping info */
1367                 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1368                 lnet_ping_buffer_decref(old_pbuf);
1369         }
1370
1371         lnet_push_update_to_peers(0);
1372 }
1373
1374 static void
1375 lnet_ping_target_fini(void)
1376 {
1377         int             rc;
1378
1379         lnet_ping_md_unlink(the_lnet.ln_ping_target,
1380                             &the_lnet.ln_ping_target_md);
1381
1382         rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1383         LASSERT(rc == 0);
1384
1385         lnet_ping_target_destroy();
1386 }
1387
1388 /* Resize the push target. */
1389 int lnet_push_target_resize(void)
1390 {
1391         lnet_process_id_t id = { LNET_NID_ANY, LNET_PID_ANY };
1392         lnet_md_t md = { NULL };
1393         lnet_handle_me_t meh;
1394         lnet_handle_md_t mdh;
1395         lnet_handle_md_t old_mdh;
1396         struct lnet_ping_buffer *pbuf;
1397         struct lnet_ping_buffer *old_pbuf;
1398         int nnis = the_lnet.ln_push_target_nnis;
1399         int rc;
1400
1401         if (nnis <= 0) {
1402                 rc = -EINVAL;
1403                 goto fail_return;
1404         }
1405 again:
1406         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1407         if (!pbuf) {
1408                 rc = -ENOMEM;
1409                 goto fail_return;
1410         }
1411
1412         rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1413                           LNET_PROTO_PING_MATCHBITS, 0,
1414                           LNET_UNLINK, LNET_INS_AFTER,
1415                           &meh);
1416         if (rc) {
1417                 CERROR("Can't create push target ME: %d\n", rc);
1418                 goto fail_decref_pbuf;
1419         }
1420
1421         /* initialize md content */
1422         md.start     = &pbuf->pb_info;
1423         md.length    = LNET_PING_INFO_SIZE(nnis);
1424         md.threshold = LNET_MD_THRESH_INF;
1425         md.max_size  = 0;
1426         md.options   = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1427                        LNET_MD_MANAGE_REMOTE;
1428         md.user_ptr  = pbuf;
1429         md.eq_handle = the_lnet.ln_push_target_eq;
1430
1431         rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1432         if (rc) {
1433                 CERROR("Can't attach push MD: %d\n", rc);
1434                 goto fail_unlink_meh;
1435         }
1436         lnet_ping_buffer_addref(pbuf);
1437
1438         lnet_net_lock(LNET_LOCK_EX);
1439         old_pbuf = the_lnet.ln_push_target;
1440         old_mdh = the_lnet.ln_push_target_md;
1441         the_lnet.ln_push_target = pbuf;
1442         the_lnet.ln_push_target_md = mdh;
1443         lnet_net_unlock(LNET_LOCK_EX);
1444
1445         if (old_pbuf) {
1446                 LNetMDUnlink(old_mdh);
1447                 lnet_ping_buffer_decref(old_pbuf);
1448         }
1449
1450         if (nnis < the_lnet.ln_push_target_nnis)
1451                 goto again;
1452
1453         CDEBUG(D_NET, "nnis %d success\n", nnis);
1454
1455         return 0;
1456
1457 fail_unlink_meh:
1458         LNetMEUnlink(meh);
1459 fail_decref_pbuf:
1460         lnet_ping_buffer_decref(pbuf);
1461 fail_return:
1462         CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1463         return rc;
1464 }
1465
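/*
 * Editorial note: the "again" loop above handles a race in which
 * ln_push_target_nnis is raised (e.g. by a larger incoming push) while
 * the new buffer is being installed; if the size snapshotted at entry
 * is now too small, the whole allocate/attach/swap sequence is repeated
 * with the larger size.
 */
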
1466 static void lnet_push_target_event_handler(struct lnet_event *ev)
1467 {
1468         struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1469
1470         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1471                 lnet_swap_pinginfo(pbuf);
1472
1473         lnet_peer_push_event(ev);
1474         if (ev->unlinked)
1475                 lnet_ping_buffer_decref(pbuf);
1476 }
1477
1478 /* Initialize the push target. */
1479 static int lnet_push_target_init(void)
1480 {
1481         int rc;
1482
1483         if (the_lnet.ln_push_target)
1484                 return -EALREADY;
1485
1486         rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1487                          &the_lnet.ln_push_target_eq);
1488         if (rc) {
1489                 CERROR("Can't allocate push target EQ: %d\n", rc);
1490                 return rc;
1491         }
1492
1493         /* Start at the required minimum; we'll enlarge later if needed. */
1494         the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1495
1496         rc = lnet_push_target_resize();
1497
1498         if (rc) {
1499                 LNetEQFree(the_lnet.ln_push_target_eq);
1500                 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1501         }
1502
1503         return rc;
1504 }
1505
1506 /* Clean up the push target. */
1507 static void lnet_push_target_fini(void)
1508 {
1509         if (!the_lnet.ln_push_target)
1510                 return;
1511
1512         /* Unlink and invalidate to prevent new references. */
1513         LNetMDUnlink(the_lnet.ln_push_target_md);
1514         LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1515
1516         /* Wait for the unlink to complete. */
1517         while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1518                 CDEBUG(D_NET, "Still waiting for push target MD to unlink\n");
1519                 set_current_state(TASK_UNINTERRUPTIBLE);
1520                 schedule_timeout(cfs_time_seconds(1));
1521         }
1522
1523         lnet_ping_buffer_decref(the_lnet.ln_push_target);
1524         the_lnet.ln_push_target = NULL;
1525         the_lnet.ln_push_target_nnis = 0;
1526
1527         LNetEQFree(the_lnet.ln_push_target_eq);
1528         LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1529 }
1530
1531 static int
1532 lnet_ni_tq_credits(struct lnet_ni *ni)
1533 {
1534         int     credits;
1535
1536         LASSERT(ni->ni_ncpts >= 1);
1537
1538         if (ni->ni_ncpts == 1)
1539                 return ni->ni_net->net_tunables.lct_max_tx_credits;
1540
1541         credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1542         credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1543         credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
1544
1545         return credits;
1546 }
1547
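/*
 * Editorial example: with illustrative tunables of
 * lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and ni_ncpts = 4,
 * the per-CPT queue gets 256 / 4 = 64 credits, which is already
 * >= 8 * 8 = 64 and <= 256, so each of the 4 tx queues is given 64
 * credits.
 */
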
1548 static void
1549 lnet_ni_unlink_locked(struct lnet_ni *ni)
1550 {
1551         if (!list_empty(&ni->ni_cptlist)) {
1552                 list_del_init(&ni->ni_cptlist);
1553                 lnet_ni_decref_locked(ni, 0);
1554         }
1555
1556         /* move it to the zombie list where nobody can find it anymore */
1557         LASSERT(!list_empty(&ni->ni_netlist));
1558         list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1559         lnet_ni_decref_locked(ni, 0);
1560 }
1561
1562 static void
1563 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1564 {
1565         int             i;
1566         int             islo;
1567         struct lnet_ni  *ni;
1568         struct list_head *zombie_list = &net->net_ni_zombie;
1569
1570         /*
1571          * Now wait for the NIs I just nuked to show up on the zombie
1572          * list and shut them down in guaranteed thread context
1573          */
1574         i = 2;
1575         while (!list_empty(zombie_list)) {
1576                 int     *ref;
1577                 int     j;
1578
1579                 ni = list_entry(zombie_list->next,
1580                                 struct lnet_ni, ni_netlist);
1581                 list_del_init(&ni->ni_netlist);
1582                 /* the NI should be in the deleting state; if it is not,
1583                  * that is a bug */
1584                 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1585                 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1586                         if (*ref == 0)
1587                                 continue;
1588                         /* still busy, add it back to zombie list */
1589                         list_add(&ni->ni_netlist, zombie_list);
1590                         break;
1591                 }
1592
1593                 if (!list_empty(&ni->ni_netlist)) {
1594                         lnet_net_unlock(LNET_LOCK_EX);
1595                         ++i;
1596                         if ((i & (-i)) == i) {
1597                                 CDEBUG(D_WARNING,
1598                                        "Waiting for zombie LNI %s\n",
1599                                        libcfs_nid2str(ni->ni_nid));
1600                         }
1601                         set_current_state(TASK_UNINTERRUPTIBLE);
1602                         schedule_timeout(cfs_time_seconds(1));
1603                         lnet_net_lock(LNET_LOCK_EX);
1604                         continue;
1605                 }
1606
1607                 lnet_net_unlock(LNET_LOCK_EX);
1608
1609                 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1610
1611                 LASSERT(!in_interrupt());
1612                 (net->net_lnd->lnd_shutdown)(ni);
1613
1614                 if (!islo)
1615                         CDEBUG(D_LNI, "Removed LNI %s\n",
1616                               libcfs_nid2str(ni->ni_nid));
1617
1618                 lnet_ni_free(ni);
1619                 i = 2;
1620                 lnet_net_lock(LNET_LOCK_EX);
1621         }
1622 }
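/*
 * Note on the wait loop above: (i & (-i)) == i is true only when i is a
 * power of two, so the "Waiting for zombie LNI" warning is emitted with
 * exponential backoff (when the counter hits 4, 8, 16, ...) instead of on
 * every one-second pass; i is reset to 2 once an NI is finally freed.
 */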
1623
1624 /* shut down the NI and release its refcount */
1625 static void
1626 lnet_shutdown_lndni(struct lnet_ni *ni)
1627 {
1628         int i;
1629         struct lnet_net *net = ni->ni_net;
1630
1631         lnet_net_lock(LNET_LOCK_EX);
1632         ni->ni_state = LNET_NI_STATE_DELETING;
1633         lnet_ni_unlink_locked(ni);
1634         lnet_incr_dlc_seq();
1635         lnet_net_unlock(LNET_LOCK_EX);
1636
1637         /* clear messages for this NI on the lazy portal */
1638         for (i = 0; i < the_lnet.ln_nportals; i++)
1639                 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1640
1641         lnet_net_lock(LNET_LOCK_EX);
1642         lnet_clear_zombies_nis_locked(net);
1643         lnet_net_unlock(LNET_LOCK_EX);
1644 }
1645
1646 static void
1647 lnet_shutdown_lndnet(struct lnet_net *net)
1648 {
1649         struct lnet_ni *ni;
1650
1651         lnet_net_lock(LNET_LOCK_EX);
1652
1653         net->net_state = LNET_NET_STATE_DELETING;
1654
1655         list_del_init(&net->net_list);
1656
1657         while (!list_empty(&net->net_ni_list)) {
1658                 ni = list_entry(net->net_ni_list.next,
1659                                 struct lnet_ni, ni_netlist);
1660                 lnet_net_unlock(LNET_LOCK_EX);
1661                 lnet_shutdown_lndni(ni);
1662                 lnet_net_lock(LNET_LOCK_EX);
1663         }
1664
1665         lnet_net_unlock(LNET_LOCK_EX);
1666
1667         /* Do peer table cleanup for this net */
1668         lnet_peer_tables_cleanup(net);
1669
1670         lnet_net_lock(LNET_LOCK_EX);
1671         /*
1672          * decrement ref count on lnd only when the entire network goes
1673          * away
1674          */
1675         net->net_lnd->lnd_refcount--;
1676
1677         lnet_net_unlock(LNET_LOCK_EX);
1678
1679         lnet_net_free(net);
1680 }
1681
1682 static void
1683 lnet_shutdown_lndnets(void)
1684 {
1685         struct lnet_net *net;
1686
1687         /* NB called holding the global mutex */
1688
1689         /* All quiet on the API front */
1690         LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1691         LASSERT(the_lnet.ln_refcount == 0);
1692
1693         lnet_net_lock(LNET_LOCK_EX);
1694         the_lnet.ln_state = LNET_STATE_STOPPING;
1695
1696         while (!list_empty(&the_lnet.ln_nets)) {
1697                 /*
1698                  * move the nets to the zombie list to avoid them being
1699                  * picked up for new work. LONET is also included in the
1700                  * nets that will be moved to the zombie list
1701                  */
1702                 net = list_entry(the_lnet.ln_nets.next,
1703                                  struct lnet_net, net_list);
1704                 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1705         }
1706
1707         /* Drop the cached loopback Net. */
1708         if (the_lnet.ln_loni != NULL) {
1709                 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1710                 the_lnet.ln_loni = NULL;
1711         }
1712         lnet_net_unlock(LNET_LOCK_EX);
1713
1714         /* iterate through the net zombie list and delete each net */
1715         while (!list_empty(&the_lnet.ln_net_zombie)) {
1716                 net = list_entry(the_lnet.ln_net_zombie.next,
1717                                  struct lnet_net, net_list);
1718                 lnet_shutdown_lndnet(net);
1719         }
1720
1721         lnet_net_lock(LNET_LOCK_EX);
1722         the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1723         lnet_net_unlock(LNET_LOCK_EX);
1724 }
1725
1726 static int
1727 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1728 {
1729         int                     rc = -EINVAL;
1730         struct lnet_tx_queue    *tq;
1731         int                     i;
1732         struct lnet_net         *net = ni->ni_net;
1733
1734         mutex_lock(&the_lnet.ln_lnd_mutex);
1735
1736         if (tun) {
1737                 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1738                 ni->ni_lnd_tunables_set = true;
1739         }
1740
1741         rc = (net->net_lnd->lnd_startup)(ni);
1742
1743         mutex_unlock(&the_lnet.ln_lnd_mutex);
1744
1745         if (rc != 0) {
1746                 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1747                                    rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1748                 lnet_net_lock(LNET_LOCK_EX);
1749                 net->net_lnd->lnd_refcount--;
1750                 lnet_net_unlock(LNET_LOCK_EX);
1751                 goto failed0;
1752         }
1753
1754         ni->ni_state = LNET_NI_STATE_ACTIVE;
1755
1756         /* We keep a reference on the loopback net through the loopback NI */
1757         if (net->net_lnd->lnd_type == LOLND) {
1758                 lnet_ni_addref(ni);
1759                 LASSERT(the_lnet.ln_loni == NULL);
1760                 the_lnet.ln_loni = ni;
1761                 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1762                 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1763                 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1764                 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1765                 return 0;
1766         }
1767
1768         if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1769             ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1770                 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1771                                    libcfs_lnd2str(net->net_lnd->lnd_type),
1772                                    ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1773                                         "" : "per-peer ");
1774                 /* shut down the NI: if we got here then it must already
1775                  * have been started
1776                  */
1777                 lnet_shutdown_lndni(ni);
1778                 return -EINVAL;
1779         }
1780
1781         cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1782                 tq->tq_credits_min =
1783                 tq->tq_credits_max =
1784                 tq->tq_credits = lnet_ni_tq_credits(ni);
1785         }
1786
1787         atomic_set(&ni->ni_tx_credits,
1788                    lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1789
1790         CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1791                 libcfs_nid2str(ni->ni_nid),
1792                 ni->ni_net->net_tunables.lct_peer_tx_credits,
1793                 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1794                 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1795                 ni->ni_net->net_tunables.lct_peer_timeout);
1796
1797         return 0;
1798 failed0:
1799         lnet_ni_free(ni);
1800         return rc;
1801 }
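/*
 * For reference, the "Added LNI" debug line above prints, in order,
 * lct_peer_tx_credits, the per-queue credit count multiplied by
 * LNET_CPT_NUMBER, lct_peer_rtr_credits and lct_peer_timeout.  A line such
 * as "Added LNI 192.168.1.10@tcp [8/256/0/180]" (example values only) would
 * mean 8 credits per peer, 256 aggregate tx credits, no router credits and
 * a 180 second peer timeout.
 */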
1802
1803 static int
1804 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1805 {
1806         struct lnet_ni *ni;
1807         struct lnet_net *net_l = NULL;
1808         struct list_head        local_ni_list;
1809         int                     rc;
1810         int                     ni_count = 0;
1811         __u32                   lnd_type;
1812         struct lnet_lnd *lnd;
1813         int                     peer_timeout =
1814                 net->net_tunables.lct_peer_timeout;
1815         int                     maxtxcredits =
1816                 net->net_tunables.lct_max_tx_credits;
1817         int                     peerrtrcredits =
1818                 net->net_tunables.lct_peer_rtr_credits;
1819
1820         INIT_LIST_HEAD(&local_ni_list);
1821
1822         /*
1823          * make sure that this net is unique. If it isn't then
1824          * we are adding interfaces to an already existing network, and
1825          * 'net' is just a convenient way to pass in the list.
1826          * if it is unique we need to find the LND and load it if
1827          * necessary.
1828          */
1829         if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1830                 lnd_type = LNET_NETTYP(net->net_id);
1831
1832                 LASSERT(libcfs_isknown_lnd(lnd_type));
1833
1834                 mutex_lock(&the_lnet.ln_lnd_mutex);
1835                 lnd = lnet_find_lnd_by_type(lnd_type);
1836
1837                 if (lnd == NULL) {
1838                         mutex_unlock(&the_lnet.ln_lnd_mutex);
1839                         rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1840                         mutex_lock(&the_lnet.ln_lnd_mutex);
1841
1842                         lnd = lnet_find_lnd_by_type(lnd_type);
1843                         if (lnd == NULL) {
1844                                 mutex_unlock(&the_lnet.ln_lnd_mutex);
1845                                 CERROR("Can't load LND %s, module %s, rc=%d\n",
1846                                        libcfs_lnd2str(lnd_type),
1847                                        libcfs_lnd2modname(lnd_type), rc);
1848 #ifndef HAVE_MODULE_LOADING_SUPPORT
1849                                 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1850                                                 "compiled with kernel module "
1851                                                 "loading support.");
1852 #endif
1853                                 rc = -EINVAL;
1854                                 goto failed0;
1855                         }
1856                 }
1857
1858                 lnet_net_lock(LNET_LOCK_EX);
1859                 lnd->lnd_refcount++;
1860                 lnet_net_unlock(LNET_LOCK_EX);
1861
1862                 net->net_lnd = lnd;
1863
1864                 mutex_unlock(&the_lnet.ln_lnd_mutex);
1865
1866                 net_l = net;
1867         }
1868
1869         /*
1870          * net_l: if the network being added is unique then net_l
1871          *        will point to that network
1872          *        if the network being added is not unique then
1873          *        net_l points to the existing network.
1874          *
1875          * When we enter the loop below, we'll pick NIs off the
1876          * network being added and start them up, then add them to
1877          * a local ni list. Once we've successfully started all
1878          * the NIs then we join the local NI list (of started up
1879          * networks) with the net_l->net_ni_list, which should
1880          * point to the correct network to add the new ni list to
1881          *
1882          * If any of the new NIs fail to start up, then we want to
1883          * iterate through the local ni list, which should include
1884          * any NIs which were successfully started up, and shut
1885          * them down.
1886          *
1887          * After that we want to delete the network being added,
1888          * to avoid a memory leak.
1889          */
1890
1891         /*
1892          * When a network uses TCP bonding then all its interfaces
1893          * must be specified when the network is first defined: the
1894          * TCP bonding code doesn't allow for interfaces to be added
1895          * or removed.
1896          */
1897         if (net_l != net && net_l != NULL && use_tcp_bonding &&
1898             LNET_NETTYP(net_l->net_id) == SOCKLND) {
1899                 rc = -EINVAL;
1900                 goto failed0;
1901         }
1902
1903         while (!list_empty(&net->net_ni_added)) {
1904                 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1905                                 ni_netlist);
1906                 list_del_init(&ni->ni_netlist);
1907
1908                 /* make sure that the NI we're about to start
1909                  * up is actually unique. If it's not, fail. */
1910                 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1911                                         ni->ni_interfaces[0])) {
1912                         rc = -EINVAL;
1913                         goto failed1;
1914                 }
1915
1916                 /* adjust the pointer to the parent network, just in
1917                  * case the net is a duplicate */
1918                 ni->ni_net = net_l;
1919
1920                 rc = lnet_startup_lndni(ni, tun);
1921
1922                 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1923                         ni->ni_net->net_lnd->lnd_query != NULL);
1924
1925                 if (rc < 0)
1926                         goto failed1;
1927
1928                 lnet_ni_addref(ni);
1929                 list_add_tail(&ni->ni_netlist, &local_ni_list);
1930
1931                 ni_count++;
1932         }
1933
1934         lnet_net_lock(LNET_LOCK_EX);
1935         list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1936         lnet_incr_dlc_seq();
1937         lnet_net_unlock(LNET_LOCK_EX);
1938
1939         /* if the network is not unique then we don't want to keep
1940          * it around after we're done. Free it. Otherwise add that
1941          * net to the global the_lnet.ln_nets */
1942         if (net_l != net && net_l != NULL) {
1943                 /*
1944                  * TODO - note: currently the tunables cannot be updated
1945                  * once added
1946                  */
1947                 lnet_net_free(net);
1948         } else {
1949                 net->net_state = LNET_NET_STATE_ACTIVE;
1950                 /*
1951                  * restore tunables after they have been overwritten by
1952                  * the lnd
1953                  */
1954                 if (peer_timeout != -1)
1955                         net->net_tunables.lct_peer_timeout = peer_timeout;
1956                 if (maxtxcredits != -1)
1957                         net->net_tunables.lct_max_tx_credits = maxtxcredits;
1958                 if (peerrtrcredits != -1)
1959                         net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1960
1961                 lnet_net_lock(LNET_LOCK_EX);
1962                 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1963                 lnet_net_unlock(LNET_LOCK_EX);
1964         }
1965
1966         return ni_count;
1967
1968 failed1:
1969         /*
1970          * shutdown the new NIs that are being started up
1971          * free the NET being started
1972          */
1973         while (!list_empty(&local_ni_list)) {
1974                 ni = list_entry(local_ni_list.next, struct lnet_ni,
1975                                 ni_netlist);
1976
1977                 lnet_shutdown_lndni(ni);
1978         }
1979
1980 failed0:
1981         lnet_net_free(net);
1982
1983         return rc;
1984 }
1985
1986 static int
1987 lnet_startup_lndnets(struct list_head *netlist)
1988 {
1989         struct lnet_net         *net;
1990         int                     rc;
1991         int                     ni_count = 0;
1992
1993         /*
1994          * Change to running state before bringing up the LNDs. This
1995          * allows lnet_shutdown_lndnets() to assert that we've passed
1996          * through here.
1997          */
1998         lnet_net_lock(LNET_LOCK_EX);
1999         the_lnet.ln_state = LNET_STATE_RUNNING;
2000         lnet_net_unlock(LNET_LOCK_EX);
2001
2002         while (!list_empty(netlist)) {
2003                 net = list_entry(netlist->next, struct lnet_net, net_list);
2004                 list_del_init(&net->net_list);
2005
2006                 rc = lnet_startup_lndnet(net, NULL);
2007
2008                 if (rc < 0)
2009                         goto failed;
2010
2011                 ni_count += rc;
2012         }
2013
2014         return ni_count;
2015 failed:
2016         lnet_shutdown_lndnets();
2017
2018         return rc;
2019 }
2020
2021 /**
2022  * Initialize LNet library.
2023  *
2024  * Automatically called at module loading time. Caller has to call
2025  * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2026  * latter returned 0. It must be called exactly once.
2027  *
2028  * \retval 0 on success
2029  * \retval -ve on failures.
2030  */
2031 int lnet_lib_init(void)
2032 {
2033         int rc;
2034
2035         lnet_assert_wire_constants();
2036
2037         /* refer to global cfs_cpt_table for now */
2038         the_lnet.ln_cpt_table   = cfs_cpt_table;
2039         the_lnet.ln_cpt_number  = cfs_cpt_number(cfs_cpt_table);
2040
2041         LASSERT(the_lnet.ln_cpt_number > 0);
2042         if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2043                 /* we are at risk of consuming all lh_cookie */
2044                 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2045                        "please change setting of CPT-table and retry\n",
2046                        the_lnet.ln_cpt_number, LNET_CPT_MAX);
2047                 return -E2BIG;
2048         }
2049
2050         while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2051                 the_lnet.ln_cpt_bits++;
2052
2053         rc = lnet_create_locks();
2054         if (rc != 0) {
2055                 CERROR("Can't create LNet global locks: %d\n", rc);
2056                 return rc;
2057         }
2058
2059         the_lnet.ln_refcount = 0;
2060         LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2061         INIT_LIST_HEAD(&the_lnet.ln_lnds);
2062         INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2063         INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2064         INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2065
2066         /* The hash table size is the number of bits it takes to express the set
2067          * ln_num_routes, minus 1 (better to underestimate than overestimate so we
2068          * don't waste memory). */
2069         if (rnet_htable_size <= 0)
2070                 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2071         else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2072                 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2073         the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2074                                            order_base_2(rnet_htable_size) - 1);
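        /*
         * Example (illustrative value): rnet_htable_size = 128 gives
         * order_base_2(128) - 1 = 6 hash bits, i.e. a 64-bucket table,
         * while a size of 1 is clamped to a single bit by the max_t() above.
         */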
2075
2076         /* All LNDs apart from the LOLND are in separate modules.  They
2077          * register themselves when their module loads, and unregister
2078          * themselves when their module is unloaded. */
2079         lnet_register_lnd(&the_lolnd);
2080         return 0;
2081 }
2082
2083 /**
2084  * Finalize LNet library.
2085  *
2086  * \pre lnet_lib_init() called with success.
2087  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2088  */
2089 void lnet_lib_exit(void)
2090 {
2091         LASSERT(the_lnet.ln_refcount == 0);
2092
2093         while (!list_empty(&the_lnet.ln_lnds))
2094                 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2095                                                struct lnet_lnd, lnd_list));
2096         lnet_destroy_locks();
2097 }
2098
2099 /**
2100  * Set LNet PID and start LNet interfaces, routing, and forwarding.
2101  *
2102  * Users must call this function at least once before any other functions.
2103  * For each successful call there must be a corresponding call to
2104  * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2105  * ignored.
2106  *
2107  * The PID used by LNet may be different from the one requested.
2108  * See LNetGetId().
2109  *
2110  * \param requested_pid PID requested by the caller.
2111  *
2112  * \return >= 0 on success, and < 0 error code on failures.
2113  */
2114 int
2115 LNetNIInit(lnet_pid_t requested_pid)
2116 {
2117         int                     im_a_router = 0;
2118         int                     rc;
2119         int                     ni_count;
2120         struct lnet_ping_buffer *pbuf;
2121         struct lnet_handle_md   ping_mdh;
2122         struct list_head        net_head;
2123         struct lnet_net         *net;
2124
2125         INIT_LIST_HEAD(&net_head);
2126
2127         mutex_lock(&the_lnet.ln_api_mutex);
2128
2129         CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2130
2131         if (the_lnet.ln_refcount > 0) {
2132                 rc = the_lnet.ln_refcount++;
2133                 mutex_unlock(&the_lnet.ln_api_mutex);
2134                 return rc;
2135         }
2136
2137         rc = lnet_prepare(requested_pid);
2138         if (rc != 0) {
2139                 mutex_unlock(&the_lnet.ln_api_mutex);
2140                 return rc;
2141         }
2142
2143         /* create a net for the loopback network */
2144         net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2145         if (net == NULL) {
2146                 rc = -ENOMEM;
2147                 goto err_empty_list;
2148         }
2149
2150         /* Add in the loopback NI */
2151         if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2152                 rc = -ENOMEM;
2153                 goto err_empty_list;
2154         }
2155
2156         /* If LNet is being initialized via DLC it is possible
2157          * that the user requests not to load module parameters (ones which
2158          * are supported by DLC) on initialization.  Therefore, make sure not
2159          * to load networks, routes and forwarding from module parameters
2160          * in this case.  On cleanup in case of failure, only clean up
2161          * routes if they have been loaded */
2162         if (!the_lnet.ln_nis_from_mod_params) {
2163                 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2164                                          use_tcp_bonding);
2165                 if (rc < 0)
2166                         goto err_empty_list;
2167         }
2168
2169         ni_count = lnet_startup_lndnets(&net_head);
2170         if (ni_count < 0) {
2171                 rc = ni_count;
2172                 goto err_empty_list;
2173         }
2174
2175         if (!the_lnet.ln_nis_from_mod_params) {
2176                 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2177                 if (rc != 0)
2178                         goto err_shutdown_lndnis;
2179
2180                 rc = lnet_check_routes();
2181                 if (rc != 0)
2182                         goto err_destroy_routes;
2183
2184                 rc = lnet_rtrpools_alloc(im_a_router);
2185                 if (rc != 0)
2186                         goto err_destroy_routes;
2187         }
2188
2189         rc = lnet_acceptor_start();
2190         if (rc != 0)
2191                 goto err_destroy_routes;
2192
2193         the_lnet.ln_refcount = 1;
2194         /* Now I may use my own API functions... */
2195
2196         rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2197         if (rc != 0)
2198                 goto err_acceptor_stop;
2199
2200         lnet_ping_target_update(pbuf, ping_mdh);
2201
2202         rc = lnet_router_checker_start();
2203         if (rc != 0)
2204                 goto err_stop_ping;
2205
2206         rc = lnet_push_target_init();
2207         if (rc != 0)
2208                 goto err_stop_router_checker;
2209
2210         rc = lnet_peer_discovery_start();
2211         if (rc != 0)
2212                 goto err_destroy_push_target;
2213
2214         lnet_fault_init();
2215         lnet_proc_init();
2216
2217         mutex_unlock(&the_lnet.ln_api_mutex);
2218
2219         return 0;
2220
2221 err_destroy_push_target:
2222         lnet_push_target_fini();
2223 err_stop_router_checker:
2224         lnet_router_checker_stop();
2225 err_stop_ping:
2226         lnet_ping_target_fini();
2227 err_acceptor_stop:
2228         the_lnet.ln_refcount = 0;
2229         lnet_acceptor_stop();
2230 err_destroy_routes:
2231         if (!the_lnet.ln_nis_from_mod_params)
2232                 lnet_destroy_routes();
2233 err_shutdown_lndnis:
2234         lnet_shutdown_lndnets();
2235 err_empty_list:
2236         lnet_unprepare();
2237         LASSERT(rc < 0);
2238         mutex_unlock(&the_lnet.ln_api_mutex);
2239         while (!list_empty(&net_head)) {
2240                 struct lnet_net *net;
2241
2242                 net = list_entry(net_head.next, struct lnet_net, net_list);
2243                 list_del_init(&net->net_list);
2244                 lnet_net_free(net);
2245         }
2246         return rc;
2247 }
2248 EXPORT_SYMBOL(LNetNIInit);
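/*
 * Minimal usage sketch (hypothetical caller, error handling elided): every
 * kernel user pairs a successful LNetNIInit() with exactly one LNetNIFini().
 * Only the first caller's requested PID is honoured; later calls simply take
 * an extra reference.
 *
 *      int rc = LNetNIInit(requested_pid);  /* requested_pid: caller's choice */
 *
 *      if (rc < 0)
 *              return rc;
 *      ...use the LNet API...
 *      LNetNIFini();
 */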
2249
2250 /**
2251  * Stop LNet interfaces, routing, and forwarding.
2252  *
2253  * Users must call this function once for each successful call to LNetNIInit().
2254  * Once the LNetNIFini() operation has been started, the results of pending
2255  * API operations are undefined.
2256  *
2257  * \return always 0 for current implementation.
2258  */
2259 int
2260 LNetNIFini()
2261 {
2262         mutex_lock(&the_lnet.ln_api_mutex);
2263
2264         LASSERT(the_lnet.ln_refcount > 0);
2265
2266         if (the_lnet.ln_refcount != 1) {
2267                 the_lnet.ln_refcount--;
2268         } else {
2269                 LASSERT(!the_lnet.ln_niinit_self);
2270
2271                 lnet_fault_fini();
2272
2273                 lnet_proc_fini();
2274                 lnet_peer_discovery_stop();
2275                 lnet_push_target_fini();
2276                 lnet_router_checker_stop();
2277                 lnet_ping_target_fini();
2278
2279                 /* Teardown fns that use my own API functions BEFORE here */
2280                 the_lnet.ln_refcount = 0;
2281
2282                 lnet_acceptor_stop();
2283                 lnet_destroy_routes();
2284                 lnet_shutdown_lndnets();
2285                 lnet_unprepare();
2286         }
2287
2288         mutex_unlock(&the_lnet.ln_api_mutex);
2289         return 0;
2290 }
2291 EXPORT_SYMBOL(LNetNIFini);
2292
2293 /**
2294  * Grabs the ni data from the ni structure and fills the out
2295  * parameters
2296  *
2297  * \param[in] ni                network interface structure
2298  * \param[out] cfg_ni           NI config information
2299  * \param[out] tun              network and LND tunables
2300  */
2301 static void
2302 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2303                    struct lnet_ioctl_config_lnd_tunables *tun,
2304                    struct lnet_ioctl_element_stats *stats,
2305                    __u32 tun_size)
2306 {
2307         size_t min_size = 0;
2308         int i;
2309
2310         if (!ni || !cfg_ni || !tun)
2311                 return;
2312
2313         if (ni->ni_interfaces[0] != NULL) {
2314                 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2315                         if (ni->ni_interfaces[i] != NULL) {
2316                                 strncpy(cfg_ni->lic_ni_intf[i],
2317                                         ni->ni_interfaces[i],
2318                                         sizeof(cfg_ni->lic_ni_intf[i]));
2319                         }
2320                 }
2321         }
2322
2323         cfg_ni->lic_nid = ni->ni_nid;
2324         if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2325                 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2326         else
2327                 cfg_ni->lic_status = ni->ni_status->ns_status;
2328         cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2329         cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2330
2331         memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2332
2333         if (stats) {
2334                 stats->iel_send_count = atomic_read(&ni->ni_stats.send_count);
2335                 stats->iel_recv_count = atomic_read(&ni->ni_stats.recv_count);
2336         }
2337
2338         /*
2339          * tun->lt_tun will always be present, but in order to be
2340          * backwards compatible, we need to handle the case where
2341          * tun->lt_tun is smaller than what the kernel has, because it
2342          * comes from an older version of a userspace program.  In that
2343          * case we copy only as much information as there is space for.
2344          */
2345         min_size = tun_size - sizeof(tun->lt_cmn);
2346         memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2347
2348         /* copy over the cpts */
2349         if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2350             ni->ni_cpts == NULL)  {
2351                 for (i = 0; i < ni->ni_ncpts; i++)
2352                         cfg_ni->lic_cpts[i] = i;
2353         } else {
2354                 for (i = 0;
2355                      ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2356                      i < LNET_MAX_SHOW_NUM_CPT;
2357                      i++)
2358                         cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2359         }
2360         cfg_ni->lic_ncpts = ni->ni_ncpts;
2361 }
2362
2363 /**
2364  * NOTE: This is a legacy function left in the code to be backwards
2365  * compatible with older userspace programs. It should eventually be
2366  * removed.
2367  *
2368  * Grabs the ni data from the ni structure and fills the out
2369  * parameters
2370  *
2371  * \param[in] ni                network interface structure
2372  * \param[out] config           config information
2373  */
2374 static void
2375 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2376                          struct lnet_ioctl_config_data *config)
2377 {
2378         struct lnet_ioctl_net_config *net_config;
2379         struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2380         size_t min_size, tunable_size = 0;
2381         int i;
2382
2383         if (!ni || !config)
2384                 return;
2385
2386         net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2387         if (!net_config)
2388                 return;
2389
2390         BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2391                      ARRAY_SIZE(net_config->ni_interfaces));
2392
2393         for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2394                 if (!ni->ni_interfaces[i])
2395                         break;
2396
2397                 strncpy(net_config->ni_interfaces[i],
2398                         ni->ni_interfaces[i],
2399                         sizeof(net_config->ni_interfaces[i]));
2400         }
2401
2402         config->cfg_nid = ni->ni_nid;
2403         config->cfg_config_u.cfg_net.net_peer_timeout =
2404                 ni->ni_net->net_tunables.lct_peer_timeout;
2405         config->cfg_config_u.cfg_net.net_max_tx_credits =
2406                 ni->ni_net->net_tunables.lct_max_tx_credits;
2407         config->cfg_config_u.cfg_net.net_peer_tx_credits =
2408                 ni->ni_net->net_tunables.lct_peer_tx_credits;
2409         config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2410                 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2411
2412         if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2413                 net_config->ni_status = LNET_NI_STATUS_UP;
2414         else
2415                 net_config->ni_status = ni->ni_status->ns_status;
2416
2417         if (ni->ni_cpts) {
2418                 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2419
2420                 for (i = 0; i < num_cpts; i++)
2421                         net_config->ni_cpts[i] = ni->ni_cpts[i];
2422
2423                 config->cfg_ncpts = num_cpts;
2424         }
2425
2426         /*
2427          * See if user land tools sent in a newer and larger version
2428          * of struct lnet_tunables than what the kernel uses.
2429          */
2430         min_size = sizeof(*config) + sizeof(*net_config);
2431
2432         if (config->cfg_hdr.ioc_len > min_size)
2433                 tunable_size = config->cfg_hdr.ioc_len - min_size;
2434
2435         /* Don't copy too much data to user space */
2436         min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2437         lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2438
2439         if (lnd_cfg && min_size) {
2440                 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2441                 config->cfg_config_u.cfg_net.net_interface_count = 1;
2442
2443                 /* Tell user land that kernel side has less data */
2444                 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2445                         min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2446                         config->cfg_hdr.ioc_len -= min_size;
2447                 }
2448         }
2449 }
2450
2451 struct lnet_ni *
2452 lnet_get_ni_idx_locked(int idx)
2453 {
2454         struct lnet_ni          *ni;
2455         struct lnet_net         *net;
2456
2457         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2458                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2459                         if (idx-- == 0)
2460                                 return ni;
2461                 }
2462         }
2463
2464         return NULL;
2465 }
2466
2467 struct lnet_ni *
2468 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2469 {
2470         struct lnet_ni          *ni;
2471         struct lnet_net         *net = mynet;
2472
2473         if (prev == NULL) {
2474                 if (net == NULL)
2475                         net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2476                                         net_list);
2477                 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2478                                 ni_netlist);
2479
2480                 return ni;
2481         }
2482
2483         if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2484                 /* if you reached the end of the ni list and the net is
2485                  * specified, then there are no more nis in that net */
2486                 if (net != NULL)
2487                         return NULL;
2488
2489                 /* we reached the end of this net ni list. move to the
2490                  * next net */
2491                 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2492                         /* no more nets and no more NIs. */
2493                         return NULL;
2494
2495                 /* get the next net */
2496                 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2497                                  net_list);
2498                 /* get the ni on it */
2499                 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2500                                 ni_netlist);
2501
2502                 return ni;
2503         }
2504
2505         /* there are more nis left */
2506         ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2507
2508         return ni;
2509 }
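/*
 * Iteration sketch (a hypothetical caller; assumes the net lock is held, as
 * the _locked suffix implies, and that at least one net is configured):
 * passing a NULL net walks every NI on every net, while passing a specific
 * net stops at the end of that net's NI list.
 *
 *      struct lnet_ni *ni = NULL;
 *
 *      while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
 *              ...inspect ni...;
 */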
2510
2511 int
2512 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2513 {
2514         struct lnet_ni *ni;
2515         int cpt;
2516         int rc = -ENOENT;
2517         int idx = config->cfg_count;
2518
2519         cpt = lnet_net_lock_current();
2520
2521         ni = lnet_get_ni_idx_locked(idx);
2522
2523         if (ni != NULL) {
2524                 rc = 0;
2525                 lnet_ni_lock(ni);
2526                 lnet_fill_ni_info_legacy(ni, config);
2527                 lnet_ni_unlock(ni);
2528         }
2529
2530         lnet_net_unlock(cpt);
2531         return rc;
2532 }
2533
2534 int
2535 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2536                    struct lnet_ioctl_config_lnd_tunables *tun,
2537                    struct lnet_ioctl_element_stats *stats,
2538                    __u32 tun_size)
2539 {
2540         struct lnet_ni          *ni;
2541         int                     cpt;
2542         int                     rc = -ENOENT;
2543
2544         if (!cfg_ni || !tun || !stats)
2545                 return -EINVAL;
2546
2547         cpt = lnet_net_lock_current();
2548
2549         ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2550
2551         if (ni) {
2552                 rc = 0;
2553                 lnet_ni_lock(ni);
2554                 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2555                 lnet_ni_unlock(ni);
2556         }
2557
2558         lnet_net_unlock(cpt);
2559         return rc;
2560 }
2561
2562 static int lnet_add_net_common(struct lnet_net *net,
2563                                struct lnet_ioctl_config_lnd_tunables *tun)
2564 {
2565         __u32                   net_id;
2566         struct lnet_ping_buffer *pbuf;
2567         struct lnet_handle_md   ping_mdh;
2568         int                     rc;
2569         struct lnet_remotenet *rnet;
2570         int                     net_ni_count;
2571         int                     num_acceptor_nets;
2572
2573         lnet_net_lock(LNET_LOCK_EX);
2574         rnet = lnet_find_rnet_locked(net->net_id);
2575         lnet_net_unlock(LNET_LOCK_EX);
2576         /*
2577          * make sure that the net added doesn't invalidate the current
2578          * configuration LNet is keeping
2579          */
2580         if (rnet) {
2581                 CERROR("Adding net %s will invalidate routing configuration\n",
2582                        libcfs_net2str(net->net_id));
2583                 lnet_net_free(net);
2584                 return -EUSERS;
2585         }
2586
2587         /*
2588          * make sure you calculate the correct number of slots in the ping
2589          * buffer. Since the ping info is a flattened list of all the NIs,
2590          * we should allocate enough slots to accommodate the number of NIs
2591          * which will be added.
2592          *
2593          * since ni hasn't been configured yet, use
2594          * lnet_get_net_ni_count_pre() which checks the net_ni_added list
2595          */
2596         net_ni_count = lnet_get_net_ni_count_pre(net);
2597
2598         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2599                                     net_ni_count + lnet_get_ni_count(),
2600                                     false);
2601         if (rc < 0) {
2602                 lnet_net_free(net);
2603                 return rc;
2604         }
2605
2606         if (tun)
2607                 memcpy(&net->net_tunables,
2608                        &tun->lt_cmn, sizeof(net->net_tunables));
2609         else
2610                 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
2611
2612         /*
2613          * before starting this network get a count of the current TCP
2614          * networks which require the acceptor thread running. If that
2615          * count is == 0 before we start up this network, then we'd want to
2616          * start up the acceptor thread after starting up this network
2617          */
2618         num_acceptor_nets = lnet_count_acceptor_nets();
2619
2620         net_id = net->net_id;
2621
2622         rc = lnet_startup_lndnet(net,
2623                                  (tun) ? &tun->lt_tun : NULL);
2624         if (rc < 0)
2625                 goto failed;
2626
2627         lnet_net_lock(LNET_LOCK_EX);
2628         net = lnet_get_net_locked(net_id);
2629         lnet_net_unlock(LNET_LOCK_EX);
2630
2631         LASSERT(net);
2632
2633         /*
2634          * Start the acceptor thread if this is the first network
2635          * being added that requires the thread.
2636          */
2637         if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2638                 rc = lnet_acceptor_start();
2639                 if (rc < 0) {
2640                         /* shutdown the net that we just started */
2641                         CERROR("Failed to start up acceptor thread\n");
2642                         lnet_shutdown_lndnet(net);
2643                         goto failed;
2644                 }
2645         }
2646
2647         lnet_net_lock(LNET_LOCK_EX);
2648         lnet_peer_net_added(net);
2649         lnet_net_unlock(LNET_LOCK_EX);
2650
2651         lnet_ping_target_update(pbuf, ping_mdh);
2652
2653         return 0;
2654
2655 failed:
2656         lnet_ping_md_unlink(pbuf, &ping_mdh);
2657         lnet_ping_buffer_decref(pbuf);
2658         return rc;
2659 }
2660
2661 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2662                                       struct lnet_ioctl_config_lnd_tunables *tun)
2663 {
2664         struct lnet_net *net;
2665         char *nets;
2666         int rc;
2667         struct list_head net_head;
2668
2669         INIT_LIST_HEAD(&net_head);
2670
2671         rc = lnet_parse_ip2nets(&nets, ip2nets);
2672         if (rc < 0)
2673                 return rc;
2674
2675         rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2676         if (rc < 0)
2677                 return rc;
2678
2679         mutex_lock(&the_lnet.ln_api_mutex);
2680         while (!list_empty(&net_head)) {
2681                 net = list_entry(net_head.next, struct lnet_net, net_list);
2682                 list_del_init(&net->net_list);
2683                 rc = lnet_add_net_common(net, tun);
2684                 if (rc < 0)
2685                         goto out;
2686         }
2687
2688 out:
2689         mutex_unlock(&the_lnet.ln_api_mutex);
2690
2691         while (!list_empty(&net_head)) {
2692                 net = list_entry(net_head.next, struct lnet_net, net_list);
2693                 list_del_init(&net->net_list);
2694                 lnet_net_free(net);
2695         }
2696         return rc;
2697 }
2698
2699 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2700 {
2701         struct lnet_net *net;
2702         struct lnet_ni *ni;
2703         struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2704         int rc, i;
2705         __u32 net_id;
2706
2707         /* get the tunables if they are available */
2708         if (conf->lic_cfg_hdr.ioc_len >=
2709             sizeof(*conf) + sizeof(*tun))
2710                 tun = (struct lnet_ioctl_config_lnd_tunables *)
2711                         conf->lic_bulk;
2712
2713         /* handle legacy ip2nets from DLC */
2714         if (conf->lic_legacy_ip2nets[0] != '\0')
2715                 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
2716                                                   tun);
2717
2718         net_id = LNET_NIDNET(conf->lic_nid);
2719
2720         net = lnet_net_alloc(net_id, NULL);
2721         if (!net)
2722                 return -ENOMEM;
2723
2724         for (i = 0; i < conf->lic_ncpts; i++) {
2725                 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
2726                         return -EINVAL;
2727         }
2728
2729         ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
2730                                        conf->lic_ni_intf[0]);
2731         if (!ni)
2732                 return -ENOMEM;
2733
2734         mutex_lock(&the_lnet.ln_api_mutex);
2735
2736         rc = lnet_add_net_common(net, tun);
2737
2738         mutex_unlock(&the_lnet.ln_api_mutex);
2739
2740         return rc;
2741 }
2742
2743 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
2744 {
2745         struct lnet_net  *net;
2746         struct lnet_ni *ni;
2747         __u32 net_id = LNET_NIDNET(conf->lic_nid);
2748         struct lnet_ping_buffer *pbuf;
2749         struct lnet_handle_md  ping_mdh;
2750         int               rc;
2751         int               net_count;
2752         __u32             addr;
2753
2754         /* don't allow userspace to shutdown the LOLND */
2755         if (LNET_NETTYP(net_id) == LOLND)
2756                 return -EINVAL;
2757
2758         mutex_lock(&the_lnet.ln_api_mutex);
2759
2760         lnet_net_lock(0);
2761
2762         net = lnet_get_net_locked(net_id);
2763         if (!net) {
2764                 CERROR("net %s not found\n",
2765                        libcfs_net2str(net_id));
2766                 rc = -ENOENT;
2767                 goto unlock_net;
2768         }
2769
2770         addr = LNET_NIDADDR(conf->lic_nid);
2771         if (addr == 0) {
2772                 /* remove the entire net */
2773                 net_count = lnet_get_net_ni_count_locked(net);
2774
2775                 lnet_net_unlock(0);
2776
2777                 /* create and link a new ping info, before removing the old one */
2778                 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2779                                         lnet_get_ni_count() - net_count,
2780                                         false);
2781                 if (rc != 0)
2782                         goto unlock_api_mutex;
2783
2784                 lnet_shutdown_lndnet(net);
2785
2786                 if (lnet_count_acceptor_nets() == 0)
2787                         lnet_acceptor_stop();
2788
2789                 lnet_ping_target_update(pbuf, ping_mdh);
2790
2791                 goto unlock_api_mutex;
2792         }
2793
2794         ni = lnet_nid2ni_locked(conf->lic_nid, 0);
2795         if (!ni) {
2796                 CERROR("nid %s not found\n",
2797                        libcfs_nid2str(conf->lic_nid));
2798                 rc = -ENOENT;
2799                 goto unlock_net;
2800         }
2801
2802         net_count = lnet_get_net_ni_count_locked(net);
2803
2804         lnet_net_unlock(0);
2805
2806         /* create and link a new ping info, before removing the old one */
2807         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2808                                   lnet_get_ni_count() - 1, false);
2809         if (rc != 0)
2810                 goto unlock_api_mutex;
2811
2812         lnet_shutdown_lndni(ni);
2813
2814         if (lnet_count_acceptor_nets() == 0)
2815                 lnet_acceptor_stop();
2816
2817         lnet_ping_target_update(pbuf, ping_mdh);
2818
2819         /* check if the net is empty and remove it if it is */
2820         if (net_count == 1)
2821                 lnet_shutdown_lndnet(net);
2822
2823         goto unlock_api_mutex;
2824
2825 unlock_net:
2826         lnet_net_unlock(0);
2827 unlock_api_mutex:
2828         mutex_unlock(&the_lnet.ln_api_mutex);
2829
2830         return rc;
2831 }
2832
2833 /*
2834  * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
2835  * They are only expected to be called for unique networks.
2836  * That can happen as a result of calls from older DLC
2837  * libraries. Multi-Rail DLC and beyond no longer use these APIs.
2838  */
2839 int
2840 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
2841 {
2842         struct lnet_net         *net;
2843         struct list_head        net_head;
2844         int                     rc;
2845         struct lnet_ioctl_config_lnd_tunables tun;
2846         char *nets = conf->cfg_config_u.cfg_net.net_intf;
2847
2848         INIT_LIST_HEAD(&net_head);
2849
2850         /* Create a net/ni structures for the network string */
2851         rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2852         if (rc <= 0)
2853                 return rc == 0 ? -EINVAL : rc;
2854
2855         mutex_lock(&the_lnet.ln_api_mutex);
2856
2857         if (rc > 1) {
2858                 rc = -EINVAL; /* only add one network per call */
2859                 goto out_unlock_clean;
2860         }
2861
2862         net = list_entry(net_head.next, struct lnet_net, net_list);
2863         list_del_init(&net->net_list);
2864
2865         LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
2866
2867         memset(&tun, 0, sizeof(tun));
2868
2869         tun.lt_cmn.lct_peer_timeout =
2870           conf->cfg_config_u.cfg_net.net_peer_timeout;
2871         tun.lt_cmn.lct_peer_tx_credits =
2872           conf->cfg_config_u.cfg_net.net_peer_tx_credits;
2873         tun.lt_cmn.lct_peer_rtr_credits =
2874           conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
2875         tun.lt_cmn.lct_max_tx_credits =
2876           conf->cfg_config_u.cfg_net.net_max_tx_credits;
2877
2878         rc = lnet_add_net_common(net, &tun);
2879
2880 out_unlock_clean:
2881         mutex_unlock(&the_lnet.ln_api_mutex);
2882         while (!list_empty(&net_head)) {
2883                 /* net_head list is empty in success case */
2884                 net = list_entry(net_head.next, struct lnet_net, net_list);
2885                 list_del_init(&net->net_list);
2886                 lnet_net_free(net);
2887         }
2888         return rc;
2889 }
2890
2891 int
2892 lnet_dyn_del_net(__u32 net_id)
2893 {
2894         struct lnet_net  *net;
2895         struct lnet_ping_buffer *pbuf;
2896         struct lnet_handle_md ping_mdh;
2897         int               rc;
2898         int               net_ni_count;
2899
2900         /* don't allow userspace to shutdown the LOLND */
2901         if (LNET_NETTYP(net_id) == LOLND)
2902                 return -EINVAL;
2903
2904         mutex_lock(&the_lnet.ln_api_mutex);
2905
2906         lnet_net_lock(0);
2907
2908         net = lnet_get_net_locked(net_id);
2909         if (net == NULL) {
2910                 lnet_net_unlock(0);
2911                 rc = -EINVAL;
2912                 goto out;
2913         }
2914
2915         net_ni_count = lnet_get_net_ni_count_locked(net);
2916
2917         lnet_net_unlock(0);
2918
2919         /* create and link a new ping info, before removing the old one */
2920         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2921                                     lnet_get_ni_count() - net_ni_count, false);
2922         if (rc != 0)
2923                 goto out;
2924
2925         lnet_shutdown_lndnet(net);
2926
2927         if (lnet_count_acceptor_nets() == 0)
2928                 lnet_acceptor_stop();
2929
2930         lnet_ping_target_update(pbuf, ping_mdh);
2931
2932 out:
2933         mutex_unlock(&the_lnet.ln_api_mutex);
2934
2935         return rc;
2936 }
2937
2938 void lnet_incr_dlc_seq(void)
2939 {
2940         atomic_inc(&lnet_dlc_seq_no);
2941 }
2942
2943 __u32 lnet_get_dlc_seq_locked(void)
2944 {
2945         return atomic_read(&lnet_dlc_seq_no);
2946 }
2947
2948 /**
2949  * LNet ioctl handler.
2950  *
2951  */
2952 int
2953 LNetCtl(unsigned int cmd, void *arg)
2954 {
2955         struct libcfs_ioctl_data *data = arg;
2956         struct lnet_ioctl_config_data *config;
2957         struct lnet_process_id    id = {0};
2958         struct lnet_ni           *ni;
2959         int                       rc;
2960
2961         BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
2962                      sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
2963
2964         switch (cmd) {
2965         case IOC_LIBCFS_GET_NI:
2966                 rc = LNetGetId(data->ioc_count, &id);
2967                 data->ioc_nid = id.nid;
2968                 return rc;
2969
2970         case IOC_LIBCFS_FAIL_NID:
2971                 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
2972
2973         case IOC_LIBCFS_ADD_ROUTE:
2974                 config = arg;
2975
2976                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2977                         return -EINVAL;
2978
2979                 mutex_lock(&the_lnet.ln_api_mutex);
2980                 rc = lnet_add_route(config->cfg_net,
2981                                     config->cfg_config_u.cfg_route.rtr_hop,
2982                                     config->cfg_nid,
2983                                     config->cfg_config_u.cfg_route.
2984                                         rtr_priority);
2985                 if (rc == 0) {
2986                         rc = lnet_check_routes();
2987                         if (rc != 0)
2988                                 lnet_del_route(config->cfg_net,
2989                                                config->cfg_nid);
2990                 }
2991                 mutex_unlock(&the_lnet.ln_api_mutex);
2992                 return rc;
2993
2994         case IOC_LIBCFS_DEL_ROUTE:
2995                 config = arg;
2996
2997                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2998                         return -EINVAL;
2999
3000                 mutex_lock(&the_lnet.ln_api_mutex);
3001                 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3002                 mutex_unlock(&the_lnet.ln_api_mutex);
3003                 return rc;
3004
3005         case IOC_LIBCFS_GET_ROUTE:
3006                 config = arg;
3007
3008                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3009                         return -EINVAL;
3010
3011                 mutex_lock(&the_lnet.ln_api_mutex);
3012                 rc = lnet_get_route(config->cfg_count,
3013                                     &config->cfg_net,
3014                                     &config->cfg_config_u.cfg_route.rtr_hop,
3015                                     &config->cfg_nid,
3016                                     &config->cfg_config_u.cfg_route.rtr_flags,
3017                                     &config->cfg_config_u.cfg_route.
3018                                         rtr_priority);
3019                 mutex_unlock(&the_lnet.ln_api_mutex);
3020                 return rc;
3021
3022         case IOC_LIBCFS_GET_LOCAL_NI: {
3023                 struct lnet_ioctl_config_ni *cfg_ni;
3024                 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3025                 struct lnet_ioctl_element_stats *stats;
3026                 __u32 tun_size;
3027
3028                 cfg_ni = arg;
3029                 /* get the tunables if they are available */
3030                 if (cfg_ni->lic_cfg_hdr.ioc_len <
3031                     sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3032                         return -EINVAL;
3033
3034                 stats = (struct lnet_ioctl_element_stats *)
3035                         cfg_ni->lic_bulk;
3036                 tun = (struct lnet_ioctl_config_lnd_tunables *)
3037                                 (cfg_ni->lic_bulk + sizeof(*stats));
3038
3039                 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3040                         sizeof(*stats);
3041
3042                 mutex_lock(&the_lnet.ln_api_mutex);
3043                 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3044                 mutex_unlock(&the_lnet.ln_api_mutex);
3045                 return rc;
3046         }
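        /*
         * Layout note for the case above (descriptive only): lic_bulk is
         * expected to carry a struct lnet_ioctl_element_stats immediately
         * followed by a struct lnet_ioctl_config_lnd_tunables, and tun_size
         * is whatever length remains once cfg_ni and the stats block have
         * been accounted for.
         */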
3047
3048         case IOC_LIBCFS_GET_NET: {
3049                 size_t total = sizeof(*config) +
3050                                sizeof(struct lnet_ioctl_net_config);
3051                 config = arg;
3052
3053                 if (config->cfg_hdr.ioc_len < total)
3054                         return -EINVAL;
3055
3056                 mutex_lock(&the_lnet.ln_api_mutex);
3057                 rc = lnet_get_net_config(config);
3058                 mutex_unlock(&the_lnet.ln_api_mutex);
3059                 return rc;
3060         }
3061
3062         case IOC_LIBCFS_GET_LNET_STATS:
3063         {
3064                 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3065
3066                 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3067                         return -EINVAL;
3068
3069                 mutex_lock(&the_lnet.ln_api_mutex);
3070                 lnet_counters_get(&lnet_stats->st_cntrs);
3071                 mutex_unlock(&the_lnet.ln_api_mutex);
3072                 return 0;
3073         }
3074
3075         case IOC_LIBCFS_CONFIG_RTR:
3076                 config = arg;
3077
3078                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3079                         return -EINVAL;
3080
3081                 mutex_lock(&the_lnet.ln_api_mutex);
3082                 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3083                         rc = lnet_rtrpools_enable();
3084                         mutex_unlock(&the_lnet.ln_api_mutex);
3085                         return rc;
3086                 }
3087                 lnet_rtrpools_disable();
3088                 mutex_unlock(&the_lnet.ln_api_mutex);
3089                 return 0;
3090
3091         case IOC_LIBCFS_ADD_BUF:
3092                 config = arg;
3093
3094                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3095                         return -EINVAL;
3096
3097                 mutex_lock(&the_lnet.ln_api_mutex);
3098                 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3099                                                 buf_tiny,
3100                                           config->cfg_config_u.cfg_buffers.
3101                                                 buf_small,
3102                                           config->cfg_config_u.cfg_buffers.
3103                                                 buf_large);
3104                 mutex_unlock(&the_lnet.ln_api_mutex);
3105                 return rc;
3106
3107         case IOC_LIBCFS_SET_NUMA_RANGE: {
3108                 struct lnet_ioctl_set_value *numa;
3109                 numa = arg;
3110                 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3111                         return -EINVAL;
3112                 lnet_net_lock(LNET_LOCK_EX);
3113                 lnet_numa_range = numa->sv_value;
3114                 lnet_net_unlock(LNET_LOCK_EX);
3115                 return 0;
3116         }
3117
3118         case IOC_LIBCFS_GET_NUMA_RANGE: {
3119                 struct lnet_ioctl_set_value *numa = arg;
3120
3121                 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3122                         return -EINVAL;
3123                 numa->sv_value = lnet_numa_range;
3124                 return 0;
3125         }
3126
3127         case IOC_LIBCFS_GET_BUF: {
3128                 struct lnet_ioctl_pool_cfg *pool_cfg;
3129                 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3130
3131                 config = arg;
3132
3133                 if (config->cfg_hdr.ioc_len < total)
3134                         return -EINVAL;
3135
3136                 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3137
3138                 mutex_lock(&the_lnet.ln_api_mutex);
3139                 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3140                 mutex_unlock(&the_lnet.ln_api_mutex);
3141                 return rc;
3142         }
3143
3144         case IOC_LIBCFS_ADD_PEER_NI: {
3145                 struct lnet_ioctl_peer_cfg *cfg = arg;
3146
3147                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3148                         return -EINVAL;
3149
3150                 mutex_lock(&the_lnet.ln_api_mutex);
3151                 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3152                                       cfg->prcfg_cfg_nid,
3153                                       cfg->prcfg_mr);
3154                 mutex_unlock(&the_lnet.ln_api_mutex);
3155                 return rc;
3156         }
3157
3158         case IOC_LIBCFS_DEL_PEER_NI: {
3159                 struct lnet_ioctl_peer_cfg *cfg = arg;
3160
3161                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3162                         return -EINVAL;
3163
3164                 mutex_lock(&the_lnet.ln_api_mutex);
3165                 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3166                                       cfg->prcfg_cfg_nid);
3167                 mutex_unlock(&the_lnet.ln_api_mutex);
3168                 return rc;
3169         }
3170
3171         case IOC_LIBCFS_GET_PEER_INFO: {
3172                 struct lnet_ioctl_peer *peer_info = arg;
3173
3174                 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3175                         return -EINVAL;
3176
3177                 mutex_lock(&the_lnet.ln_api_mutex);
3178                 rc = lnet_get_peer_ni_info(
3179                    peer_info->pr_count,
3180                    &peer_info->pr_nid,
3181                    peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3182                    &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3183                    &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3184                    &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3185                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3186                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3187                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3188                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3189                 mutex_unlock(&the_lnet.ln_api_mutex);
3190                 return rc;
3191         }
3192
3193         case IOC_LIBCFS_GET_PEER_NI: {
3194                 struct lnet_ioctl_peer_cfg *cfg = arg;
3195
3196                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3197                         return -EINVAL;
3198
3199                 mutex_lock(&the_lnet.ln_api_mutex);
3200                 rc = lnet_get_peer_info(&cfg->prcfg_prim_nid,
3201                                         &cfg->prcfg_cfg_nid,
3202                                         &cfg->prcfg_count,
3203                                         &cfg->prcfg_mr,
3204                                         &cfg->prcfg_size,
3205                                         (void __user *)cfg->prcfg_bulk);
3206                 mutex_unlock(&the_lnet.ln_api_mutex);
3207                 return rc;
3208         }
3209
3210         case IOC_LIBCFS_GET_PEER_LIST: {
3211                 struct lnet_ioctl_peer_cfg *cfg = arg;
3212
3213                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3214                         return -EINVAL;
3215
3216                 mutex_lock(&the_lnet.ln_api_mutex);
3217                 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3218                         (struct lnet_process_id __user *)cfg->prcfg_bulk);
3219                 mutex_unlock(&the_lnet.ln_api_mutex);
3220                 return rc;
3221         }
3222
3223         case IOC_LIBCFS_NOTIFY_ROUTER: {
3224                 unsigned long jiffies_passed;
3225
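                     /* ioc_u64[0] is the caller-supplied "when" timestamp in
                      * seconds; convert the elapsed wall-clock time to jiffies
                      * so lnet_notify() receives an absolute jiffies value. */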
3226                 jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
3227                 jiffies_passed = cfs_time_seconds(jiffies_passed);
3228
3229                 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3230                                    jiffies - jiffies_passed);
3231         }
3232
3233         case IOC_LIBCFS_LNET_DIST:
3234                 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3235                 if (rc < 0 && rc != -EHOSTUNREACH)
3236                         return rc;
3237
3238                 data->ioc_u32[0] = rc;
3239                 return 0;
3240
3241         case IOC_LIBCFS_TESTPROTOCOMPAT:
3242                 lnet_net_lock(LNET_LOCK_EX);
3243                 the_lnet.ln_testprotocompat = data->ioc_flags;
3244                 lnet_net_unlock(LNET_LOCK_EX);
3245                 return 0;
3246
3247         case IOC_LIBCFS_LNET_FAULT:
3248                 return lnet_fault_ctl(data->ioc_flags, data);
3249
3250         case IOC_LIBCFS_PING: {
3251                 signed long timeout;
3252
3253                 id.nid = data->ioc_nid;
3254                 id.pid = data->ioc_u32[0];
3255
3256                 /* If timeout is <= 0 or too large, default to 3 minutes */
3257                 if (((s32)data->ioc_u32[1] <= 0) ||
3258                     data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3259                         timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3260                 else
3261                         timeout = msecs_to_jiffies(data->ioc_u32[1]);
3262
3263                 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3264                                data->ioc_plen1 / sizeof(struct lnet_process_id));
3265
3266                 if (rc < 0)
3267                         return rc;
3268
3269                 data->ioc_count = rc;
3270                 return 0;
3271         }
3272
3273         case IOC_LIBCFS_PING_PEER: {
3274                 struct lnet_ioctl_ping_data *ping = arg;
3275                 struct lnet_peer *lp;
3276                 signed long timeout;
3277
3278                 /* If timeout is <= 0 or too large, default to 3 minutes */
3279                 if (((s32)ping->op_param) <= 0 ||
3280                     ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3281                         timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3282                 else
3283                         timeout = msecs_to_jiffies(ping->op_param);
3284
3285                 rc = lnet_ping(ping->ping_id, timeout,
3286                                ping->ping_buf,
3287                                ping->ping_count);
3288                 if (rc < 0)
3289                         return rc;
3290
3291                 lp = lnet_find_peer(ping->ping_id.nid);
3292                 if (lp) {
3293                         ping->ping_id.nid = lp->lp_primary_nid;
3294                         ping->mr_info = lnet_peer_is_multi_rail(lp);
3295                 }
3296                 ping->ping_count = rc;
3297                 return 0;
3298         }
3299
3300         case IOC_LIBCFS_DISCOVER: {
3301                 struct lnet_ioctl_ping_data *discover = arg;
3302                 struct lnet_peer *lp;
3303
3304                 rc = lnet_discover(discover->ping_id, discover->op_param,
3305                                    discover->ping_buf,
3306                                    discover->ping_count);
3307                 if (rc < 0)
3308                         return rc;
3309                 lp = lnet_find_peer(discover->ping_id.nid);
3310                 if (lp) {
3311                         discover->ping_id.nid = lp->lp_primary_nid;
3312                         discover->mr_info = lnet_peer_is_multi_rail(lp);
3313                 }
3314
3315                 discover->ping_count = rc;
3316                 return 0;
3317         }
3318
3319         default:
3320                 ni = lnet_net2ni_addref(data->ioc_net);
3321                 if (ni == NULL)
3322                         return -EINVAL;
3323
3324                 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3325                         rc = -EINVAL;
3326                 else
3327                         rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3328
3329                 lnet_ni_decref(ni);
3330                 return rc;
3331         }
3332         /* not reached */
3333 }
3334 EXPORT_SYMBOL(LNetCtl);
3335
3336 void LNetDebugPeer(struct lnet_process_id id)
3337 {
3338         lnet_debug_peer(id.nid);
3339 }
3340 EXPORT_SYMBOL(LNetDebugPeer);
3341
3342 /**
3343  * Determine if the specified peer \a nid is on the local node.
3344  *
3345  * \param nid   peer nid to check
3346  *
3347  * \retval true         If peer NID is on the local node.
3348  * \retval false        If peer NID is not on the local node.
3349  */
3350 bool LNetIsPeerLocal(lnet_nid_t nid)
3351 {
3352         struct lnet_net *net;
3353         struct lnet_ni *ni;
3354         int cpt;
3355
3356         cpt = lnet_net_lock_current();
3357         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3358                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3359                         if (ni->ni_nid == nid) {
3360                                 lnet_net_unlock(cpt);
3361                                 return true;
3362                         }
3363                 }
3364         }
3365         lnet_net_unlock(cpt);
3366
3367         return false;
3368 }
3369 EXPORT_SYMBOL(LNetIsPeerLocal);
3370
3371 /**
3372  * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3373  * Note that all interfaces share the same PID, as requested by LNetNIInit().
3374  *
3375  * \param index Index of the interface to look up.
3376  * \param id On successful return, this location will hold the
3377  * struct lnet_process_id ID of the interface.
3378  *
3379  * \retval 0 If an interface exists at \a index.
3380  * \retval -ENOENT If no interface has been found.
3381  */
3382 int
3383 LNetGetId(unsigned int index, struct lnet_process_id *id)
3384 {
3385         struct lnet_ni   *ni;
3386         struct lnet_net  *net;
3387         int               cpt;
3388         int               rc = -ENOENT;
3389
3390         LASSERT(the_lnet.ln_refcount > 0);
3391
3392         cpt = lnet_net_lock_current();
3393
3394         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3395                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3396                         if (index-- != 0)
3397                                 continue;
3398
3399                         id->nid = ni->ni_nid;
3400                         id->pid = the_lnet.ln_pid;
3401                         rc = 0;
3402                         break;
3403                 }
3404         }
3405
3406         lnet_net_unlock(cpt);
3407         return rc;
3408 }
3409 EXPORT_SYMBOL(LNetGetId);
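
     /*
      * Usage sketch (illustrative, not part of the original file): a
      * hypothetical in-kernel caller can enumerate the local interfaces by
      * increasing the index until -ENOENT is returned, e.g.:
      *
      *      struct lnet_process_id id;
      *      unsigned int i;
      *
      *      for (i = 0; LNetGetId(i, &id) == 0; i++)
      *              CDEBUG(D_NET, "local NI %s\n", libcfs_id2str(id));
      */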
3410
3411 static int lnet_ping(struct lnet_process_id id, signed long timeout,
3412                      struct lnet_process_id __user *ids, int n_ids)
3413 {
3414         struct lnet_handle_eq eqh;
3415         struct lnet_handle_md mdh;
3416         struct lnet_event event;
3417         struct lnet_md md = { NULL };
3418         int which;
3419         int unlinked = 0;
3420         int replied = 0;
3421         const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
3422         struct lnet_ping_buffer *pbuf;
3423         struct lnet_process_id tmpid;
3424         int i;
3425         int nob;
3426         int rc;
3427         int rc2;
3428         sigset_t blocked;
3429
3430         /* n_ids limit is arbitrary */
3431         if (n_ids <= 0 || n_ids > lnet_interfaces_max || id.nid == LNET_NID_ANY)
3432                 return -EINVAL;
3433
3434         if (id.pid == LNET_PID_ANY)
3435                 id.pid = LNET_PID_LUSTRE;
3436
3437         pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
3438         if (!pbuf)
3439                 return -ENOMEM;
3440
3441         /* NB 2 events max (including any unlink event) */
3442         rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
3443         if (rc != 0) {
3444                 CERROR("Can't allocate EQ: %d\n", rc);
3445                 goto fail_ping_buffer_decref;
3446         }
3447
3448         /* initialize md content */
3449         md.start     = &pbuf->pb_info;
3450         md.length    = LNET_PING_INFO_SIZE(n_ids);
3451         md.threshold = 2; /* GET/REPLY */
3452         md.max_size  = 0;
3453         md.options   = LNET_MD_TRUNCATE;
3454         md.user_ptr  = NULL;
3455         md.eq_handle = eqh;
3456
3457         rc = LNetMDBind(md, LNET_UNLINK, &mdh);
3458         if (rc != 0) {
3459                 CERROR("Can't bind MD: %d\n", rc);
3460                 goto fail_free_eq;
3461         }
3462
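             /* Send a GET to the peer's reserved ping portal; the REPLY, if
              * any, carries the peer's ping buffer (its list of NIs and their
              * status), which is parsed below. */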
3463         rc = LNetGet(LNET_NID_ANY, mdh, id,
3464                      LNET_RESERVED_PORTAL,
3465                      LNET_PROTO_PING_MATCHBITS, 0);
3466
3467         if (rc != 0) {
3468                 /* Don't CERROR; this could be deliberate! */
3469                 rc2 = LNetMDUnlink(mdh);
3470                 LASSERT(rc2 == 0);
3471
3472                 /* NB must wait for the UNLINK event below... */
3473                 unlinked = 1;
3474                 timeout = a_long_time;
3475         }
3476
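             /* Poll for up to two events: the REPLY (or a failure) and the
              * MD unlink; the loop only exits once event.unlinked is seen. */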
3477         do {
3478                 /* MUST block for unlink to complete */
3479                 if (unlinked)
3480                         blocked = cfs_block_allsigs();
3481
3482                 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
3483
3484                 if (unlinked)
3485                         cfs_restore_sigs(blocked);
3486
3487                 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
3488                        (rc2 <= 0) ? -1 : event.type,
3489                        (rc2 <= 0) ? -1 : event.status,
3490                        (rc2 > 0 && event.unlinked) ? " unlinked" : "");
3491
3492                 LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */
3493
3494                 if (rc2 <= 0 || event.status != 0) {
3495                         /* timeout or error */
3496                         if (!replied && rc == 0)
3497                                 rc = (rc2 < 0) ? rc2 :
3498                                      (rc2 == 0) ? -ETIMEDOUT :
3499                                      event.status;
3500
3501                         if (!unlinked) {
3502                                 /* Ensure completion in finite time... */
3503                                 LNetMDUnlink(mdh);
3504                                 /* No assertion (racing with network) */
3505                                 unlinked = 1;
3506                                 timeout = a_long_time;
3507                         } else if (rc2 == 0) {
3508                                 /* timed out waiting for unlink */
3509                                 CWARN("ping %s: late network completion\n",
3510                                       libcfs_id2str(id));
3511                         }
3512                 } else if (event.type == LNET_EVENT_REPLY) {
3513                         replied = 1;
3514                         rc = event.mlength;
3515                 }
3516         } while (rc2 <= 0 || !event.unlinked);
3517
3518         if (!replied) {
3519                 if (rc >= 0)
3520                         CWARN("%s: Unexpected rc >= 0 but no reply!\n",
3521                               libcfs_id2str(id));
3522                 rc = -EIO;
3523                 goto fail_free_eq;
3524         }
3525
3526         nob = rc;
3527         LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
3528
3529         rc = -EPROTO;           /* if I can't parse... */
3530
3531         if (nob < 8) {
3532                 CERROR("%s: ping info too short %d\n",
3533                        libcfs_id2str(id), nob);
3534                 goto fail_free_eq;
3535         }
3536
3537         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
3538                 lnet_swap_pinginfo(pbuf);
3539         } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
3540                 CERROR("%s: Unexpected magic %08x\n",
3541                        libcfs_id2str(id), pbuf->pb_info.pi_magic);
3542                 goto fail_free_eq;
3543         }
3544
3545         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
3546                 CERROR("%s: ping w/o NI status: 0x%x\n",
3547                        libcfs_id2str(id), pbuf->pb_info.pi_features);
3548                 goto fail_free_eq;
3549         }
3550
3551         if (nob < LNET_PING_INFO_SIZE(0)) {
3552                 CERROR("%s: Short reply %d(%d min)\n",
3553                        libcfs_id2str(id),
3554                        nob, (int)LNET_PING_INFO_SIZE(0));
3555                 goto fail_free_eq;
3556         }
3557
3558         if (pbuf->pb_info.pi_nnis < n_ids)
3559                 n_ids = pbuf->pb_info.pi_nnis;
3560
3561         if (nob < LNET_PING_INFO_SIZE(n_ids)) {
3562                 CERROR("%s: Short reply %d(%d expected)\n",
3563                        libcfs_id2str(id),
3564                        nob, (int)LNET_PING_INFO_SIZE(n_ids));
3565                 goto fail_free_eq;
3566         }
3567
3568         rc = -EFAULT;           /* if I segv in copy_to_user()... */
3569
3570         memset(&tmpid, 0, sizeof(tmpid));
3571         for (i = 0; i < n_ids; i++) {
3572                 tmpid.pid = pbuf->pb_info.pi_pid;
3573                 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
3574                 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
3575                         goto fail_free_eq;
3576         }
3577         rc = pbuf->pb_info.pi_nnis;
3578
3579  fail_free_eq:
3580         rc2 = LNetEQFree(eqh);
3581         if (rc2 != 0)
3582                 CERROR("failed to free EQ: %d\n", rc2);
3583         LASSERT(rc2 == 0);
3584
3585  fail_ping_buffer_decref:
3586         lnet_ping_buffer_decref(pbuf);
3587         return rc;
3588 }
3589
3590 static int
3591 lnet_discover(struct lnet_process_id id, __u32 force,
3592               struct lnet_process_id __user *ids, int n_ids)
3593 {
3594         struct lnet_peer_ni *lpni;
3595         struct lnet_peer_ni *p;
3596         struct lnet_peer *lp;
3597         struct lnet_process_id *buf;
3598         int cpt;
3599         int i;
3600         int rc;
3601         int max_intf = lnet_interfaces_max;
3602
3603         if (n_ids <= 0 ||
3604             id.nid == LNET_NID_ANY ||
3605             n_ids > max_intf)
3606                 return -EINVAL;
3607
3608         if (id.pid == LNET_PID_ANY)
3609                 id.pid = LNET_PID_LUSTRE;
3610
3611         LIBCFS_ALLOC(buf, n_ids * sizeof(*buf));
3612         if (!buf)
3613                 return -ENOMEM;
3614
3615         cpt = lnet_net_lock_current();
3616         lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
3617         if (IS_ERR(lpni)) {
3618                 rc = PTR_ERR(lpni);
3619                 goto out;
3620         }
3621
3622         /*
3623          * Clearing the NIDS_UPTODATE flag ensures the peer will
3624          * be discovered, provided discovery has not been disabled.
3625          */
3626         lp = lpni->lpni_peer_net->lpn_peer;
3627         spin_lock(&lp->lp_lock);
3628         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3629         /* If the force flag is set, force a PING and PUSH as well. */
3630         if (force)
3631                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
3632         spin_unlock(&lp->lp_lock);
3633         rc = lnet_discover_peer_locked(lpni, cpt, true);
3634         if (rc)
3635                 goto out_decref;
3636
3637         /* Peer may have changed. */
3638         lp = lpni->lpni_peer_net->lpn_peer;
3639         if (lp->lp_nnis < n_ids)
3640                 n_ids = lp->lp_nnis;
3641
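             /* Collect up to n_ids of the peer's NIDs into the temporary
              * buffer; they are copied out to userspace below. */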
3642         i = 0;
3643         p = NULL;
3644         while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
3645                 buf[i].pid = id.pid;
3646                 buf[i].nid = p->lpni_nid;
3647                 if (++i >= n_ids)
3648                         break;
3649         }
3650
3651         lnet_net_unlock(cpt);
3652
3653         rc = -EFAULT;
3654         if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
3655                 goto out_relock;
3656         rc = n_ids;
3657 out_relock:
3658         lnet_net_lock(cpt);
3659 out_decref:
3660         lnet_peer_ni_decref_locked(lpni);
3661 out:
3662         lnet_net_unlock(cpt);
3663
3664         return rc;
3665 }