LU-9918 lnet: decref on peer after use
[fs/lustre-release.git] / lnet / lnet / api-ni.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 #include <linux/moduleparam.h>
37
38 #include <lnet/lib-lnet.h>
39
40 #define D_LNI D_CONSOLE
41
42 /*
43  * initialize ln_api_mutex statically, since it needs to be used in the
44  * discovery_set callback. That module parameter callback can be called
45  * before module init completes. The mutex needs to be ready for use then.
46  */
47 struct lnet the_lnet = {
48         .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
49 };              /* THE state of the network */
50 EXPORT_SYMBOL(the_lnet);
51
52 static char *ip2nets = "";
53 module_param(ip2nets, charp, 0444);
54 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
55
56 static char *networks = "";
57 module_param(networks, charp, 0444);
58 MODULE_PARM_DESC(networks, "local networks");
59
60 static char *routes = "";
61 module_param(routes, charp, 0444);
62 MODULE_PARM_DESC(routes, "routes to non-local networks");
63
64 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
65 module_param(rnet_htable_size, int, 0444);
66 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
67
68 static int use_tcp_bonding = false;
69 module_param(use_tcp_bonding, int, 0444);
70 MODULE_PARM_DESC(use_tcp_bonding,
71                  "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
72
73 unsigned int lnet_numa_range = 0;
74 module_param(lnet_numa_range, uint, 0444);
75 MODULE_PARM_DESC(lnet_numa_range,
76                 "NUMA range to consider during Multi-Rail selection");
77
78 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
79 static int intf_max_set(const char *val, struct kernel_param *kp);
80 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
81                   &lnet_interfaces_max, S_IRUGO|S_IWUSR);
82 MODULE_PARM_DESC(lnet_interfaces_max,
83                 "Maximum number of interfaces in a node.");
84
85 unsigned lnet_peer_discovery_disabled = 0;
86 static int discovery_set(const char *val, struct kernel_param *kp);
87 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
88                   &lnet_peer_discovery_disabled, S_IRUGO|S_IWUSR);
89 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
90                 "Set to 1 to disable peer discovery on this node.");
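/*
 * Illustrative usage (not part of this file): the parameters above are
 * normally set when the lnet module is loaded, e.g. via a modprobe option
 * such as:
 *
 *   options lnet networks="tcp0(eth0)"
 *
 * The writable ones (lnet_interfaces_max, lnet_peer_discovery_disabled) can
 * also be changed at runtime through /sys/module/lnet/parameters/<name>,
 * which invokes the corresponding _set callback below.  The option string is
 * an example only; see the Lustre manual for the full syntax.
 */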
91
92 /*
93  * This sequence number keeps track of how many times DLC was used to
94  * update the local NIs. It is incremented when a NI is added or
95  * removed and checked when sending a message to determine if there is
96  * a need to re-run the selection algorithm. See lnet_select_pathway()
97  * for more details on its usage.
98  */
99 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
100
101 static int lnet_ping(struct lnet_process_id id, signed long timeout,
102                      struct lnet_process_id __user *ids, int n_ids);
103
104 static int lnet_discover(lnet_process_id_t id, __u32 force,
105                          lnet_process_id_t __user *ids, int n_ids);
106
107 static int
108 discovery_set(const char *val, struct kernel_param *kp)
109 {
110         int rc;
111         unsigned *discovery = (unsigned *)kp->arg;
112         unsigned long value;
113         struct lnet_ping_buffer *pbuf;
114
115         rc = kstrtoul(val, 0, &value);
116         if (rc) {
117                 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
118                 return rc;
119         }
120
121         value = (value) ? 1 : 0;
122
123         /*
124          * The purpose of locking the api_mutex here is to ensure that
125          * the correct value ends up stored properly.
126          */
127         mutex_lock(&the_lnet.ln_api_mutex);
128
129         if (value == *discovery) {
130                 mutex_unlock(&the_lnet.ln_api_mutex);
131                 return 0;
132         }
133
134         *discovery = value;
135
136         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
137                 mutex_unlock(&the_lnet.ln_api_mutex);
138                 return 0;
139         }
140
141         /* tell peers that discovery setting has changed */
142         lnet_net_lock(LNET_LOCK_EX);
143         pbuf = the_lnet.ln_ping_target;
144         if (value)
145                 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
146         else
147                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
148         lnet_net_unlock(LNET_LOCK_EX);
149
150         lnet_push_update_to_peers(1);
151
152         mutex_unlock(&the_lnet.ln_api_mutex);
153
154         return 0;
155 }
156
157 static int
158 intf_max_set(const char *val, struct kernel_param *kp)
159 {
160         int value, rc;
161
162         rc = kstrtoint(val, 0, &value);
163         if (rc) {
164                 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
165                 return rc;
166         }
167
168         if (value < LNET_INTERFACES_MIN) {
169                 CWARN("max interfaces provided is too small, setting to %d\n",
170                       LNET_INTERFACES_MAX_DEFAULT);
171                 value = LNET_INTERFACES_MAX_DEFAULT;
172         }
173
174         *(int *)kp->arg = value;
175
176         return 0;
177 }
178
179 static char *
180 lnet_get_routes(void)
181 {
182         return routes;
183 }
184
185 static char *
186 lnet_get_networks(void)
187 {
188         char   *nets;
189         int     rc;
190
191         if (*networks != 0 && *ip2nets != 0) {
192                 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
193                                    "'ip2nets' but not both at once\n");
194                 return NULL;
195         }
196
197         if (*ip2nets != 0) {
198                 rc = lnet_parse_ip2nets(&nets, ip2nets);
199                 return (rc == 0) ? nets : NULL;
200         }
201
202         if (*networks != 0)
203                 return networks;
204
205         return "tcp";
206 }
207
208 static void
209 lnet_init_locks(void)
210 {
211         spin_lock_init(&the_lnet.ln_eq_wait_lock);
212         init_waitqueue_head(&the_lnet.ln_eq_waitq);
213         init_waitqueue_head(&the_lnet.ln_rc_waitq);
214         mutex_init(&the_lnet.ln_lnd_mutex);
215 }
216
217 static void
218 lnet_fini_locks(void)
219 {
220 }
221
222 struct kmem_cache *lnet_mes_cachep;        /* MEs kmem_cache */
223 struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
224                                             *  MDs kmem_cache */
225
226 static int
227 lnet_descriptor_setup(void)
228 {
229         /* create specific kmem_cache for MEs and small MDs (i.e., originally
230          * allocated in <size-xxx> kmem_cache).
231          */
232         lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
233                                             0, 0, NULL);
234         if (!lnet_mes_cachep)
235                 return -ENOMEM;
236
237         lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
238                                                   LNET_SMALL_MD_SIZE, 0, 0,
239                                                   NULL);
240         if (!lnet_small_mds_cachep)
241                 return -ENOMEM;
242
243         return 0;
244 }
245
246 static void
247 lnet_descriptor_cleanup(void)
248 {
249
250         if (lnet_small_mds_cachep) {
251                 kmem_cache_destroy(lnet_small_mds_cachep);
252                 lnet_small_mds_cachep = NULL;
253         }
254
255         if (lnet_mes_cachep) {
256                 kmem_cache_destroy(lnet_mes_cachep);
257                 lnet_mes_cachep = NULL;
258         }
259 }
260
261 static int
262 lnet_create_remote_nets_table(void)
263 {
264         int               i;
265         struct list_head *hash;
266
267         LASSERT(the_lnet.ln_remote_nets_hash == NULL);
268         LASSERT(the_lnet.ln_remote_nets_hbits > 0);
269         LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
270         if (hash == NULL) {
271                 CERROR("Failed to create remote nets hash table\n");
272                 return -ENOMEM;
273         }
274
275         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
276                 INIT_LIST_HEAD(&hash[i]);
277         the_lnet.ln_remote_nets_hash = hash;
278         return 0;
279 }
280
281 static void
282 lnet_destroy_remote_nets_table(void)
283 {
284         int i;
285
286         if (the_lnet.ln_remote_nets_hash == NULL)
287                 return;
288
289         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
290                 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
291
292         LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
293                     LNET_REMOTE_NETS_HASH_SIZE *
294                     sizeof(the_lnet.ln_remote_nets_hash[0]));
295         the_lnet.ln_remote_nets_hash = NULL;
296 }
297
298 static void
299 lnet_destroy_locks(void)
300 {
301         if (the_lnet.ln_res_lock != NULL) {
302                 cfs_percpt_lock_free(the_lnet.ln_res_lock);
303                 the_lnet.ln_res_lock = NULL;
304         }
305
306         if (the_lnet.ln_net_lock != NULL) {
307                 cfs_percpt_lock_free(the_lnet.ln_net_lock);
308                 the_lnet.ln_net_lock = NULL;
309         }
310
311         lnet_fini_locks();
312 }
313
314 static int
315 lnet_create_locks(void)
316 {
317         lnet_init_locks();
318
319         the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
320         if (the_lnet.ln_res_lock == NULL)
321                 goto failed;
322
323         the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
324         if (the_lnet.ln_net_lock == NULL)
325                 goto failed;
326
327         return 0;
328
329  failed:
330         lnet_destroy_locks();
331         return -ENOMEM;
332 }
333
334 static void lnet_assert_wire_constants(void)
335 {
336         /* Wire protocol assertions generated by 'wirecheck'
337          * running on Linux robert.bartonsoftware.com 2.6.8-1.521
338          * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
339          * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
340
341         /* Constants... */
342         CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
343         CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
344         CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
345         CLASSERT(LNET_MSG_ACK == 0);
346         CLASSERT(LNET_MSG_PUT == 1);
347         CLASSERT(LNET_MSG_GET == 2);
348         CLASSERT(LNET_MSG_REPLY == 3);
349         CLASSERT(LNET_MSG_HELLO == 4);
350
351         /* Checks for struct lnet_handle_wire */
352         CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
353         CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
354         CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
355         CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
356         CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
357
358         /* Checks for struct lnet_magicversion */
359         CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
360         CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
361         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
362         CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
363         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
364         CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
365         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
366
367         /* Checks for struct lnet_hdr */
368         CLASSERT((int)sizeof(struct lnet_hdr) == 72);
369         CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
370         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
371         CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
372         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
373         CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
374         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
375         CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
376         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
377         CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
378         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
379         CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
380         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
381         CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
382         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
383
384         /* Ack */
385         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
386         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
387         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
388         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
389         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
390         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
391
392         /* Put */
393         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
394         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
395         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
396         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
397         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
398         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
399         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
400         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
401         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
402         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
403
404         /* Get */
405         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
406         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
407         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
408         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
409         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
410         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
411         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
412         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
413         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
414         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
415
416         /* Reply */
417         CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
418         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
419
420         /* Hello */
421         CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
422         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
423         CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
424         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
425
426         /* Checks for struct lnet_ni_status and related constants */
427         CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
428         CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
429         CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
430
431         /* Checks for struct lnet_ni_status */
432         CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
433         CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
434         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
435         CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
436         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
437         CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
438         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
439
440         /* Checks for struct lnet_ping_info and related constants */
441         CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
442         CLASSERT(LNET_PING_FEAT_INVAL == 0);
443         CLASSERT(LNET_PING_FEAT_BASE == 1);
444         CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
445         CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
446         CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
447         CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
448         CLASSERT(LNET_PING_FEAT_BITS == 31);
449
450         /* Checks for struct lnet_ping_info */
451         CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
452         CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
453         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
454         CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
455         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
456         CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
457         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
458         CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
459         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
460         CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
461         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
462 }
463
464 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
465 {
466         struct lnet_lnd *lnd;
467         struct list_head *tmp;
468
469         /* holding lnd mutex */
470         list_for_each(tmp, &the_lnet.ln_lnds) {
471                 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
472
473                 if (lnd->lnd_type == type)
474                         return lnd;
475         }
476         return NULL;
477 }
478
479 void
480 lnet_register_lnd(struct lnet_lnd *lnd)
481 {
482         mutex_lock(&the_lnet.ln_lnd_mutex);
483
484         LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
485         LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
486
487         list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
488         lnd->lnd_refcount = 0;
489
490         CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
491
492         mutex_unlock(&the_lnet.ln_lnd_mutex);
493 }
494 EXPORT_SYMBOL(lnet_register_lnd);
495
496 void
497 lnet_unregister_lnd(struct lnet_lnd *lnd)
498 {
499         mutex_lock(&the_lnet.ln_lnd_mutex);
500
501         LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
502         LASSERT(lnd->lnd_refcount == 0);
503
504         list_del(&lnd->lnd_list);
505         CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
506
507         mutex_unlock(&the_lnet.ln_lnd_mutex);
508 }
509 EXPORT_SYMBOL(lnet_unregister_lnd);
510
511 void
512 lnet_counters_get(struct lnet_counters *counters)
513 {
514         struct lnet_counters *ctr;
515         int             i;
516
517         memset(counters, 0, sizeof(*counters));
518
519         lnet_net_lock(LNET_LOCK_EX);
520
521         cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
522                 counters->msgs_max     += ctr->msgs_max;
523                 counters->msgs_alloc   += ctr->msgs_alloc;
524                 counters->errors       += ctr->errors;
525                 counters->send_count   += ctr->send_count;
526                 counters->recv_count   += ctr->recv_count;
527                 counters->route_count  += ctr->route_count;
528                 counters->drop_count   += ctr->drop_count;
529                 counters->send_length  += ctr->send_length;
530                 counters->recv_length  += ctr->recv_length;
531                 counters->route_length += ctr->route_length;
532                 counters->drop_length  += ctr->drop_length;
533
534         }
535         lnet_net_unlock(LNET_LOCK_EX);
536 }
537 EXPORT_SYMBOL(lnet_counters_get);
538
539 void
540 lnet_counters_reset(void)
541 {
542         struct lnet_counters *counters;
543         int             i;
544
545         lnet_net_lock(LNET_LOCK_EX);
546
547         cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
548                 memset(counters, 0, sizeof(struct lnet_counters));
549
550         lnet_net_unlock(LNET_LOCK_EX);
551 }
552
553 static char *
554 lnet_res_type2str(int type)
555 {
556         switch (type) {
557         default:
558                 LBUG();
559         case LNET_COOKIE_TYPE_MD:
560                 return "MD";
561         case LNET_COOKIE_TYPE_ME:
562                 return "ME";
563         case LNET_COOKIE_TYPE_EQ:
564                 return "EQ";
565         }
566 }
567
568 static void
569 lnet_res_container_cleanup(struct lnet_res_container *rec)
570 {
571         int     count = 0;
572
573         if (rec->rec_type == 0) /* not set yet, it's uninitialized */
574                 return;
575
576         while (!list_empty(&rec->rec_active)) {
577                 struct list_head *e = rec->rec_active.next;
578
579                 list_del_init(e);
580                 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
581                         lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
582
583                 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
584                         lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
585
586                 } else { /* NB: Active MEs should be attached on portals */
587                         LBUG();
588                 }
589                 count++;
590         }
591
592         if (count > 0) {
593                 /* Found live MDs/MEs/EQs; users really should unlink/free
594                  * all of them before finalizing LNet, but if someone didn't,
595                  * we have to recycle the garbage for them */
596                 CERROR("%d active elements on exit of %s container\n",
597                        count, lnet_res_type2str(rec->rec_type));
598         }
599
600         if (rec->rec_lh_hash != NULL) {
601                 LIBCFS_FREE(rec->rec_lh_hash,
602                             LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
603                 rec->rec_lh_hash = NULL;
604         }
605
606         rec->rec_type = 0; /* mark it as finalized */
607 }
608
609 static int
610 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
611 {
612         int     rc = 0;
613         int     i;
614
615         LASSERT(rec->rec_type == 0);
616
617         rec->rec_type = type;
618         INIT_LIST_HEAD(&rec->rec_active);
619
620         rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
621
622         /* Arbitrary choice of hash table size */
623         LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
624                          LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
625         if (rec->rec_lh_hash == NULL) {
626                 rc = -ENOMEM;
627                 goto out;
628         }
629
630         for (i = 0; i < LNET_LH_HASH_SIZE; i++)
631                 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
632
633         return 0;
634
635 out:
636         CERROR("Failed to setup %s resource container\n",
637                lnet_res_type2str(type));
638         lnet_res_container_cleanup(rec);
639         return rc;
640 }
641
642 static void
643 lnet_res_containers_destroy(struct lnet_res_container **recs)
644 {
645         struct lnet_res_container       *rec;
646         int                             i;
647
648         cfs_percpt_for_each(rec, i, recs)
649                 lnet_res_container_cleanup(rec);
650
651         cfs_percpt_free(recs);
652 }
653
654 static struct lnet_res_container **
655 lnet_res_containers_create(int type)
656 {
657         struct lnet_res_container       **recs;
658         struct lnet_res_container       *rec;
659         int                             rc;
660         int                             i;
661
662         recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
663         if (recs == NULL) {
664                 CERROR("Failed to allocate %s resource containers\n",
665                        lnet_res_type2str(type));
666                 return NULL;
667         }
668
669         cfs_percpt_for_each(rec, i, recs) {
670                 rc = lnet_res_container_setup(rec, i, type);
671                 if (rc != 0) {
672                         lnet_res_containers_destroy(recs);
673                         return NULL;
674                 }
675         }
676
677         return recs;
678 }
679
680 struct lnet_libhandle *
681 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
682 {
683         /* ALWAYS called with lnet_res_lock held */
684         struct list_head        *head;
685         struct lnet_libhandle   *lh;
686         unsigned int            hash;
687
688         if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
689                 return NULL;
690
691         hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
692         head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
693
694         list_for_each_entry(lh, head, lh_hash_chain) {
695                 if (lh->lh_cookie == cookie)
696                         return lh;
697         }
698
699         return NULL;
700 }
701
702 void
703 lnet_res_lh_initialize(struct lnet_res_container *rec,
704                        struct lnet_libhandle *lh)
705 {
706         /* ALWAYS called with lnet_res_lock held */
707         unsigned int    ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
708         unsigned int    hash;
709
710         lh->lh_cookie = rec->rec_lh_cookie;
711         rec->rec_lh_cookie += 1 << ibits;
712
713         hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
714
715         list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
716 }
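/*
 * Cookie layout sketch (derived from the helpers above; the bit widths are
 * the LNET_COOKIE_TYPE_BITS / LNET_CPT_BITS macros): the low type bits
 * identify the resource type, the next bits identify the CPT, and the
 * remaining high bits form a per-container sequence.
 * lnet_res_container_setup() seeds rec_lh_cookie with
 * (cpt << LNET_COOKIE_TYPE_BITS) | type, lnet_res_lh_initialize() hands that
 * value out and advances the sequence by
 * 1 << (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS), and lnet_res_lh_lookup()
 * rejects cookies whose type bits don't match the container before hashing
 * the sequence into rec_lh_hash.
 */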
717
718 static int lnet_unprepare(void);
719
720 static int
721 lnet_prepare(lnet_pid_t requested_pid)
722 {
723         /* Prepare to bring up the network */
724         struct lnet_res_container **recs;
725         int                       rc = 0;
726
727         if (requested_pid == LNET_PID_ANY) {
728                 /* Don't instantiate LNET just for me */
729                 return -ENETDOWN;
730         }
731
732         LASSERT(the_lnet.ln_refcount == 0);
733
734         the_lnet.ln_routing = 0;
735
736         LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
737         the_lnet.ln_pid = requested_pid;
738
739         INIT_LIST_HEAD(&the_lnet.ln_test_peers);
740         INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
741         INIT_LIST_HEAD(&the_lnet.ln_nets);
742         INIT_LIST_HEAD(&the_lnet.ln_routers);
743         INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
744         INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
745         INIT_LIST_HEAD(&the_lnet.ln_dc_request);
746         INIT_LIST_HEAD(&the_lnet.ln_dc_working);
747         INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
748         init_waitqueue_head(&the_lnet.ln_dc_waitq);
749
750         rc = lnet_descriptor_setup();
751         if (rc != 0)
752                 goto failed;
753
754         rc = lnet_create_remote_nets_table();
755         if (rc != 0)
756                 goto failed;
757
758         /*
759          * NB the interface cookie in wire handles guards against delayed
760          * replies and ACKs appearing valid after reboot.
761          */
762         the_lnet.ln_interface_cookie = ktime_get_real_ns();
763
764         the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
765                                                 sizeof(struct lnet_counters));
766         if (the_lnet.ln_counters == NULL) {
767                 CERROR("Failed to allocate counters for LNet\n");
768                 rc = -ENOMEM;
769                 goto failed;
770         }
771
772         rc = lnet_peer_tables_create();
773         if (rc != 0)
774                 goto failed;
775
776         rc = lnet_msg_containers_create();
777         if (rc != 0)
778                 goto failed;
779
780         rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
781                                       LNET_COOKIE_TYPE_EQ);
782         if (rc != 0)
783                 goto failed;
784
785         recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
786         if (recs == NULL) {
787                 rc = -ENOMEM;
788                 goto failed;
789         }
790
791         the_lnet.ln_me_containers = recs;
792
793         recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
794         if (recs == NULL) {
795                 rc = -ENOMEM;
796                 goto failed;
797         }
798
799         the_lnet.ln_md_containers = recs;
800
801         rc = lnet_portals_create();
802         if (rc != 0) {
803                 CERROR("Failed to create portals for LNet: %d\n", rc);
804                 goto failed;
805         }
806
807         return 0;
808
809  failed:
810         lnet_unprepare();
811         return rc;
812 }
813
814 static int
815 lnet_unprepare (void)
816 {
817         /* NB no LNET_LOCK since this is the last reference.  All LND instances
818          * have shut down already, so it is safe to unlink and free all
819          * descriptors, even those that appear committed to a network op (eg MD
820          * with non-zero pending count) */
821
822         lnet_fail_nid(LNET_NID_ANY, 0);
823
824         LASSERT(the_lnet.ln_refcount == 0);
825         LASSERT(list_empty(&the_lnet.ln_test_peers));
826         LASSERT(list_empty(&the_lnet.ln_nets));
827
828         lnet_portals_destroy();
829
830         if (the_lnet.ln_md_containers != NULL) {
831                 lnet_res_containers_destroy(the_lnet.ln_md_containers);
832                 the_lnet.ln_md_containers = NULL;
833         }
834
835         if (the_lnet.ln_me_containers != NULL) {
836                 lnet_res_containers_destroy(the_lnet.ln_me_containers);
837                 the_lnet.ln_me_containers = NULL;
838         }
839
840         lnet_res_container_cleanup(&the_lnet.ln_eq_container);
841
842         lnet_msg_containers_destroy();
843         lnet_peer_uninit();
844         lnet_rtrpools_free(0);
845
846         if (the_lnet.ln_counters != NULL) {
847                 cfs_percpt_free(the_lnet.ln_counters);
848                 the_lnet.ln_counters = NULL;
849         }
850         lnet_destroy_remote_nets_table();
851         lnet_descriptor_cleanup();
852
853         return 0;
854 }
855
856 struct lnet_ni  *
857 lnet_net2ni_locked(__u32 net_id, int cpt)
858 {
859         struct lnet_ni   *ni;
860         struct lnet_net  *net;
861
862         LASSERT(cpt != LNET_LOCK_EX);
863
864         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
865                 if (net->net_id == net_id) {
866                         ni = list_entry(net->net_ni_list.next, struct lnet_ni,
867                                         ni_netlist);
868                         return ni;
869                 }
870         }
871
872         return NULL;
873 }
874
875 struct lnet_ni *
876 lnet_net2ni_addref(__u32 net)
877 {
878         struct lnet_ni *ni;
879
880         lnet_net_lock(0);
881         ni = lnet_net2ni_locked(net, 0);
882         if (ni)
883                 lnet_ni_addref_locked(ni, 0);
884         lnet_net_unlock(0);
885
886         return ni;
887 }
888 EXPORT_SYMBOL(lnet_net2ni_addref);
889
890 struct lnet_net *
891 lnet_get_net_locked(__u32 net_id)
892 {
893         struct lnet_net  *net;
894
895         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
896                 if (net->net_id == net_id)
897                         return net;
898         }
899
900         return NULL;
901 }
902
903 unsigned int
904 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
905 {
906         __u64           key = nid;
907         unsigned int    val;
908
909         LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
910
911         if (number == 1)
912                 return 0;
913
914         val = hash_long(key, LNET_CPT_BITS);
915         /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
916         if (val < number)
917                 return val;
918
919         return (unsigned int)(key + val + (val >> 1)) % number;
920 }
921
922 int
923 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
924 {
925         struct lnet_net *net;
926
927         /* must be called with lnet_net_lock held */
928         if (LNET_CPT_NUMBER == 1)
929                 return 0; /* the only one */
930
931         /*
932          * If NI is provided then use the CPT identified in the NI cpt
933          * list if one exists. If one doesn't exist, then that NI is
934          * associated with all CPTs and it follows that the net it belongs
935          * to is implicitly associated with all CPTs, so just hash the nid
936          * and return that.
937          */
938         if (ni != NULL) {
939                 if (ni->ni_cpts != NULL)
940                         return ni->ni_cpts[lnet_nid_cpt_hash(nid,
941                                                              ni->ni_ncpts)];
942                 else
943                         return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
944         }
945
946         /* no NI provided so look at the net */
947         net = lnet_get_net_locked(LNET_NIDNET(nid));
948
949         if (net != NULL && net->net_cpts != NULL) {
950                 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
951         }
952
953         return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
954 }
955
956 int
957 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
958 {
959         int     cpt;
960         int     cpt2;
961
962         if (LNET_CPT_NUMBER == 1)
963                 return 0; /* the only one */
964
965         cpt = lnet_net_lock_current();
966
967         cpt2 = lnet_cpt_of_nid_locked(nid, ni);
968
969         lnet_net_unlock(cpt);
970
971         return cpt2;
972 }
973 EXPORT_SYMBOL(lnet_cpt_of_nid);
974
975 int
976 lnet_islocalnet(__u32 net_id)
977 {
978         struct lnet_net *net;
979         int             cpt;
980         bool            local;
981
982         cpt = lnet_net_lock_current();
983
984         net = lnet_get_net_locked(net_id);
985
986         local = net != NULL;
987
988         lnet_net_unlock(cpt);
989
990         return local;
991 }
992
993 bool
994 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
995 {
996         if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
997             ni->ni_state == LNET_NI_STATE_DEGRADED)
998                 return true;
999
1000         return false;
1001 }
1002
1003 struct lnet_ni  *
1004 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1005 {
1006         struct lnet_net  *net;
1007         struct lnet_ni   *ni;
1008
1009         LASSERT(cpt != LNET_LOCK_EX);
1010
1011         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1012                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1013                         if (ni->ni_nid == nid)
1014                                 return ni;
1015                 }
1016         }
1017
1018         return NULL;
1019 }
1020
1021 struct lnet_ni *
1022 lnet_nid2ni_addref(lnet_nid_t nid)
1023 {
1024         struct lnet_ni *ni;
1025
1026         lnet_net_lock(0);
1027         ni = lnet_nid2ni_locked(nid, 0);
1028         if (ni)
1029                 lnet_ni_addref_locked(ni, 0);
1030         lnet_net_unlock(0);
1031
1032         return ni;
1033 }
1034 EXPORT_SYMBOL(lnet_nid2ni_addref);
1035
1036 int
1037 lnet_islocalnid(lnet_nid_t nid)
1038 {
1039         struct lnet_ni  *ni;
1040         int             cpt;
1041
1042         cpt = lnet_net_lock_current();
1043         ni = lnet_nid2ni_locked(nid, cpt);
1044         lnet_net_unlock(cpt);
1045
1046         return ni != NULL;
1047 }
1048
1049 int
1050 lnet_count_acceptor_nets(void)
1051 {
1052         /* Return the # of NIs that need the acceptor. */
1053         int              count = 0;
1054         struct lnet_net  *net;
1055         int              cpt;
1056
1057         cpt = lnet_net_lock_current();
1058         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1059                 /* all socklnd type networks should have the acceptor
1060                  * thread started */
1061                 if (net->net_lnd->lnd_accept != NULL)
1062                         count++;
1063         }
1064
1065         lnet_net_unlock(cpt);
1066
1067         return count;
1068 }
1069
1070 struct lnet_ping_buffer *
1071 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1072 {
1073         struct lnet_ping_buffer *pbuf;
1074
1075         LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1076         if (pbuf) {
1077                 pbuf->pb_nnis = nnis;
1078                 atomic_set(&pbuf->pb_refcnt, 1);
1079         }
1080
1081         return pbuf;
1082 }
1083
1084 void
1085 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1086 {
1087         LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1088         LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1089 }
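/*
 * Reference-counting sketch: lnet_ping_buffer_alloc() returns a buffer with
 * pb_refcnt set to 1; callers elsewhere in this file take extra references
 * with lnet_ping_buffer_addref() and drop them with
 * lnet_ping_buffer_decref(), and lnet_ping_buffer_free() asserts that the
 * last reference is gone before releasing the memory.
 */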
1090
1091 static struct lnet_ping_buffer *
1092 lnet_ping_target_create(int nnis)
1093 {
1094         struct lnet_ping_buffer *pbuf;
1095
1096         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1097         if (pbuf == NULL) {
1098                 CERROR("Can't allocate ping source [%d]\n", nnis);
1099                 return NULL;
1100         }
1101
1102         pbuf->pb_info.pi_nnis = nnis;
1103         pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1104         pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1105         pbuf->pb_info.pi_features =
1106                 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1107
1108         return pbuf;
1109 }
1110
1111 static inline int
1112 lnet_get_net_ni_count_locked(struct lnet_net *net)
1113 {
1114         struct lnet_ni  *ni;
1115         int             count = 0;
1116
1117         list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1118                 count++;
1119
1120         return count;
1121 }
1122
1123 static inline int
1124 lnet_get_net_ni_count_pre(struct lnet_net *net)
1125 {
1126         struct lnet_ni  *ni;
1127         int             count = 0;
1128
1129         list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1130                 count++;
1131
1132         return count;
1133 }
1134
1135 static inline int
1136 lnet_get_ni_count(void)
1137 {
1138         struct lnet_ni  *ni;
1139         struct lnet_net *net;
1140         int             count = 0;
1141
1142         lnet_net_lock(0);
1143
1144         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1145                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1146                         count++;
1147         }
1148
1149         lnet_net_unlock(0);
1150
1151         return count;
1152 }
1153
1154 int
1155 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1156 {
1157         if (!pinfo)
1158                 return -EINVAL;
1159         if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1160                 return -EPROTO;
1161         if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1162                 return -EPROTO;
1163         /* Loopback is guaranteed to be present */
1164         if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1165                 return -ERANGE;
1166         if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1167                 return -EPROTO;
1168         return 0;
1169 }
1170
1171 static void
1172 lnet_ping_target_destroy(void)
1173 {
1174         struct lnet_net *net;
1175         struct lnet_ni  *ni;
1176
1177         lnet_net_lock(LNET_LOCK_EX);
1178
1179         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1180                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1181                         lnet_ni_lock(ni);
1182                         ni->ni_status = NULL;
1183                         lnet_ni_unlock(ni);
1184                 }
1185         }
1186
1187         lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1188         the_lnet.ln_ping_target = NULL;
1189
1190         lnet_net_unlock(LNET_LOCK_EX);
1191 }
1192
1193 static void
1194 lnet_ping_target_event_handler(struct lnet_event *event)
1195 {
1196         struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1197
1198         if (event->unlinked)
1199                 lnet_ping_buffer_decref(pbuf);
1200 }
1201
1202 static int
1203 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1204                        struct lnet_handle_md *ping_mdh,
1205                        int ni_count, bool set_eq)
1206 {
1207         struct lnet_process_id id = {
1208                 .nid = LNET_NID_ANY,
1209                 .pid = LNET_PID_ANY
1210         };
1211         struct lnet_handle_me me_handle;
1212         struct lnet_md md = { NULL };
1213         int rc, rc2;
1214
1215         if (set_eq) {
1216                 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1217                                  &the_lnet.ln_ping_target_eq);
1218                 if (rc != 0) {
1219                         CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1220                         return rc;
1221                 }
1222         }
1223
1224         *ppbuf = lnet_ping_target_create(ni_count);
1225         if (*ppbuf == NULL) {
1226                 rc = -ENOMEM;
1227                 goto fail_free_eq;
1228         }
1229
1230         /* Ping target ME/MD */
1231         rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1232                           LNET_PROTO_PING_MATCHBITS, 0,
1233                           LNET_UNLINK, LNET_INS_AFTER,
1234                           &me_handle);
1235         if (rc != 0) {
1236                 CERROR("Can't create ping target ME: %d\n", rc);
1237                 goto fail_decref_ping_buffer;
1238         }
1239
1240         /* initialize md content */
1241         md.start     = &(*ppbuf)->pb_info;
1242         md.length    = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1243         md.threshold = LNET_MD_THRESH_INF;
1244         md.max_size  = 0;
1245         md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1246                        LNET_MD_MANAGE_REMOTE;
1247         md.eq_handle = the_lnet.ln_ping_target_eq;
1248         md.user_ptr  = *ppbuf;
1249
1250         rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1251         if (rc != 0) {
1252                 CERROR("Can't attach ping target MD: %d\n", rc);
1253                 goto fail_unlink_ping_me;
1254         }
1255         lnet_ping_buffer_addref(*ppbuf);
1256
1257         return 0;
1258
1259 fail_unlink_ping_me:
1260         rc2 = LNetMEUnlink(me_handle);
1261         LASSERT(rc2 == 0);
1262 fail_decref_ping_buffer:
1263         LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1264         lnet_ping_buffer_decref(*ppbuf);
1265         *ppbuf = NULL;
1266 fail_free_eq:
1267         if (set_eq) {
1268                 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1269                 LASSERT(rc2 == 0);
1270         }
1271         return rc;
1272 }
1273
1274 static void
1275 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1276                     struct lnet_handle_md *ping_mdh)
1277 {
1278         sigset_t        blocked = cfs_block_allsigs();
1279
1280         LNetMDUnlink(*ping_mdh);
1281         LNetInvalidateMDHandle(ping_mdh);
1282
1283         /* NB the MD could be busy; this just starts the unlink */
1284         while (lnet_ping_buffer_numref(pbuf) > 1) {
1285                 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1286                 set_current_state(TASK_UNINTERRUPTIBLE);
1287                 schedule_timeout(cfs_time_seconds(1));
1288         }
1289
1290         cfs_restore_sigs(blocked);
1291 }
1292
1293 static void
1294 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1295 {
1296         struct lnet_ni          *ni;
1297         struct lnet_net         *net;
1298         struct lnet_ni_status *ns;
1299         int                     i;
1300         int                     rc;
1301
1302         i = 0;
1303         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1304                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1305                         LASSERT(i < pbuf->pb_nnis);
1306
1307                         ns = &pbuf->pb_info.pi_ni[i];
1308
1309                         ns->ns_nid = ni->ni_nid;
1310
1311                         lnet_ni_lock(ni);
1312                         ns->ns_status = (ni->ni_status != NULL) ?
1313                                          ni->ni_status->ns_status :
1314                                                 LNET_NI_STATUS_UP;
1315                         ni->ni_status = ns;
1316                         lnet_ni_unlock(ni);
1317
1318                         i++;
1319                 }
1320         }
1321         /*
1322          * We (ab)use the ns_status of the loopback interface to
1323          * transmit the sequence number. The first interface listed
1324          * must be the loopback interface.
1325          */
1326         rc = lnet_ping_info_validate(&pbuf->pb_info);
1327         if (rc) {
1328                 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1329                 LBUG();
1330         }
1331         LNET_PING_BUFFER_SEQNO(pbuf) =
1332                 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1333 }
1334
1335 static void
1336 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1337                         struct lnet_handle_md ping_mdh)
1338 {
1339         struct lnet_ping_buffer *old_pbuf = NULL;
1340         struct lnet_handle_md old_ping_md;
1341
1342         /* switch the NIs to point to the new ping info created */
1343         lnet_net_lock(LNET_LOCK_EX);
1344
1345         if (!the_lnet.ln_routing)
1346                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1347         if (!lnet_peer_discovery_disabled)
1348                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1349
1350         /* Ensure only known feature bits have been set. */
1351         LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1352         LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1353
1354         lnet_ping_target_install_locked(pbuf);
1355
1356         if (the_lnet.ln_ping_target) {
1357                 old_pbuf = the_lnet.ln_ping_target;
1358                 old_ping_md = the_lnet.ln_ping_target_md;
1359         }
1360         the_lnet.ln_ping_target_md = ping_mdh;
1361         the_lnet.ln_ping_target = pbuf;
1362
1363         lnet_net_unlock(LNET_LOCK_EX);
1364
1365         if (old_pbuf) {
1366                 /* unlink and free the old ping info */
1367                 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1368                 lnet_ping_buffer_decref(old_pbuf);
1369         }
1370
1371         lnet_push_update_to_peers(0);
1372 }
1373
1374 static void
1375 lnet_ping_target_fini(void)
1376 {
1377         int             rc;
1378
1379         lnet_ping_md_unlink(the_lnet.ln_ping_target,
1380                             &the_lnet.ln_ping_target_md);
1381
1382         rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1383         LASSERT(rc == 0);
1384
1385         lnet_ping_target_destroy();
1386 }
1387
1388 /* Resize the push target. */
1389 int lnet_push_target_resize(void)
1390 {
1391         lnet_process_id_t id = { LNET_NID_ANY, LNET_PID_ANY };
1392         lnet_md_t md = { NULL };
1393         lnet_handle_me_t meh;
1394         lnet_handle_md_t mdh;
1395         lnet_handle_md_t old_mdh;
1396         struct lnet_ping_buffer *pbuf;
1397         struct lnet_ping_buffer *old_pbuf;
1398         int nnis = the_lnet.ln_push_target_nnis;
1399         int rc;
1400
1401         if (nnis <= 0) {
1402                 rc = -EINVAL;
1403                 goto fail_return;
1404         }
1405 again:
1406         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1407         if (!pbuf) {
1408                 rc = -ENOMEM;
1409                 goto fail_return;
1410         }
1411
1412         rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1413                           LNET_PROTO_PING_MATCHBITS, 0,
1414                           LNET_UNLINK, LNET_INS_AFTER,
1415                           &meh);
1416         if (rc) {
1417                 CERROR("Can't create push target ME: %d\n", rc);
1418                 goto fail_decref_pbuf;
1419         }
1420
1421         /* initialize md content */
1422         md.start     = &pbuf->pb_info;
1423         md.length    = LNET_PING_INFO_SIZE(nnis);
1424         md.threshold = LNET_MD_THRESH_INF;
1425         md.max_size  = 0;
1426         md.options   = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1427                        LNET_MD_MANAGE_REMOTE;
1428         md.user_ptr  = pbuf;
1429         md.eq_handle = the_lnet.ln_push_target_eq;
1430
1431         rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1432         if (rc) {
1433                 CERROR("Can't attach push MD: %d\n", rc);
1434                 goto fail_unlink_meh;
1435         }
1436         lnet_ping_buffer_addref(pbuf);
1437
1438         lnet_net_lock(LNET_LOCK_EX);
1439         old_pbuf = the_lnet.ln_push_target;
1440         old_mdh = the_lnet.ln_push_target_md;
1441         the_lnet.ln_push_target = pbuf;
1442         the_lnet.ln_push_target_md = mdh;
1443         lnet_net_unlock(LNET_LOCK_EX);
1444
1445         if (old_pbuf) {
1446                 LNetMDUnlink(old_mdh);
1447                 lnet_ping_buffer_decref(old_pbuf);
1448         }
1449
1450         if (nnis < the_lnet.ln_push_target_nnis)
1451                 goto again;
1452
1453         CDEBUG(D_NET, "nnis %d success\n", nnis);
1454
1455         return 0;
1456
1457 fail_unlink_meh:
1458         LNetMEUnlink(meh);
1459 fail_decref_pbuf:
1460         lnet_ping_buffer_decref(pbuf);
1461 fail_return:
1462         CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1463         return rc;
1464 }
1465
1466 static void lnet_push_target_event_handler(struct lnet_event *ev)
1467 {
1468         struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1469
1470         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1471                 lnet_swap_pinginfo(pbuf);
1472
1473         lnet_peer_push_event(ev);
1474         if (ev->unlinked)
1475                 lnet_ping_buffer_decref(pbuf);
1476 }
1477
1478 /* Initialize the push target. */
1479 static int lnet_push_target_init(void)
1480 {
1481         int rc;
1482
1483         if (the_lnet.ln_push_target)
1484                 return -EALREADY;
1485
1486         rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1487                          &the_lnet.ln_push_target_eq);
1488         if (rc) {
1489                 CERROR("Can't allocate push target EQ: %d\n", rc);
1490                 return rc;
1491         }
1492
1493         /* Start at the required minimum, we'll enlarge if required. */
1494         the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1495
1496         rc = lnet_push_target_resize();
1497
1498         if (rc) {
1499                 LNetEQFree(the_lnet.ln_push_target_eq);
1500                 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1501         }
1502
1503         return rc;
1504 }
1505
1506 /* Clean up the push target. */
1507 static void lnet_push_target_fini(void)
1508 {
1509         if (!the_lnet.ln_push_target)
1510                 return;
1511
1512         /* Unlink and invalidate to prevent new references. */
1513         LNetMDUnlink(the_lnet.ln_push_target_md);
1514         LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1515
1516         /* Wait for the unlink to complete. */
1517         while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1518                 CDEBUG(D_NET, "Still waiting for push target MD to unlink\n");
1519                 set_current_state(TASK_UNINTERRUPTIBLE);
1520                 schedule_timeout(cfs_time_seconds(1));
1521         }
1522
1523         lnet_ping_buffer_decref(the_lnet.ln_push_target);
1524         the_lnet.ln_push_target = NULL;
1525         the_lnet.ln_push_target_nnis = 0;
1526
1527         LNetEQFree(the_lnet.ln_push_target_eq);
1528         LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1529 }
1530
1531 static int
1532 lnet_ni_tq_credits(struct lnet_ni *ni)
1533 {
1534         int     credits;
1535
1536         LASSERT(ni->ni_ncpts >= 1);
1537
1538         if (ni->ni_ncpts == 1)
1539                 return ni->ni_net->net_tunables.lct_max_tx_credits;
1540
1541         credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1542         credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1543         credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
1544
1545         return credits;
1546 }
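/*
 * Worked example (values are illustrative only): with
 * lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and ni_ncpts = 4, each
 * per-CPT transmit queue above gets min(max(256 / 4, 8 * 8), 256) = 64
 * credits.
 */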
1547
1548 static void
1549 lnet_ni_unlink_locked(struct lnet_ni *ni)
1550 {
1551         if (!list_empty(&ni->ni_cptlist)) {
1552                 list_del_init(&ni->ni_cptlist);
1553                 lnet_ni_decref_locked(ni, 0);
1554         }
1555
1556         /* move it to zombie list and nobody can find it anymore */
1557         LASSERT(!list_empty(&ni->ni_netlist));
1558         list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1559         lnet_ni_decref_locked(ni, 0);
1560 }
1561
1562 static void
1563 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1564 {
1565         int             i;
1566         int             islo;
1567         struct lnet_ni  *ni;
1568         struct list_head *zombie_list = &net->net_ni_zombie;
1569
1570         /*
1571          * Now wait for the NIs I just nuked to show up on the zombie
1572          * list and shut them down in guaranteed thread context
1573          */
1574         i = 2;
1575         while (!list_empty(zombie_list)) {
1576                 int     *ref;
1577                 int     j;
1578
1579                 ni = list_entry(zombie_list->next,
1580                                 struct lnet_ni, ni_netlist);
1581                 list_del_init(&ni->ni_netlist);
1582                 /* the ni should be in deleting state. If it's not it's
1583                  * a bug */
1584                 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1585                 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1586                         if (*ref == 0)
1587                                 continue;
1588                         /* still busy, add it back to zombie list */
1589                         list_add(&ni->ni_netlist, zombie_list);
1590                         break;
1591                 }
1592
1593                 if (!list_empty(&ni->ni_netlist)) {
1594                         lnet_net_unlock(LNET_LOCK_EX);
1595                         ++i;
1596                         if ((i & (-i)) == i) {
1597                                 CDEBUG(D_WARNING,
1598                                        "Waiting for zombie LNI %s\n",
1599                                        libcfs_nid2str(ni->ni_nid));
1600                         }
1601                         set_current_state(TASK_UNINTERRUPTIBLE);
1602                         schedule_timeout(cfs_time_seconds(1));
1603                         lnet_net_lock(LNET_LOCK_EX);
1604                         continue;
1605                 }
1606
1607                 lnet_net_unlock(LNET_LOCK_EX);
1608
1609                 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1610
1611                 LASSERT(!in_interrupt());
1612                 (net->net_lnd->lnd_shutdown)(ni);
1613
1614                 if (!islo)
1615                         CDEBUG(D_LNI, "Removed LNI %s\n",
1616                               libcfs_nid2str(ni->ni_nid));
1617
1618                 lnet_ni_free(ni);
1619                 i = 2;
1620                 lnet_net_lock(LNET_LOCK_EX);
1621         }
1622 }
1623
1624 /* shut down the NI and release its refcount */
1625 static void
1626 lnet_shutdown_lndni(struct lnet_ni *ni)
1627 {
1628         int i;
1629         struct lnet_net *net = ni->ni_net;
1630
1631         lnet_net_lock(LNET_LOCK_EX);
1632         ni->ni_state = LNET_NI_STATE_DELETING;
1633         lnet_ni_unlink_locked(ni);
1634         lnet_incr_dlc_seq();
1635         lnet_net_unlock(LNET_LOCK_EX);
1636
1637         /* clear messages for this NI on the lazy portals */
1638         for (i = 0; i < the_lnet.ln_nportals; i++)
1639                 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1640
1641         lnet_net_lock(LNET_LOCK_EX);
1642         lnet_clear_zombies_nis_locked(net);
1643         lnet_net_unlock(LNET_LOCK_EX);
1644 }
1645
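/*
 * Shut down an entire net: mark it as deleting, remove it from the list
 * it is on, shut down every NI it contains, clean up the peer tables for
 * the net, drop the reference on its LND and finally free the net.
 */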
1646 static void
1647 lnet_shutdown_lndnet(struct lnet_net *net)
1648 {
1649         struct lnet_ni *ni;
1650
1651         lnet_net_lock(LNET_LOCK_EX);
1652
1653         net->net_state = LNET_NET_STATE_DELETING;
1654
1655         list_del_init(&net->net_list);
1656
1657         while (!list_empty(&net->net_ni_list)) {
1658                 ni = list_entry(net->net_ni_list.next,
1659                                 struct lnet_ni, ni_netlist);
1660                 lnet_net_unlock(LNET_LOCK_EX);
1661                 lnet_shutdown_lndni(ni);
1662                 lnet_net_lock(LNET_LOCK_EX);
1663         }
1664
1665         lnet_net_unlock(LNET_LOCK_EX);
1666
1667         /* Do peer table cleanup for this net */
1668         lnet_peer_tables_cleanup(net);
1669
1670         lnet_net_lock(LNET_LOCK_EX);
1671         /*
1672          * decrement ref count on lnd only when the entire network goes
1673          * away
1674          */
1675         net->net_lnd->lnd_refcount--;
1676
1677         lnet_net_unlock(LNET_LOCK_EX);
1678
1679         lnet_net_free(net);
1680 }
1681
1682 static void
1683 lnet_shutdown_lndnets(void)
1684 {
1685         struct lnet_net *net;
1686
1687         /* NB called holding the global mutex */
1688
1689         /* All quiet on the API front */
1690         LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1691         LASSERT(the_lnet.ln_refcount == 0);
1692
1693         lnet_net_lock(LNET_LOCK_EX);
1694         the_lnet.ln_state = LNET_STATE_STOPPING;
1695
1696         while (!list_empty(&the_lnet.ln_nets)) {
1697                 /*
1698                  * move the nets to the zombie list to avoid them being
1699                  * picked up for new work. The loopback net is also
1700                  * included in the nets that will be moved to the zombie list
1701                  */
1702                 net = list_entry(the_lnet.ln_nets.next,
1703                                  struct lnet_net, net_list);
1704                 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1705         }
1706
1707         /* Drop the cached loopback Net. */
1708         if (the_lnet.ln_loni != NULL) {
1709                 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1710                 the_lnet.ln_loni = NULL;
1711         }
1712         lnet_net_unlock(LNET_LOCK_EX);
1713
1714         /* iterate through the net zombie list and delete each net */
1715         while (!list_empty(&the_lnet.ln_net_zombie)) {
1716                 net = list_entry(the_lnet.ln_net_zombie.next,
1717                                  struct lnet_net, net_list);
1718                 lnet_shutdown_lndnet(net);
1719         }
1720
1721         lnet_net_lock(LNET_LOCK_EX);
1722         the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1723         lnet_net_unlock(LNET_LOCK_EX);
1724 }
1725
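/*
 * Start a single NI: copy in any LND tunables, call the LND's
 * lnd_startup() and set up the NI's TX credits.  The loopback NI is
 * special-cased: it is cached in the_lnet.ln_loni and carries no credits.
 * Returns 0 on success; on failure the NI is torn down and an error is
 * returned.
 */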
1726 static int
1727 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1728 {
1729         int                     rc = -EINVAL;
1730         struct lnet_tx_queue    *tq;
1731         int                     i;
1732         struct lnet_net         *net = ni->ni_net;
1733
1734         mutex_lock(&the_lnet.ln_lnd_mutex);
1735
1736         if (tun) {
1737                 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1738                 ni->ni_lnd_tunables_set = true;
1739         }
1740
1741         rc = (net->net_lnd->lnd_startup)(ni);
1742
1743         mutex_unlock(&the_lnet.ln_lnd_mutex);
1744
1745         if (rc != 0) {
1746                 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1747                                    rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1748                 lnet_net_lock(LNET_LOCK_EX);
1749                 net->net_lnd->lnd_refcount--;
1750                 lnet_net_unlock(LNET_LOCK_EX);
1751                 goto failed0;
1752         }
1753
1754         ni->ni_state = LNET_NI_STATE_ACTIVE;
1755
1756         /* We keep a reference on the loopback net through the loopback NI */
1757         if (net->net_lnd->lnd_type == LOLND) {
1758                 lnet_ni_addref(ni);
1759                 LASSERT(the_lnet.ln_loni == NULL);
1760                 the_lnet.ln_loni = ni;
1761                 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1762                 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1763                 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1764                 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1765                 return 0;
1766         }
1767
1768         if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1769             ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1770                 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1771                                    libcfs_lnd2str(net->net_lnd->lnd_type),
1772                                    ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1773                                         "" : "per-peer ");
1774                 /* shut down the NI, since if we get here then it must
1775                  * already have been started
1776                  */
1777                 lnet_shutdown_lndni(ni);
1778                 return -EINVAL;
1779         }
1780
1781         cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1782                 tq->tq_credits_min =
1783                 tq->tq_credits_max =
1784                 tq->tq_credits = lnet_ni_tq_credits(ni);
1785         }
1786
1787         atomic_set(&ni->ni_tx_credits,
1788                    lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1789
1790         CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1791                 libcfs_nid2str(ni->ni_nid),
1792                 ni->ni_net->net_tunables.lct_peer_tx_credits,
1793                 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1794                 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1795                 ni->ni_net->net_tunables.lct_peer_timeout);
1796
1797         return 0;
1798 failed0:
1799         lnet_ni_free(ni);
1800         return rc;
1801 }
1802
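/*
 * Start up all the NIs that have been added to a net.  If the net is new,
 * the matching LND is looked up (and its module loaded if necessary)
 * first; if a net with the same ID already exists, the NIs are started
 * and spliced into that existing net and the passed-in net is freed.
 * Returns the number of NIs started, or a negative error code.
 */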
1803 static int
1804 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1805 {
1806         struct lnet_ni *ni;
1807         struct lnet_net *net_l = NULL;
1808         struct list_head        local_ni_list;
1809         int                     rc;
1810         int                     ni_count = 0;
1811         __u32                   lnd_type;
1812         struct lnet_lnd *lnd;
1813         int                     peer_timeout =
1814                 net->net_tunables.lct_peer_timeout;
1815         int                     maxtxcredits =
1816                 net->net_tunables.lct_max_tx_credits;
1817         int                     peerrtrcredits =
1818                 net->net_tunables.lct_peer_rtr_credits;
1819
1820         INIT_LIST_HEAD(&local_ni_list);
1821
1822         /*
1823          * make sure that this net is unique. If it isn't, then we are
1824          * adding interfaces to an already existing network, and 'net'
1825          * is just a convenient way to pass in the list.
1826          * If it is unique, we need to find the LND and load it if
1827          * necessary.
1828          */
1829         if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1830                 lnd_type = LNET_NETTYP(net->net_id);
1831
1832                 LASSERT(libcfs_isknown_lnd(lnd_type));
1833
1834                 mutex_lock(&the_lnet.ln_lnd_mutex);
1835                 lnd = lnet_find_lnd_by_type(lnd_type);
1836
1837                 if (lnd == NULL) {
1838                         mutex_unlock(&the_lnet.ln_lnd_mutex);
1839                         rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1840                         mutex_lock(&the_lnet.ln_lnd_mutex);
1841
1842                         lnd = lnet_find_lnd_by_type(lnd_type);
1843                         if (lnd == NULL) {
1844                                 mutex_unlock(&the_lnet.ln_lnd_mutex);
1845                                 CERROR("Can't load LND %s, module %s, rc=%d\n",
1846                                 libcfs_lnd2str(lnd_type),
1847                                 libcfs_lnd2modname(lnd_type), rc);
1848 #ifndef HAVE_MODULE_LOADING_SUPPORT
1849                                 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1850                                                 "compiled with kernel module "
1851                                                 "loading support.");
1852 #endif
1853                                 rc = -EINVAL;
1854                                 goto failed0;
1855                         }
1856                 }
1857
1858                 lnet_net_lock(LNET_LOCK_EX);
1859                 lnd->lnd_refcount++;
1860                 lnet_net_unlock(LNET_LOCK_EX);
1861
1862                 net->net_lnd = lnd;
1863
1864                 mutex_unlock(&the_lnet.ln_lnd_mutex);
1865
1866                 net_l = net;
1867         }
1868
1869         /*
1870          * net_l: if the network being added is unique then net_l
1871          *        will point to that network
1872          *        if the network being added is not unique then
1873          *        net_l points to the existing network.
1874          *
1875          * When we enter the loop below, we'll pick NIs off the
1876          * network being added and start them up, then add them to
1877          * a local ni list. Once we've successfully started all
1878          * the NIs then we join the local NI list (of started up
1879          * networks) with the net_l->net_ni_list, which should
1880          * point to the correct network to add the new ni list to
1881          *
1882          * If any of the new NIs fail to start up, then we want to
1883          * iterate through the local ni list, which should include
1884          * any NIs which were successfully started up, and shut
1885          * them down.
1886          *
1887          * After that we want to delete the network being added,
1888          * to avoid a memory leak.
1889          */
1890
1891         /*
1892          * When a network uses TCP bonding then all its interfaces
1893          * must be specified when the network is first defined: the
1894          * TCP bonding code doesn't allow for interfaces to be added
1895          * or removed.
1896          */
1897         if (net_l != net && net_l != NULL && use_tcp_bonding &&
1898             LNET_NETTYP(net_l->net_id) == SOCKLND) {
1899                 rc = -EINVAL;
1900                 goto failed0;
1901         }
1902
1903         while (!list_empty(&net->net_ni_added)) {
1904                 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1905                                 ni_netlist);
1906                 list_del_init(&ni->ni_netlist);
1907
1908                 /* make sure that the NI we're about to start
1909                  * up is actually unique. If it's not, fail. */
1910                 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1911                                         ni->ni_interfaces[0])) {
1912                         rc = -EINVAL;
1913                         goto failed1;
1914                 }
1915
1916                 /* adjust the pointer to the parent network, just in case
1917                  * the net is a duplicate */
1918                 ni->ni_net = net_l;
1919
1920                 rc = lnet_startup_lndni(ni, tun);
1921
1922                 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1923                         ni->ni_net->net_lnd->lnd_query != NULL);
1924
1925                 if (rc < 0)
1926                         goto failed1;
1927
1928                 lnet_ni_addref(ni);
1929                 list_add_tail(&ni->ni_netlist, &local_ni_list);
1930
1931                 ni_count++;
1932         }
1933
1934         lnet_net_lock(LNET_LOCK_EX);
1935         list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1936         lnet_incr_dlc_seq();
1937         lnet_net_unlock(LNET_LOCK_EX);
1938
1939         /* if the network is not unique then we don't want to keep
1940          * it around after we're done. Free it. Otherwise add that
1941          * net to the global the_lnet.ln_nets */
1942         if (net_l != net && net_l != NULL) {
1943                 /*
1944                  * TODO - note: currently the tunables cannot be updated
1945                  * once the net has been added
1946                  */
1947                 lnet_net_free(net);
1948         } else {
1949                 net->net_state = LNET_NET_STATE_ACTIVE;
1950                 /*
1951                  * restore the tunables after they have been overwritten
1952                  * by the LND
1953                  */
1954                 if (peer_timeout != -1)
1955                         net->net_tunables.lct_peer_timeout = peer_timeout;
1956                 if (maxtxcredits != -1)
1957                         net->net_tunables.lct_max_tx_credits = maxtxcredits;
1958                 if (peerrtrcredits != -1)
1959                         net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1960
1961                 lnet_net_lock(LNET_LOCK_EX);
1962                 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1963                 lnet_net_unlock(LNET_LOCK_EX);
1964         }
1965
1966         return ni_count;
1967
1968 failed1:
1969         /*
1970          * shutdown the new NIs that are being started up
1971          * free the NET being started
1972          */
1973         while (!list_empty(&local_ni_list)) {
1974                 ni = list_entry(local_ni_list.next, struct lnet_ni,
1975                                 ni_netlist);
1976
1977                 lnet_shutdown_lndni(ni);
1978         }
1979
1980 failed0:
1981         lnet_net_free(net);
1982
1983         return rc;
1984 }
1985
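/*
 * Bring up every net on the caller's list.  The LNet state is switched to
 * RUNNING first so that lnet_shutdown_lndnets() can be used for cleanup
 * if any net fails to start.  Returns the total number of NIs started, or
 * a negative error code after everything has been shut down again.
 */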
1986 static int
1987 lnet_startup_lndnets(struct list_head *netlist)
1988 {
1989         struct lnet_net         *net;
1990         int                     rc;
1991         int                     ni_count = 0;
1992
1993         /*
1994          * Change to running state before bringing up the LNDs. This
1995          * allows lnet_shutdown_lndnets() to assert that we've passed
1996          * through here.
1997          */
1998         lnet_net_lock(LNET_LOCK_EX);
1999         the_lnet.ln_state = LNET_STATE_RUNNING;
2000         lnet_net_unlock(LNET_LOCK_EX);
2001
2002         while (!list_empty(netlist)) {
2003                 net = list_entry(netlist->next, struct lnet_net, net_list);
2004                 list_del_init(&net->net_list);
2005
2006                 rc = lnet_startup_lndnet(net, NULL);
2007
2008                 if (rc < 0)
2009                         goto failed;
2010
2011                 ni_count += rc;
2012         }
2013
2014         return ni_count;
2015 failed:
2016         lnet_shutdown_lndnets();
2017
2018         return rc;
2019 }
2020
2021 /**
2022  * Initialize LNet library.
2023  *
2024  * Automatically called at module loading time. Caller has to call
2025  * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2026  * latter returned 0. It must be called exactly once.
2027  *
2028  * \retval 0 on success
2029  * \retval -ve on failures.
2030  */
2031 int lnet_lib_init(void)
2032 {
2033         int rc;
2034
2035         lnet_assert_wire_constants();
2036
2037         /* refer to global cfs_cpt_table for now */
2038         the_lnet.ln_cpt_table   = cfs_cpt_table;
2039         the_lnet.ln_cpt_number  = cfs_cpt_number(cfs_cpt_table);
2040
2041         LASSERT(the_lnet.ln_cpt_number > 0);
2042         if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2043                 /* we are under risk of consuming all lh_cookie */
2044                 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2045                        "please change setting of CPT-table and retry\n",
2046                        the_lnet.ln_cpt_number, LNET_CPT_MAX);
2047                 return -E2BIG;
2048         }
2049
2050         while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2051                 the_lnet.ln_cpt_bits++;
2052
2053         rc = lnet_create_locks();
2054         if (rc != 0) {
2055                 CERROR("Can't create LNet global locks: %d\n", rc);
2056                 return rc;
2057         }
2058
2059         the_lnet.ln_refcount = 0;
2060         LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2061         INIT_LIST_HEAD(&the_lnet.ln_lnds);
2062         INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2063         INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2064         INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2065
2066         /* The hash table size is the number of bits it takes to express the set
2067          * ln_num_routes, minus 1 (better to underestimate than to
2068          * overestimate, so we don't waste memory). */
2069         if (rnet_htable_size <= 0)
2070                 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2071         else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2072                 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2073         the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2074                                            order_base_2(rnet_htable_size) - 1);
2075
2076         /* All LNDs apart from the LOLND are in separate modules.  They
2077          * register themselves when their module loads, and unregister
2078          * themselves when their module is unloaded. */
2079         lnet_register_lnd(&the_lolnd);
2080         return 0;
2081 }
2082
2083 /**
2084  * Finalize LNet library.
2085  *
2086  * \pre lnet_lib_init() called with success.
2087  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2088  */
2089 void lnet_lib_exit(void)
2090 {
2091         LASSERT(the_lnet.ln_refcount == 0);
2092
2093         while (!list_empty(&the_lnet.ln_lnds))
2094                 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2095                                                struct lnet_lnd, lnd_list));
2096         lnet_destroy_locks();
2097 }
2098
2099 /**
2100  * Set LNet PID and start LNet interfaces, routing, and forwarding.
2101  *
2102  * Users must call this function at least once before any other functions.
2103  * For each successful call there must be a corresponding call to
2104  * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2105  * ignored.
2106  *
2107  * The PID used by LNet may be different from the one requested.
2108  * See LNetGetId().
2109  *
2110  * \param requested_pid PID requested by the caller.
2111  *
2112  * \return >= 0 on success, and < 0 error code on failures.
2113  */
2114 int
2115 LNetNIInit(lnet_pid_t requested_pid)
2116 {
2117         int                     im_a_router = 0;
2118         int                     rc;
2119         int                     ni_count;
2120         struct lnet_ping_buffer *pbuf;
2121         struct lnet_handle_md   ping_mdh;
2122         struct list_head        net_head;
2123         struct lnet_net         *net;
2124
2125         INIT_LIST_HEAD(&net_head);
2126
2127         mutex_lock(&the_lnet.ln_api_mutex);
2128
2129         CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2130
2131         if (the_lnet.ln_refcount > 0) {
2132                 rc = the_lnet.ln_refcount++;
2133                 mutex_unlock(&the_lnet.ln_api_mutex);
2134                 return rc;
2135         }
2136
2137         rc = lnet_prepare(requested_pid);
2138         if (rc != 0) {
2139                 mutex_unlock(&the_lnet.ln_api_mutex);
2140                 return rc;
2141         }
2142
2143         /* create a net for the loopback network */
2144         net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2145         if (net == NULL) {
2146                 rc = -ENOMEM;
2147                 goto err_empty_list;
2148         }
2149
2150         /* Add in the loopback NI */
2151         if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2152                 rc = -ENOMEM;
2153                 goto err_empty_list;
2154         }
2155
2156         /* If LNet is being initialized via DLC it is possible
2157          * that the user requests not to load module parameters (ones which
2158          * are supported by DLC) on initialization.  Therefore, make sure not
2159          * to load networks, routes and forwarding from module parameters
2160          * in this case.  On cleanup in case of failure, only clean up
2161          * the routes if they have been loaded. */
2162         if (!the_lnet.ln_nis_from_mod_params) {
2163                 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2164                                          use_tcp_bonding);
2165                 if (rc < 0)
2166                         goto err_empty_list;
2167         }
2168
2169         ni_count = lnet_startup_lndnets(&net_head);
2170         if (ni_count < 0) {
2171                 rc = ni_count;
2172                 goto err_empty_list;
2173         }
2174
2175         if (!the_lnet.ln_nis_from_mod_params) {
2176                 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2177                 if (rc != 0)
2178                         goto err_shutdown_lndnis;
2179
2180                 rc = lnet_check_routes();
2181                 if (rc != 0)
2182                         goto err_destroy_routes;
2183
2184                 rc = lnet_rtrpools_alloc(im_a_router);
2185                 if (rc != 0)
2186                         goto err_destroy_routes;
2187         }
2188
2189         rc = lnet_acceptor_start();
2190         if (rc != 0)
2191                 goto err_destroy_routes;
2192
2193         the_lnet.ln_refcount = 1;
2194         /* Now I may use my own API functions... */
2195
2196         rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2197         if (rc != 0)
2198                 goto err_acceptor_stop;
2199
2200         lnet_ping_target_update(pbuf, ping_mdh);
2201
2202         rc = lnet_router_checker_start();
2203         if (rc != 0)
2204                 goto err_stop_ping;
2205
2206         rc = lnet_push_target_init();
2207         if (rc != 0)
2208                 goto err_stop_router_checker;
2209
2210         rc = lnet_peer_discovery_start();
2211         if (rc != 0)
2212                 goto err_destroy_push_target;
2213
2214         lnet_fault_init();
2215         lnet_proc_init();
2216
2217         mutex_unlock(&the_lnet.ln_api_mutex);
2218
2219         return 0;
2220
2221 err_destroy_push_target:
2222         lnet_push_target_fini();
2223 err_stop_router_checker:
2224         lnet_router_checker_stop();
2225 err_stop_ping:
2226         lnet_ping_target_fini();
2227 err_acceptor_stop:
2228         the_lnet.ln_refcount = 0;
2229         lnet_acceptor_stop();
2230 err_destroy_routes:
2231         if (!the_lnet.ln_nis_from_mod_params)
2232                 lnet_destroy_routes();
2233 err_shutdown_lndnis:
2234         lnet_shutdown_lndnets();
2235 err_empty_list:
2236         lnet_unprepare();
2237         LASSERT(rc < 0);
2238         mutex_unlock(&the_lnet.ln_api_mutex);
2239         while (!list_empty(&net_head)) {
2240                 struct lnet_net *net;
2241
2242                 net = list_entry(net_head.next, struct lnet_net, net_list);
2243                 list_del_init(&net->net_list);
2244                 lnet_net_free(net);
2245         }
2246         return rc;
2247 }
2248 EXPORT_SYMBOL(LNetNIInit);
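
/*
 * Illustrative sketch only (not lifted from any particular caller): a
 * kernel user of LNet would typically pair the two calls like this,
 * assuming LNET_PID_LUSTRE is the PID it wants to request:
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	... use the LNet API ...
 *	LNetNIFini();
 */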
2249
2250 /**
2251  * Stop LNet interfaces, routing, and forwarding.
2252  *
2253  * Users must call this function once for each successful call to LNetNIInit().
2254  * Once the LNetNIFini() operation has been started, the results of pending
2255  * API operations are undefined.
2256  *
2257  * \return always 0 for current implementation.
2258  */
2259 int
2260 LNetNIFini()
2261 {
2262         mutex_lock(&the_lnet.ln_api_mutex);
2263
2264         LASSERT(the_lnet.ln_refcount > 0);
2265
2266         if (the_lnet.ln_refcount != 1) {
2267                 the_lnet.ln_refcount--;
2268         } else {
2269                 LASSERT(!the_lnet.ln_niinit_self);
2270
2271                 lnet_fault_fini();
2272
2273                 lnet_proc_fini();
2274                 lnet_peer_discovery_stop();
2275                 lnet_push_target_fini();
2276                 lnet_router_checker_stop();
2277                 lnet_ping_target_fini();
2278
2279                 /* Teardown fns that use my own API functions BEFORE here */
2280                 the_lnet.ln_refcount = 0;
2281
2282                 lnet_acceptor_stop();
2283                 lnet_destroy_routes();
2284                 lnet_shutdown_lndnets();
2285                 lnet_unprepare();
2286         }
2287
2288         mutex_unlock(&the_lnet.ln_api_mutex);
2289         return 0;
2290 }
2291 EXPORT_SYMBOL(LNetNIFini);
2292
2293 /**
2294  * Grabs the ni data from the ni structure and fills the out
2295  * parameters
2296  *
2297  * \param[in] ni                network interface structure
2298  * \param[out] cfg_ni           NI config information
2299  * \param[out] tun              network and LND tunables
2300  */
2301 static void
2302 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2303                    struct lnet_ioctl_config_lnd_tunables *tun,
2304                    struct lnet_ioctl_element_stats *stats,
2305                    __u32 tun_size)
2306 {
2307         size_t min_size = 0;
2308         int i;
2309
2310         if (!ni || !cfg_ni || !tun)
2311                 return;
2312
2313         if (ni->ni_interfaces[0] != NULL) {
2314                 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2315                         if (ni->ni_interfaces[i] != NULL) {
2316                                 strncpy(cfg_ni->lic_ni_intf[i],
2317                                         ni->ni_interfaces[i],
2318                                         sizeof(cfg_ni->lic_ni_intf[i]));
2319                         }
2320                 }
2321         }
2322
2323         cfg_ni->lic_nid = ni->ni_nid;
2324         if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2325                 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2326         else
2327                 cfg_ni->lic_status = ni->ni_status->ns_status;
2328         cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2329         cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2330
2331         memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2332
2333         if (stats) {
2334                 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2335                                                        LNET_STATS_TYPE_SEND);
2336                 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2337                                                        LNET_STATS_TYPE_RECV);
2338                 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2339                                                        LNET_STATS_TYPE_DROP);
2340         }
2341
2342         /*
2343          * tun->lt_tun will always be present, but in order to stay
2344          * backwards compatible we need to handle the case where the
2345          * tun->lt_tun coming from an older userspace program is smaller
2346          * than what the kernel has. In that case we copy only as much
2347          * information as there is available space for.
2348          */
2349         min_size = tun_size - sizeof(tun->lt_cmn);
2350         memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2351
2352         /* copy over the cpts */
2353         if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2354             ni->ni_cpts == NULL)  {
2355                 for (i = 0; i < ni->ni_ncpts; i++)
2356                         cfg_ni->lic_cpts[i] = i;
2357         } else {
2358                 for (i = 0;
2359                      ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2360                      i < LNET_MAX_SHOW_NUM_CPT;
2361                      i++)
2362                         cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2363         }
2364         cfg_ni->lic_ncpts = ni->ni_ncpts;
2365 }
2366
2367 /**
2368  * NOTE: This is a legacy function left in the code to be backwards
2369  * compatible with older userspace programs. It should eventually be
2370  * removed.
2371  *
2372  * Grabs the ni data from the ni structure and fills the out
2373  * parameters
2374  *
2375  * \param[in] ni                network interface structure
2376  * \param[out] config           config information
2377  */
2378 static void
2379 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2380                          struct lnet_ioctl_config_data *config)
2381 {
2382         struct lnet_ioctl_net_config *net_config;
2383         struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2384         size_t min_size, tunable_size = 0;
2385         int i;
2386
2387         if (!ni || !config)
2388                 return;
2389
2390         net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2391         if (!net_config)
2392                 return;
2393
2394         BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2395                      ARRAY_SIZE(net_config->ni_interfaces));
2396
2397         for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2398                 if (!ni->ni_interfaces[i])
2399                         break;
2400
2401                 strncpy(net_config->ni_interfaces[i],
2402                         ni->ni_interfaces[i],
2403                         sizeof(net_config->ni_interfaces[i]));
2404         }
2405
2406         config->cfg_nid = ni->ni_nid;
2407         config->cfg_config_u.cfg_net.net_peer_timeout =
2408                 ni->ni_net->net_tunables.lct_peer_timeout;
2409         config->cfg_config_u.cfg_net.net_max_tx_credits =
2410                 ni->ni_net->net_tunables.lct_max_tx_credits;
2411         config->cfg_config_u.cfg_net.net_peer_tx_credits =
2412                 ni->ni_net->net_tunables.lct_peer_tx_credits;
2413         config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2414                 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2415
2416         if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2417                 net_config->ni_status = LNET_NI_STATUS_UP;
2418         else
2419                 net_config->ni_status = ni->ni_status->ns_status;
2420
2421         if (ni->ni_cpts) {
2422                 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2423
2424                 for (i = 0; i < num_cpts; i++)
2425                         net_config->ni_cpts[i] = ni->ni_cpts[i];
2426
2427                 config->cfg_ncpts = num_cpts;
2428         }
2429
2430         /*
2431          * See if user land tools sent in a newer and larger version
2432          * of struct lnet_tunables than what the kernel uses.
2433          */
2434         min_size = sizeof(*config) + sizeof(*net_config);
2435
2436         if (config->cfg_hdr.ioc_len > min_size)
2437                 tunable_size = config->cfg_hdr.ioc_len - min_size;
2438
2439         /* Don't copy too much data to user space */
2440         min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2441         lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2442
2443         if (lnd_cfg && min_size) {
2444                 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2445                 config->cfg_config_u.cfg_net.net_interface_count = 1;
2446
2447                 /* Tell user land that kernel side has less data */
2448                 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2449                         min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2450                         config->cfg_hdr.ioc_len -= min_size;
2451                 }
2452         }
2453 }
2454
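/*
 * Return the idx'th NI across all configured nets, or NULL if idx is out
 * of range.  Called with the lnet_net_lock held.
 */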
2455 struct lnet_ni *
2456 lnet_get_ni_idx_locked(int idx)
2457 {
2458         struct lnet_ni          *ni;
2459         struct lnet_net         *net;
2460
2461         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2462                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2463                         if (idx-- == 0)
2464                                 return ni;
2465                 }
2466         }
2467
2468         return NULL;
2469 }
2470
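/*
 * NI iterator.  With prev == NULL the first NI is returned (of mynet if
 * one is given, otherwise of the first net); later calls return the NI
 * after prev, only crossing into the next net when mynet is NULL.  NULL
 * is returned when nothing is left.  Called with the lnet_net_lock held.
 * An illustrative walk over every NI of every net (handle_ni() is a
 * made-up callback):
 *
 *	ni = NULL;
 *	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
 *		handle_ni(ni);
 */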
2471 struct lnet_ni *
2472 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2473 {
2474         struct lnet_ni          *ni;
2475         struct lnet_net         *net = mynet;
2476
2477         if (prev == NULL) {
2478                 if (net == NULL)
2479                         net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2480                                         net_list);
2481                 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2482                                 ni_netlist);
2483
2484                 return ni;
2485         }
2486
2487         if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2488                 /* if you reached the end of the ni list and the net is
2489                  * specified, then there are no more nis in that net */
2490                 if (net != NULL)
2491                         return NULL;
2492
2493                 /* we reached the end of this net ni list. move to the
2494                  * next net */
2495                 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2496                         /* no more nets and no more NIs. */
2497                         return NULL;
2498
2499                 /* get the next net */
2500                 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2501                                  net_list);
2502                 /* get the ni on it */
2503                 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2504                                 ni_netlist);
2505
2506                 return ni;
2507         }
2508
2509         /* there are more nis left */
2510         ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2511
2512         return ni;
2513 }
2514
2515 int
2516 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2517 {
2518         struct lnet_ni *ni;
2519         int cpt;
2520         int rc = -ENOENT;
2521         int idx = config->cfg_count;
2522
2523         cpt = lnet_net_lock_current();
2524
2525         ni = lnet_get_ni_idx_locked(idx);
2526
2527         if (ni != NULL) {
2528                 rc = 0;
2529                 lnet_ni_lock(ni);
2530                 lnet_fill_ni_info_legacy(ni, config);
2531                 lnet_ni_unlock(ni);
2532         }
2533
2534         lnet_net_unlock(cpt);
2535         return rc;
2536 }
2537
2538 int
2539 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2540                    struct lnet_ioctl_config_lnd_tunables *tun,
2541                    struct lnet_ioctl_element_stats *stats,
2542                    __u32 tun_size)
2543 {
2544         struct lnet_ni          *ni;
2545         int                     cpt;
2546         int                     rc = -ENOENT;
2547
2548         if (!cfg_ni || !tun || !stats)
2549                 return -EINVAL;
2550
2551         cpt = lnet_net_lock_current();
2552
2553         ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2554
2555         if (ni) {
2556                 rc = 0;
2557                 lnet_ni_lock(ni);
2558                 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2559                 lnet_ni_unlock(ni);
2560         }
2561
2562         lnet_net_unlock(cpt);
2563         return rc;
2564 }
2565
2566 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
2567 {
2568         struct lnet_ni *ni;
2569         int cpt;
2570         int rc = -ENOENT;
2571
2572         if (!msg_stats)
2573                 return -EINVAL;
2574
2575         cpt = lnet_net_lock_current();
2576
2577         ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
2578
2579         if (ni) {
2580                 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
2581                 rc = 0;
2582         }
2583
2584         lnet_net_unlock(cpt);
2585
2586         return rc;
2587 }
2588
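/*
 * Common helper for dynamically adding a net: refuse nets that are
 * already known as remote nets, grow the ping buffer to cover the new
 * NIs, start the net, start the acceptor thread if this is the first net
 * that needs it, and finally publish the updated ping target.  Ownership
 * of the passed-in net transfers to LNet; it is freed on failure or when
 * it duplicates an existing net.
 */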
2589 static int lnet_add_net_common(struct lnet_net *net,
2590                                struct lnet_ioctl_config_lnd_tunables *tun)
2591 {
2592         __u32                   net_id;
2593         struct lnet_ping_buffer *pbuf;
2594         struct lnet_handle_md   ping_mdh;
2595         int                     rc;
2596         struct lnet_remotenet *rnet;
2597         int                     net_ni_count;
2598         int                     num_acceptor_nets;
2599
2600         lnet_net_lock(LNET_LOCK_EX);
2601         rnet = lnet_find_rnet_locked(net->net_id);
2602         lnet_net_unlock(LNET_LOCK_EX);
2603         /*
2604          * make sure that the net being added doesn't invalidate the
2605          * routing configuration LNet is currently keeping
2606          */
2607         if (rnet) {
2608                 CERROR("Adding net %s will invalidate routing configuration\n",
2609                        libcfs_net2str(net->net_id));
2610                 lnet_net_free(net);
2611                 return -EUSERS;
2612         }
2613
2614         /*
2615          * make sure you calculate the correct number of slots in the ping
2616          * buffer. Since the ping info is a flattened list of all the NIs,
2617          * we should allocate enough slots to accommodate the number of NIs
2618          * which will be added.
2619          *
2620          * Since the NIs haven't been configured yet, use
2621          * lnet_get_net_ni_count_pre(), which checks the net_ni_added list.
2622          */
2623         net_ni_count = lnet_get_net_ni_count_pre(net);
2624
2625         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2626                                     net_ni_count + lnet_get_ni_count(),
2627                                     false);
2628         if (rc < 0) {
2629                 lnet_net_free(net);
2630                 return rc;
2631         }
2632
2633         if (tun)
2634                 memcpy(&net->net_tunables,
2635                        &tun->lt_cmn, sizeof(net->net_tunables));
2636         else
2637                 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
2638
2639         /*
2640          * before starting this network, get a count of the current TCP
2641          * networks which require the acceptor thread to be running. If
2642          * that count is 0 before we start up this network, then we need
2643          * to start the acceptor thread after starting up this network
2644          */
2645         num_acceptor_nets = lnet_count_acceptor_nets();
2646
2647         net_id = net->net_id;
2648
2649         rc = lnet_startup_lndnet(net,
2650                                  (tun) ? &tun->lt_tun : NULL);
2651         if (rc < 0)
2652                 goto failed;
2653
2654         lnet_net_lock(LNET_LOCK_EX);
2655         net = lnet_get_net_locked(net_id);
2656         lnet_net_unlock(LNET_LOCK_EX);
2657
2658         LASSERT(net);
2659
2660         /*
2661          * Start the acceptor thread if this is the first network
2662          * being added that requires the thread.
2663          */
2664         if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2665                 rc = lnet_acceptor_start();
2666                 if (rc < 0) {
2667                         /* shutdown the net that we just started */
2668                         CERROR("Failed to start up acceptor thread\n");
2669                         lnet_shutdown_lndnet(net);
2670                         goto failed;
2671                 }
2672         }
2673
2674         lnet_net_lock(LNET_LOCK_EX);
2675         lnet_peer_net_added(net);
2676         lnet_net_unlock(LNET_LOCK_EX);
2677
2678         lnet_ping_target_update(pbuf, ping_mdh);
2679
2680         return 0;
2681
2682 failed:
2683         lnet_ping_md_unlink(pbuf, &ping_mdh);
2684         lnet_ping_buffer_decref(pbuf);
2685         return rc;
2686 }
2687
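/*
 * Handle an ip2nets string passed in through the legacy DLC interface:
 * expand it into a network string with lnet_parse_ip2nets(), parse that
 * string and add each resulting net via lnet_add_net_common().
 */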
2688 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2689                                       struct lnet_ioctl_config_lnd_tunables *tun)
2690 {
2691         struct lnet_net *net;
2692         char *nets;
2693         int rc;
2694         struct list_head net_head;
2695
2696         INIT_LIST_HEAD(&net_head);
2697
2698         rc = lnet_parse_ip2nets(&nets, ip2nets);
2699         if (rc < 0)
2700                 return rc;
2701
2702         rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2703         if (rc < 0)
2704                 return rc;
2705
2706         mutex_lock(&the_lnet.ln_api_mutex);
2707         while (!list_empty(&net_head)) {
2708                 net = list_entry(net_head.next, struct lnet_net, net_list);
2709                 list_del_init(&net->net_list);
2710                 rc = lnet_add_net_common(net, tun);
2711                 if (rc < 0)
2712                         goto out;
2713         }
2714
2715 out:
2716         mutex_unlock(&the_lnet.ln_api_mutex);
2717
2718         while (!list_empty(&net_head)) {
2719                 net = list_entry(net_head.next, struct lnet_net, net_list);
2720                 list_del_init(&net->net_list);
2721                 lnet_net_free(net);
2722         }
2723         return rc;
2724 }
2725
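/*
 * Dynamically add a single NI described by a DLC configuration block,
 * honouring any LND tunables and CPT list supplied with it, or fall back
 * to the legacy ip2nets handling if such a string was provided.
 */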
2726 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2727 {
2728         struct lnet_net *net;
2729         struct lnet_ni *ni;
2730         struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2731         int rc, i;
2732         __u32 net_id;
2733
2734         /* get the tunables if they are available */
2735         if (conf->lic_cfg_hdr.ioc_len >=
2736             sizeof(*conf) + sizeof(*tun))
2737                 tun = (struct lnet_ioctl_config_lnd_tunables *)
2738                         conf->lic_bulk;
2739
2740         /* handle legacy ip2nets from DLC */
2741         if (conf->lic_legacy_ip2nets[0] != '\0')
2742                 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
2743                                                   tun);
2744
2745         net_id = LNET_NIDNET(conf->lic_nid);
2746
2747         net = lnet_net_alloc(net_id, NULL);
2748         if (!net)
2749                 return -ENOMEM;
2750
2751         for (i = 0; i < conf->lic_ncpts; i++) {
2752                 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER) {
                             /* don't leak the net allocated just above */
                             lnet_net_free(net);
2753                         return -EINVAL;
                     }
2754         }
2755
2756         ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
2757                                        conf->lic_ni_intf[0]);
2758         if (!ni) {
                     lnet_net_free(net);
2759                 return -ENOMEM;
             }
2760
2761         mutex_lock(&the_lnet.ln_api_mutex);
2762
2763         rc = lnet_add_net_common(net, tun);
2764
2765         mutex_unlock(&the_lnet.ln_api_mutex);
2766
2767         return rc;
2768 }
2769
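/*
 * Dynamically remove an NI or, when the NID carries no address part, the
 * whole net it names.  A new ping target is prepared before the NIs are
 * shut down and published afterwards, and the acceptor thread is stopped
 * if no remaining net needs it.
 */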
2770 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
2771 {
2772         struct lnet_net  *net;
2773         struct lnet_ni *ni;
2774         __u32 net_id = LNET_NIDNET(conf->lic_nid);
2775         struct lnet_ping_buffer *pbuf;
2776         struct lnet_handle_md  ping_mdh;
2777         int               rc;
2778         int               net_count;
2779         __u32             addr;
2780
2781         /* don't allow userspace to shutdown the LOLND */
2782         if (LNET_NETTYP(net_id) == LOLND)
2783                 return -EINVAL;
2784
2785         mutex_lock(&the_lnet.ln_api_mutex);
2786
2787         lnet_net_lock(0);
2788
2789         net = lnet_get_net_locked(net_id);
2790         if (!net) {
2791                 CERROR("net %s not found\n",
2792                        libcfs_net2str(net_id));
2793                 rc = -ENOENT;
2794                 goto unlock_net;
2795         }
2796
2797         addr = LNET_NIDADDR(conf->lic_nid);
2798         if (addr == 0) {
2799                 /* remove the entire net */
2800                 net_count = lnet_get_net_ni_count_locked(net);
2801
2802                 lnet_net_unlock(0);
2803
2804                 /* create and link a new ping info, before removing the old one */
2805                 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2806                                         lnet_get_ni_count() - net_count,
2807                                         false);
2808                 if (rc != 0)
2809                         goto unlock_api_mutex;
2810
2811                 lnet_shutdown_lndnet(net);
2812
2813                 if (lnet_count_acceptor_nets() == 0)
2814                         lnet_acceptor_stop();
2815
2816                 lnet_ping_target_update(pbuf, ping_mdh);
2817
2818                 goto unlock_api_mutex;
2819         }
2820
2821         ni = lnet_nid2ni_locked(conf->lic_nid, 0);
2822         if (!ni) {
2823                 CERROR("nid %s not found\n",
2824                        libcfs_nid2str(conf->lic_nid));
2825                 rc = -ENOENT;
2826                 goto unlock_net;
2827         }
2828
2829         net_count = lnet_get_net_ni_count_locked(net);
2830
2831         lnet_net_unlock(0);
2832
2833         /* create and link a new ping info, before removing the old one */
2834         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2835                                   lnet_get_ni_count() - 1, false);
2836         if (rc != 0)
2837                 goto unlock_api_mutex;
2838
2839         lnet_shutdown_lndni(ni);
2840
2841         if (lnet_count_acceptor_nets() == 0)
2842                 lnet_acceptor_stop();
2843
2844         lnet_ping_target_update(pbuf, ping_mdh);
2845
2846         /* check if the net is empty and remove it if it is */
2847         if (net_count == 1)
2848                 lnet_shutdown_lndnet(net);
2849
2850         goto unlock_api_mutex;
2851
2852 unlock_net:
2853         lnet_net_unlock(0);
2854 unlock_api_mutex:
2855         mutex_unlock(&the_lnet.ln_api_mutex);
2856
2857         return rc;
2858 }
2859
2860 /*
2861  * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
2862  * They are only expected to be called for unique networks,
2863  * typically as a result of calls from an older DLC library.
2864  * Multi-Rail DLC and beyond no longer use these APIs.
2865  */
2866 int
2867 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
2868 {
2869         struct lnet_net         *net;
2870         struct list_head        net_head;
2871         int                     rc;
2872         struct lnet_ioctl_config_lnd_tunables tun;
2873         char *nets = conf->cfg_config_u.cfg_net.net_intf;
2874
2875         INIT_LIST_HEAD(&net_head);
2876
2877         /* Create a net/ni structures for the network string */
2878         rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2879         if (rc <= 0)
2880                 return rc == 0 ? -EINVAL : rc;
2881
2882         mutex_lock(&the_lnet.ln_api_mutex);
2883
2884         if (rc > 1) {
2885                 rc = -EINVAL; /* only add one network per call */
2886                 goto out_unlock_clean;
2887         }
2888
2889         net = list_entry(net_head.next, struct lnet_net, net_list);
2890         list_del_init(&net->net_list);
2891
2892         LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
2893
2894         memset(&tun, 0, sizeof(tun));
2895
2896         tun.lt_cmn.lct_peer_timeout =
2897           conf->cfg_config_u.cfg_net.net_peer_timeout;
2898         tun.lt_cmn.lct_peer_tx_credits =
2899           conf->cfg_config_u.cfg_net.net_peer_tx_credits;
2900         tun.lt_cmn.lct_peer_rtr_credits =
2901           conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
2902         tun.lt_cmn.lct_max_tx_credits =
2903           conf->cfg_config_u.cfg_net.net_max_tx_credits;
2904
2905         rc = lnet_add_net_common(net, &tun);
2906
2907 out_unlock_clean:
2908         mutex_unlock(&the_lnet.ln_api_mutex);
2909         while (!list_empty(&net_head)) {
2910                 /* net_head list is empty in success case */
2911                 net = list_entry(net_head.next, struct lnet_net, net_list);
2912                 list_del_init(&net->net_list);
2913                 lnet_net_free(net);
2914         }
2915         return rc;
2916 }
2917
2918 int
2919 lnet_dyn_del_net(__u32 net_id)
2920 {
2921         struct lnet_net  *net;
2922         struct lnet_ping_buffer *pbuf;
2923         struct lnet_handle_md ping_mdh;
2924         int               rc;
2925         int               net_ni_count;
2926
2927         /* don't allow userspace to shutdown the LOLND */
2928         if (LNET_NETTYP(net_id) == LOLND)
2929                 return -EINVAL;
2930
2931         mutex_lock(&the_lnet.ln_api_mutex);
2932
2933         lnet_net_lock(0);
2934
2935         net = lnet_get_net_locked(net_id);
2936         if (net == NULL) {
2937                 lnet_net_unlock(0);
2938                 rc = -EINVAL;
2939                 goto out;
2940         }
2941
2942         net_ni_count = lnet_get_net_ni_count_locked(net);
2943
2944         lnet_net_unlock(0);
2945
2946         /* create and link a new ping info, before removing the old one */
2947         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2948                                     lnet_get_ni_count() - net_ni_count, false);
2949         if (rc != 0)
2950                 goto out;
2951
2952         lnet_shutdown_lndnet(net);
2953
2954         if (lnet_count_acceptor_nets() == 0)
2955                 lnet_acceptor_stop();
2956
2957         lnet_ping_target_update(pbuf, ping_mdh);
2958
2959 out:
2960         mutex_unlock(&the_lnet.ln_api_mutex);
2961
2962         return rc;
2963 }
2964
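/*
 * The DLC configuration sequence number is bumped whenever NIs are added
 * or removed (see lnet_startup_lndnet() and lnet_shutdown_lndni()), which
 * lets a reader detect that the configuration has changed underneath it.
 */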
2965 void lnet_incr_dlc_seq(void)
2966 {
2967         atomic_inc(&lnet_dlc_seq_no);
2968 }
2969
2970 __u32 lnet_get_dlc_seq_locked(void)
2971 {
2972         return atomic_read(&lnet_dlc_seq_no);
2973 }
2974
2975 /**
2976  * LNet ioctl handler.
2977  *
2978  */
2979 int
2980 LNetCtl(unsigned int cmd, void *arg)
2981 {
2982         struct libcfs_ioctl_data *data = arg;
2983         struct lnet_ioctl_config_data *config;
2984         struct lnet_process_id    id = {0};
2985         struct lnet_ni           *ni;
2986         int                       rc;
2987
2988         BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
2989                      sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
2990
2991         switch (cmd) {
2992         case IOC_LIBCFS_GET_NI:
2993                 rc = LNetGetId(data->ioc_count, &id);
2994                 data->ioc_nid = id.nid;
2995                 return rc;
2996
2997         case IOC_LIBCFS_FAIL_NID:
2998                 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
2999
3000         case IOC_LIBCFS_ADD_ROUTE:
3001                 config = arg;
3002
3003                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3004                         return -EINVAL;
3005
3006                 mutex_lock(&the_lnet.ln_api_mutex);
3007                 rc = lnet_add_route(config->cfg_net,
3008                                     config->cfg_config_u.cfg_route.rtr_hop,
3009                                     config->cfg_nid,
3010                                     config->cfg_config_u.cfg_route.
3011                                         rtr_priority);
3012                 if (rc == 0) {
3013                         rc = lnet_check_routes();
3014                         if (rc != 0)
3015                                 lnet_del_route(config->cfg_net,
3016                                                config->cfg_nid);
3017                 }
3018                 mutex_unlock(&the_lnet.ln_api_mutex);
3019                 return rc;
3020
3021         case IOC_LIBCFS_DEL_ROUTE:
3022                 config = arg;
3023
3024                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3025                         return -EINVAL;
3026
3027                 mutex_lock(&the_lnet.ln_api_mutex);
3028                 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3029                 mutex_unlock(&the_lnet.ln_api_mutex);
3030                 return rc;
3031
3032         case IOC_LIBCFS_GET_ROUTE:
3033                 config = arg;
3034
3035                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3036                         return -EINVAL;
3037
3038                 mutex_lock(&the_lnet.ln_api_mutex);
3039                 rc = lnet_get_route(config->cfg_count,
3040                                     &config->cfg_net,
3041                                     &config->cfg_config_u.cfg_route.rtr_hop,
3042                                     &config->cfg_nid,
3043                                     &config->cfg_config_u.cfg_route.rtr_flags,
3044                                     &config->cfg_config_u.cfg_route.
3045                                         rtr_priority);
3046                 mutex_unlock(&the_lnet.ln_api_mutex);
3047                 return rc;
3048
3049         case IOC_LIBCFS_GET_LOCAL_NI: {
3050                 struct lnet_ioctl_config_ni *cfg_ni;
3051                 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3052                 struct lnet_ioctl_element_stats *stats;
3053                 __u32 tun_size;
3054
3055                 cfg_ni = arg;
3056
3057                 /* get the tunables if they are available */
3058                 if (cfg_ni->lic_cfg_hdr.ioc_len <
3059                     sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3060                         return -EINVAL;
3061
3062                 stats = (struct lnet_ioctl_element_stats *)
3063                         cfg_ni->lic_bulk;
3064                 tun = (struct lnet_ioctl_config_lnd_tunables *)
3065                                 (cfg_ni->lic_bulk + sizeof(*stats));
3066
3067                 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3068                         sizeof(*stats);
3069
3070                 mutex_lock(&the_lnet.ln_api_mutex);
3071                 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3072                 mutex_unlock(&the_lnet.ln_api_mutex);
3073                 return rc;
3074         }
3075
3076         case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3077                 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3078
3079                 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3080                         return -EINVAL;
3081
3082                 mutex_lock(&the_lnet.ln_api_mutex);
3083                 rc = lnet_get_ni_stats(msg_stats);
3084                 mutex_unlock(&the_lnet.ln_api_mutex);
3085
3086                 return rc;
3087         }
3088
3089         case IOC_LIBCFS_GET_NET: {
3090                 size_t total = sizeof(*config) +
3091                                sizeof(struct lnet_ioctl_net_config);
3092                 config = arg;
3093
3094                 if (config->cfg_hdr.ioc_len < total)
3095                         return -EINVAL;
3096
3097                 mutex_lock(&the_lnet.ln_api_mutex);
3098                 rc = lnet_get_net_config(config);
3099                 mutex_unlock(&the_lnet.ln_api_mutex);
3100                 return rc;
3101         }
3102
3103         case IOC_LIBCFS_GET_LNET_STATS:
3104         {
3105                 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3106
3107                 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3108                         return -EINVAL;
3109
3110                 mutex_lock(&the_lnet.ln_api_mutex);
3111                 lnet_counters_get(&lnet_stats->st_cntrs);
3112                 mutex_unlock(&the_lnet.ln_api_mutex);
3113                 return 0;
3114         }
3115
3116         case IOC_LIBCFS_CONFIG_RTR:
3117                 config = arg;
3118
3119                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3120                         return -EINVAL;
3121
3122                 mutex_lock(&the_lnet.ln_api_mutex);
3123                 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3124                         rc = lnet_rtrpools_enable();
3125                         mutex_unlock(&the_lnet.ln_api_mutex);
3126                         return rc;
3127                 }
3128                 lnet_rtrpools_disable();
3129                 mutex_unlock(&the_lnet.ln_api_mutex);
3130                 return 0;
3131
3132         case IOC_LIBCFS_ADD_BUF:
3133                 config = arg;
3134
3135                 if (config->cfg_hdr.ioc_len < sizeof(*config))
3136                         return -EINVAL;
3137
3138                 mutex_lock(&the_lnet.ln_api_mutex);
3139                 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3140                                                 buf_tiny,
3141                                           config->cfg_config_u.cfg_buffers.
3142                                                 buf_small,
3143                                           config->cfg_config_u.cfg_buffers.
3144                                                 buf_large);
3145                 mutex_unlock(&the_lnet.ln_api_mutex);
3146                 return rc;
3147
3148         case IOC_LIBCFS_SET_NUMA_RANGE: {
3149                 struct lnet_ioctl_set_value *numa;
3150                 numa = arg;
3151                 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3152                         return -EINVAL;
3153                 lnet_net_lock(LNET_LOCK_EX);
3154                 lnet_numa_range = numa->sv_value;
3155                 lnet_net_unlock(LNET_LOCK_EX);
3156                 return 0;
3157         }
3158
3159         case IOC_LIBCFS_GET_NUMA_RANGE: {
3160                 struct lnet_ioctl_set_value *numa;
3161                 numa = arg;
3162                 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3163                         return -EINVAL;
3164                 numa->sv_value = lnet_numa_range;
3165                 return 0;
3166         }
3167
3168         case IOC_LIBCFS_GET_BUF: {
3169                 struct lnet_ioctl_pool_cfg *pool_cfg;
3170                 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3171
3172                 config = arg;
3173
3174                 if (config->cfg_hdr.ioc_len < total)
3175                         return -EINVAL;
3176
3177                 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3178
3179                 mutex_lock(&the_lnet.ln_api_mutex);
3180                 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3181                 mutex_unlock(&the_lnet.ln_api_mutex);
3182                 return rc;
3183         }
3184
3185         case IOC_LIBCFS_ADD_PEER_NI: {
3186                 struct lnet_ioctl_peer_cfg *cfg = arg;
3187
3188                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3189                         return -EINVAL;
3190
3191                 mutex_lock(&the_lnet.ln_api_mutex);
3192                 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3193                                       cfg->prcfg_cfg_nid,
3194                                       cfg->prcfg_mr);
3195                 mutex_unlock(&the_lnet.ln_api_mutex);
3196                 return rc;
3197         }
3198
3199         case IOC_LIBCFS_DEL_PEER_NI: {
3200                 struct lnet_ioctl_peer_cfg *cfg = arg;
3201
3202                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3203                         return -EINVAL;
3204
3205                 mutex_lock(&the_lnet.ln_api_mutex);
3206                 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3207                                       cfg->prcfg_cfg_nid);
3208                 mutex_unlock(&the_lnet.ln_api_mutex);
3209                 return rc;
3210         }
3211
3212         case IOC_LIBCFS_GET_PEER_INFO: {
3213                 struct lnet_ioctl_peer *peer_info = arg;
3214
3215                 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3216                         return -EINVAL;
3217
3218                 mutex_lock(&the_lnet.ln_api_mutex);
3219                 rc = lnet_get_peer_ni_info(
3220                    peer_info->pr_count,
3221                    &peer_info->pr_nid,
3222                    peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3223                    &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3224                    &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3225                    &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3226                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3227                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3228                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3229                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3230                 mutex_unlock(&the_lnet.ln_api_mutex);
3231                 return rc;
3232         }
3233
3234         case IOC_LIBCFS_GET_PEER_NI: {
3235                 struct lnet_ioctl_peer_cfg *cfg = arg;
3236
3237                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3238                         return -EINVAL;
3239
3240                 mutex_lock(&the_lnet.ln_api_mutex);
3241                 rc = lnet_get_peer_info(cfg,
3242                                         (void __user *)cfg->prcfg_bulk);
3243                 mutex_unlock(&the_lnet.ln_api_mutex);
3244                 return rc;
3245         }
3246
3247         case IOC_LIBCFS_GET_PEER_LIST: {
3248                 struct lnet_ioctl_peer_cfg *cfg = arg;
3249
3250                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3251                         return -EINVAL;
3252
3253                 mutex_lock(&the_lnet.ln_api_mutex);
3254                 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3255                                 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3256                 mutex_unlock(&the_lnet.ln_api_mutex);
3257                 return rc;
3258         }
3259
3260         case IOC_LIBCFS_NOTIFY_ROUTER: {
3261                 unsigned long jiffies_passed;
3262
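                /*
                 * ioc_u64[0] carries the wall-clock time, in seconds, at
                 * which the peer's aliveness last changed; the elapsed time
                 * is converted to jiffies so lnet_notify() receives the
                 * event time on the jiffies clock.
                 */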
3263                 jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
3264                 jiffies_passed = cfs_time_seconds(jiffies_passed);
3265
3266                 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3267                                    jiffies - jiffies_passed);
3268         }
3269
3270         case IOC_LIBCFS_LNET_DIST:
3271                 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3272                 if (rc < 0 && rc != -EHOSTUNREACH)
3273                         return rc;
3274
3275                 data->ioc_u32[0] = rc;
3276                 return 0;
3277
3278         case IOC_LIBCFS_TESTPROTOCOMPAT:
3279                 lnet_net_lock(LNET_LOCK_EX);
3280                 the_lnet.ln_testprotocompat = data->ioc_flags;
3281                 lnet_net_unlock(LNET_LOCK_EX);
3282                 return 0;
3283
3284         case IOC_LIBCFS_LNET_FAULT:
3285                 return lnet_fault_ctl(data->ioc_flags, data);
3286
3287         case IOC_LIBCFS_PING: {
3288                 signed long timeout;
3289
3290                 id.nid = data->ioc_nid;
3291                 id.pid = data->ioc_u32[0];
3292
3293                 /* use the 3 minute default if timeout is non-positive or too large */
3294                 if (((s32)data->ioc_u32[1] <= 0) ||
3295                     data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3296                         timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3297                 else
3298                         timeout = msecs_to_jiffies(data->ioc_u32[1]);
3299
3300                 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3301                                data->ioc_plen1 / sizeof(struct lnet_process_id));
3302
3303                 if (rc < 0)
3304                         return rc;
3305
3306                 data->ioc_count = rc;
3307                 return 0;
3308         }
3309
3310         case IOC_LIBCFS_PING_PEER: {
3311                 struct lnet_ioctl_ping_data *ping = arg;
3312                 struct lnet_peer *lp;
3313                 signed long timeout;
3314
3315                 /* use the 3 minute default if timeout is non-positive or too large */
3316                 if (((s32)ping->op_param) <= 0 ||
3317                     ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3318                         timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3319                 else
3320                         timeout = msecs_to_jiffies(ping->op_param);
3321
3322                 rc = lnet_ping(ping->ping_id, timeout,
3323                                ping->ping_buf,
3324                                ping->ping_count);
3325                 if (rc < 0)
3326                         return rc;
3327
3328                 mutex_lock(&the_lnet.ln_api_mutex);
3329                 lp = lnet_find_peer(ping->ping_id.nid);
3330                 if (lp) {
3331                         ping->ping_id.nid = lp->lp_primary_nid;
3332                         ping->mr_info = lnet_peer_is_multi_rail(lp);
3333                         lnet_peer_decref_locked(lp);
3334                 }
3335                 mutex_unlock(&the_lnet.ln_api_mutex);
3336
3337                 ping->ping_count = rc;
3338                 return 0;
3339         }
3340
3341         case IOC_LIBCFS_DISCOVER: {
3342                 struct lnet_ioctl_ping_data *discover = arg;
3343                 struct lnet_peer *lp;
3344
3345                 rc = lnet_discover(discover->ping_id, discover->op_param,
3346                                    discover->ping_buf,
3347                                    discover->ping_count);
3348                 if (rc < 0)
3349                         return rc;
3350
3351                 mutex_lock(&the_lnet.ln_api_mutex);
3352                 lp = lnet_find_peer(discover->ping_id.nid);
3353                 if (lp) {
3354                         discover->ping_id.nid = lp->lp_primary_nid;
3355                         discover->mr_info = lnet_peer_is_multi_rail(lp);
3356                         lnet_peer_decref_locked(lp);
3357                 }
3358                 mutex_unlock(&the_lnet.ln_api_mutex);
3359
3360                 discover->ping_count = rc;
3361                 return 0;
3362         }
3363
3364         default:
3365                 ni = lnet_net2ni_addref(data->ioc_net);
3366                 if (ni == NULL)
3367                         return -EINVAL;
3368
3369                 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3370                         rc = -EINVAL;
3371                 else
3372                         rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3373
3374                 lnet_ni_decref(ni);
3375                 return rc;
3376         }
3377         /* not reached */
3378 }
3379 EXPORT_SYMBOL(LNetCtl);
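
/*
 * A minimal sketch (illustrative only, excluded from the build) of a
 * kernel-side caller querying the NUMA range through the ioctl handler
 * above.  The helper name example_get_numa_range() is hypothetical; the
 * header is filled in the way the IOC_LIBCFS_GET_NUMA_RANGE case expects,
 * i.e. ioc_len equal to sizeof(struct lnet_ioctl_set_value).  sv_value is
 * stored into an unsigned int here purely for illustration.
 */
#if 0
static int example_get_numa_range(unsigned int *range)
{
	struct lnet_ioctl_set_value numa = { 0 };
	int rc;

	numa.sv_hdr.ioc_len = sizeof(numa);
	rc = LNetCtl(IOC_LIBCFS_GET_NUMA_RANGE, &numa);
	if (rc == 0)
		*range = numa.sv_value;
	return rc;
}
#endif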
3380
3381 void LNetDebugPeer(struct lnet_process_id id)
3382 {
3383         lnet_debug_peer(id.nid);
3384 }
3385 EXPORT_SYMBOL(LNetDebugPeer);
3386
3387 /**
3388  * Determine if the specified peer \a nid is on the local node.
3389  *
3390  * \param nid   peer nid to check
3391  *
3392  * \retval true         If peer NID is on the local node.
3393  * \retval false        If peer NID is not on the local node.
3394  */
3395 bool LNetIsPeerLocal(lnet_nid_t nid)
3396 {
3397         struct lnet_net *net;
3398         struct lnet_ni *ni;
3399         int cpt;
3400
3401         cpt = lnet_net_lock_current();
3402         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3403                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3404                         if (ni->ni_nid == nid) {
3405                                 lnet_net_unlock(cpt);
3406                                 return true;
3407                         }
3408                 }
3409         }
3410         lnet_net_unlock(cpt);
3411
3412         return false;
3413 }
3414 EXPORT_SYMBOL(LNetIsPeerLocal);
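
/*
 * Usage sketch (illustrative only, excluded from the build): a caller can
 * use LNetIsPeerLocal() to special-case traffic addressed to one of this
 * node's own NIDs.  The helper name example_note_local_nid() is
 * hypothetical.
 */
#if 0
static void example_note_local_nid(struct lnet_process_id id)
{
	/* e.g. a caller could skip a wire ping for one of our own NIDs */
	if (LNetIsPeerLocal(id.nid))
		CDEBUG(D_NET, "%s is a local NID\n", libcfs_id2str(id));
}
#endif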
3415
3416 /**
3417  * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3418  * Note that all interfaces share the same PID, as requested by LNetNIInit().
3419  *
3420  * \param index Index of the interface to look up.
3421  * \param id On successful return, this location will hold the
3422  * struct lnet_process_id ID of the interface.
3423  *
3424  * \retval 0 If an interface exists at \a index.
3425  * \retval -ENOENT If no interface has been found.
3426  */
3427 int
3428 LNetGetId(unsigned int index, struct lnet_process_id *id)
3429 {
3430         struct lnet_ni   *ni;
3431         struct lnet_net  *net;
3432         int               cpt;
3433         int               rc = -ENOENT;
3434
3435         LASSERT(the_lnet.ln_refcount > 0);
3436
3437         cpt = lnet_net_lock_current();
3438
3439         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3440                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3441                         if (index-- != 0)
3442                                 continue;
3443
3444                         id->nid = ni->ni_nid;
3445                         id->pid = the_lnet.ln_pid;
3446                         rc = 0;
3447                         break;
3448                 }
3449         }
3450
3451         lnet_net_unlock(cpt);
3452         return rc;
3453 }
3454 EXPORT_SYMBOL(LNetGetId);
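
/*
 * A minimal sketch (illustrative only, excluded from the build) of the
 * usual enumeration pattern for LNetGetId(): walk indices from 0 until
 * -ENOENT is returned.  It assumes LNet has been initialized (the LASSERT
 * above requires ln_refcount > 0); the helper name example_dump_local_ids()
 * is hypothetical.
 */
#if 0
static void example_dump_local_ids(void)
{
	struct lnet_process_id id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}
#endif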
3455
3456 static int lnet_ping(struct lnet_process_id id, signed long timeout,
3457                      struct lnet_process_id __user *ids, int n_ids)
3458 {
3459         struct lnet_handle_eq eqh;
3460         struct lnet_handle_md mdh;
3461         struct lnet_event event;
3462         struct lnet_md md = { NULL };
3463         int which;
3464         int unlinked = 0;
3465         int replied = 0;
3466         const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
3467         struct lnet_ping_buffer *pbuf;
3468         struct lnet_process_id tmpid;
3469         int i;
3470         int nob;
3471         int rc;
3472         int rc2;
3473         sigset_t blocked;
3474
3475         /* a positive n_ids and a specific NID are required */
3476         if (n_ids <= 0 || id.nid == LNET_NID_ANY)
3477                 return -EINVAL;
3478
3479         /*
3480          * If the user buffer has room for more than lnet_interfaces_max
3481          * entries, only fill it up to lnet_interfaces_max.
3482          */
3483         if (n_ids > lnet_interfaces_max)
3484                 n_ids = lnet_interfaces_max;
3485
3486         if (id.pid == LNET_PID_ANY)
3487                 id.pid = LNET_PID_LUSTRE;
3488
3489         pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
3490         if (!pbuf)
3491                 return -ENOMEM;
3492
3493         /* NB 2 events max (including any unlink event) */
3494         rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
3495         if (rc != 0) {
3496                 CERROR("Can't allocate EQ: %d\n", rc);
3497                 goto fail_ping_buffer_decref;
3498         }
3499
3500         /* initialize md content */
3501         md.start     = &pbuf->pb_info;
3502         md.length    = LNET_PING_INFO_SIZE(n_ids);
3503         md.threshold = 2; /* GET/REPLY */
3504         md.max_size  = 0;
3505         md.options   = LNET_MD_TRUNCATE;
3506         md.user_ptr  = NULL;
3507         md.eq_handle = eqh;
3508
3509         rc = LNetMDBind(md, LNET_UNLINK, &mdh);
3510         if (rc != 0) {
3511                 CERROR("Can't bind MD: %d\n", rc);
3512                 goto fail_free_eq;
3513         }
3514
3515         rc = LNetGet(LNET_NID_ANY, mdh, id,
3516                      LNET_RESERVED_PORTAL,
3517                      LNET_PROTO_PING_MATCHBITS, 0);
3518
3519         if (rc != 0) {
3520                 /* Don't CERROR; this could be deliberate! */
3521                 rc2 = LNetMDUnlink(mdh);
3522                 LASSERT(rc2 == 0);
3523
3524                 /* NB must wait for the UNLINK event below... */
3525                 unlinked = 1;
3526                 timeout = a_long_time;
3527         }
3528
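        /*
         * Poll for events until the REPLY has been seen (or a failure has
         * been recorded) and the MD has been unlinked.  On timeout or error
         * the MD is unlinked explicitly and polling continues until the
         * unlink event arrives, so the network can no longer touch the ping
         * buffer once this loop exits.
         */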
3529         do {
3530                 /* MUST block for unlink to complete */
3531                 if (unlinked)
3532                         blocked = cfs_block_allsigs();
3533
3534                 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
3535
3536                 if (unlinked)
3537                         cfs_restore_sigs(blocked);
3538
3539                 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
3540                        (rc2 <= 0) ? -1 : event.type,
3541                        (rc2 <= 0) ? -1 : event.status,
3542                        (rc2 > 0 && event.unlinked) ? " unlinked" : "");
3543
3544                 LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */
3545
3546                 if (rc2 <= 0 || event.status != 0) {
3547                         /* timeout or error */
3548                         if (!replied && rc == 0)
3549                                 rc = (rc2 < 0) ? rc2 :
3550                                      (rc2 == 0) ? -ETIMEDOUT :
3551                                      event.status;
3552
3553                         if (!unlinked) {
3554                                 /* Ensure completion in finite time... */
3555                                 LNetMDUnlink(mdh);
3556                                 /* No assertion (racing with network) */
3557                                 unlinked = 1;
3558                                 timeout = a_long_time;
3559                         } else if (rc2 == 0) {
3560                                 /* timed out waiting for unlink */
3561                                 CWARN("ping %s: late network completion\n",
3562                                       libcfs_id2str(id));
3563                         }
3564                 } else if (event.type == LNET_EVENT_REPLY) {
3565                         replied = 1;
3566                         rc = event.mlength;
3567                 }
3568         } while (rc2 <= 0 || !event.unlinked);
3569
3570         if (!replied) {
3571                 if (rc >= 0)
3572                         CWARN("%s: Unexpected rc >= 0 but no reply!\n",
3573                               libcfs_id2str(id));
3574                 rc = -EIO;
3575                 goto fail_free_eq;
3576         }
3577
3578         nob = rc;
3579         LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
3580
3581         rc = -EPROTO;           /* if I can't parse... */
3582
3583         if (nob < 8) {
3584                 CERROR("%s: ping info too short %d\n",
3585                        libcfs_id2str(id), nob);
3586                 goto fail_free_eq;
3587         }
3588
3589         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
3590                 lnet_swap_pinginfo(pbuf);
3591         } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
3592                 CERROR("%s: Unexpected magic %08x\n",
3593                        libcfs_id2str(id), pbuf->pb_info.pi_magic);
3594                 goto fail_free_eq;
3595         }
3596
3597         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
3598                 CERROR("%s: ping w/o NI status: 0x%x\n",
3599                        libcfs_id2str(id), pbuf->pb_info.pi_features);
3600                 goto fail_free_eq;
3601         }
3602
3603         if (nob < LNET_PING_INFO_SIZE(0)) {
3604                 CERROR("%s: Short reply %d(%d min)\n",
3605                        libcfs_id2str(id),
3606                        nob, (int)LNET_PING_INFO_SIZE(0));
3607                 goto fail_free_eq;
3608         }
3609
3610         if (pbuf->pb_info.pi_nnis < n_ids)
3611                 n_ids = pbuf->pb_info.pi_nnis;
3612
3613         if (nob < LNET_PING_INFO_SIZE(n_ids)) {
3614                 CERROR("%s: Short reply %d(%d expected)\n",
3615                        libcfs_id2str(id),
3616                        nob, (int)LNET_PING_INFO_SIZE(n_ids));
3617                 goto fail_free_eq;
3618         }
3619
3620         rc = -EFAULT;           /* if copy_to_user() fails... */
3621
3622         memset(&tmpid, 0, sizeof(tmpid));
3623         for (i = 0; i < n_ids; i++) {
3624                 tmpid.pid = pbuf->pb_info.pi_pid;
3625                 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
3626                 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
3627                         goto fail_free_eq;
3628         }
3629         rc = pbuf->pb_info.pi_nnis;
3630
3631  fail_free_eq:
3632         rc2 = LNetEQFree(eqh);
3633         if (rc2 != 0)
3634                 CERROR("Can't free EQ: %d\n", rc2);
3635         LASSERT(rc2 == 0);
3636
3637  fail_ping_buffer_decref:
3638         lnet_ping_buffer_decref(pbuf);
3639         return rc;
3640 }
3641
3642 static int
3643 lnet_discover(struct lnet_process_id id, __u32 force,
3644               struct lnet_process_id __user *ids, int n_ids)
3645 {
3646         struct lnet_peer_ni *lpni;
3647         struct lnet_peer_ni *p;
3648         struct lnet_peer *lp;
3649         struct lnet_process_id *buf;
3650         int cpt;
3651         int i;
3652         int rc;
3653         int max_intf = lnet_interfaces_max;
3654         size_t buf_size;
3655
3656         if (n_ids <= 0 ||
3657             id.nid == LNET_NID_ANY)
3658                 return -EINVAL;
3659
3660         if (id.pid == LNET_PID_ANY)
3661                 id.pid = LNET_PID_LUSTRE;
3662
3663         /*
3664          * If the user buffer has room for more than max_intf entries,
3665          * only fill it up to max_intf.
3666          */
3667         if (n_ids > max_intf)
3668                 n_ids = max_intf;
3669
3670         buf_size = n_ids * sizeof(*buf);
3671
3672         LIBCFS_ALLOC(buf, buf_size);
3673         if (!buf)
3674                 return -ENOMEM;
3675
3676         cpt = lnet_net_lock_current();
3677         lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
3678         if (IS_ERR(lpni)) {
3679                 rc = PTR_ERR(lpni);
3680                 goto out;
3681         }
3682
3683         /*
3684          * Clearing the NIDS_UPTODATE flag ensures the peer will
3685          * be discovered, provided discovery has not been disabled.
3686          */
3687         lp = lpni->lpni_peer_net->lpn_peer;
3688         spin_lock(&lp->lp_lock);
3689         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3690         /* If the force flag is set, force a PING and PUSH as well. */
3691         if (force)
3692                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
3693         spin_unlock(&lp->lp_lock);
3694         rc = lnet_discover_peer_locked(lpni, cpt, true);
3695         if (rc)
3696                 goto out_decref;
3697
3698         /* The peer object may have changed as a result of discovery. */
3699         lp = lpni->lpni_peer_net->lpn_peer;
3700         if (lp->lp_nnis < n_ids)
3701                 n_ids = lp->lp_nnis;
3702
3703         i = 0;
3704         p = NULL;
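        /* record up to n_ids of the peer's NIDs for the caller */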
3705         while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
3706                 buf[i].pid = id.pid;
3707                 buf[i].nid = p->lpni_nid;
3708                 if (++i >= n_ids)
3709                         break;
3710         }
3711
3712         lnet_net_unlock(cpt);
3713
3714         rc = -EFAULT;
3715         if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
3716                 goto out_relock;
3717         rc = n_ids;
3718 out_relock:
3719         lnet_net_lock(cpt);
3720 out_decref:
3721         lnet_peer_ni_decref_locked(lpni);
3722 out:
3723         lnet_net_unlock(cpt);
3724
3725         LIBCFS_FREE(buf, buf_size);
3726
3727         return rc;
3728 }