Whamcloud - gitweb
ee3608553aff03d7fe3efcfcec6cad520b5f16a6
[fs/lustre-release.git] / lnet / lnet / api-ni.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 #include <linux/moduleparam.h>
37
38 #include <lnet/lib-lnet.h>
39
40 #define D_LNI D_CONSOLE
41
42 struct lnet the_lnet;           /* THE state of the network */
43 EXPORT_SYMBOL(the_lnet);
44
45 static char *ip2nets = "";
46 module_param(ip2nets, charp, 0444);
47 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
48
49 static char *networks = "";
50 module_param(networks, charp, 0444);
51 MODULE_PARM_DESC(networks, "local networks");
52
53 static char *routes = "";
54 module_param(routes, charp, 0444);
55 MODULE_PARM_DESC(routes, "routes to non-local networks");
56
57 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
58 module_param(rnet_htable_size, int, 0444);
59 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
60
61 static int use_tcp_bonding = false;
62 module_param(use_tcp_bonding, int, 0444);
63 MODULE_PARM_DESC(use_tcp_bonding,
64                  "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
65
66 unsigned int lnet_numa_range = 0;
67 module_param(lnet_numa_range, uint, 0444);
68 MODULE_PARM_DESC(lnet_numa_range,
69                 "NUMA range to consider during Multi-Rail selection");
70
71 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
72 static int intf_max_set(const char *val, struct kernel_param *kp);
73 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
74                   &lnet_interfaces_max, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(lnet_interfaces_max,
76                 "Maximum number of interfaces in a node.");
77
78 /*
79  * This sequence number keeps track of how many times DLC was used to
80  * update the local NIs. It is incremented when a NI is added or
81  * removed and checked when sending a message to determine if there is
82  * a need to re-run the selection algorithm. See lnet_select_pathway()
83  * for more details on its usage.
84  */
85 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
86
87 static int lnet_ping(struct lnet_process_id id, signed long timeout,
88                      struct lnet_process_id __user *ids, int n_ids);
89
90 static int
91 intf_max_set(const char *val, struct kernel_param *kp)
92 {
93         int value, rc;
94
95         rc = kstrtoint(val, 0, &value);
96         if (rc) {
97                 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
98                 return rc;
99         }
100
101         if (value < LNET_INTERFACES_MIN) {
102                 CWARN("max interfaces provided are too small, setting to %d\n",
103                       LNET_INTERFACES_MIN);
104                 value = LNET_INTERFACES_MIN;
105         }
106
107         *(int *)kp->arg = value;
108
109         return 0;
110 }
111
112 static char *
113 lnet_get_routes(void)
114 {
115         return routes;
116 }
117
118 static char *
119 lnet_get_networks(void)
120 {
121         char   *nets;
122         int     rc;
123
124         if (*networks != 0 && *ip2nets != 0) {
125                 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
126                                    "'ip2nets' but not both at once\n");
127                 return NULL;
128         }
129
130         if (*ip2nets != 0) {
131                 rc = lnet_parse_ip2nets(&nets, ip2nets);
132                 return (rc == 0) ? nets : NULL;
133         }
134
135         if (*networks != 0)
136                 return networks;
137
138         return "tcp";
139 }
140
141 static void
142 lnet_init_locks(void)
143 {
144         spin_lock_init(&the_lnet.ln_eq_wait_lock);
145         init_waitqueue_head(&the_lnet.ln_eq_waitq);
146         init_waitqueue_head(&the_lnet.ln_rc_waitq);
147         mutex_init(&the_lnet.ln_lnd_mutex);
148         mutex_init(&the_lnet.ln_api_mutex);
149 }
150
151 static void
152 lnet_fini_locks(void)
153 {
154 }
155
156 struct kmem_cache *lnet_mes_cachep;        /* MEs kmem_cache */
157 struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
158                                             *  MDs kmem_cache */
159
160 static int
161 lnet_descriptor_setup(void)
162 {
163         /* create specific kmem_cache for MEs and small MDs (i.e., originally
164          * allocated in <size-xxx> kmem_cache).
165          */
166         lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
167                                             0, 0, NULL);
168         if (!lnet_mes_cachep)
169                 return -ENOMEM;
170
171         lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
172                                                   LNET_SMALL_MD_SIZE, 0, 0,
173                                                   NULL);
174         if (!lnet_small_mds_cachep)
175                 return -ENOMEM;
176
177         return 0;
178 }
179
180 static void
181 lnet_descriptor_cleanup(void)
182 {
183
184         if (lnet_small_mds_cachep) {
185                 kmem_cache_destroy(lnet_small_mds_cachep);
186                 lnet_small_mds_cachep = NULL;
187         }
188
189         if (lnet_mes_cachep) {
190                 kmem_cache_destroy(lnet_mes_cachep);
191                 lnet_mes_cachep = NULL;
192         }
193 }
194
195 static int
196 lnet_create_remote_nets_table(void)
197 {
198         int               i;
199         struct list_head *hash;
200
201         LASSERT(the_lnet.ln_remote_nets_hash == NULL);
202         LASSERT(the_lnet.ln_remote_nets_hbits > 0);
203         LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
204         if (hash == NULL) {
205                 CERROR("Failed to create remote nets hash table\n");
206                 return -ENOMEM;
207         }
208
209         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
210                 INIT_LIST_HEAD(&hash[i]);
211         the_lnet.ln_remote_nets_hash = hash;
212         return 0;
213 }
214
215 static void
216 lnet_destroy_remote_nets_table(void)
217 {
218         int i;
219
220         if (the_lnet.ln_remote_nets_hash == NULL)
221                 return;
222
223         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
224                 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
225
226         LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
227                     LNET_REMOTE_NETS_HASH_SIZE *
228                     sizeof(the_lnet.ln_remote_nets_hash[0]));
229         the_lnet.ln_remote_nets_hash = NULL;
230 }
231
232 static void
233 lnet_destroy_locks(void)
234 {
235         if (the_lnet.ln_res_lock != NULL) {
236                 cfs_percpt_lock_free(the_lnet.ln_res_lock);
237                 the_lnet.ln_res_lock = NULL;
238         }
239
240         if (the_lnet.ln_net_lock != NULL) {
241                 cfs_percpt_lock_free(the_lnet.ln_net_lock);
242                 the_lnet.ln_net_lock = NULL;
243         }
244
245         lnet_fini_locks();
246 }
247
248 static int
249 lnet_create_locks(void)
250 {
251         lnet_init_locks();
252
253         the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
254         if (the_lnet.ln_res_lock == NULL)
255                 goto failed;
256
257         the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
258         if (the_lnet.ln_net_lock == NULL)
259                 goto failed;
260
261         return 0;
262
263  failed:
264         lnet_destroy_locks();
265         return -ENOMEM;
266 }
267
268 static void lnet_assert_wire_constants(void)
269 {
270         /* Wire protocol assertions generated by 'wirecheck'
271          * running on Linux robert.bartonsoftware.com 2.6.8-1.521
272          * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
273          * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
274
275         /* Constants... */
276         CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
277         CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
278         CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
279         CLASSERT(LNET_MSG_ACK == 0);
280         CLASSERT(LNET_MSG_PUT == 1);
281         CLASSERT(LNET_MSG_GET == 2);
282         CLASSERT(LNET_MSG_REPLY == 3);
283         CLASSERT(LNET_MSG_HELLO == 4);
284
285         /* Checks for struct lnet_handle_wire */
286         CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
287         CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
288         CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
289         CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
290         CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
291
292         /* Checks for struct struct lnet_magicversion */
293         CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
294         CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
295         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
296         CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
297         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
298         CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
299         CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
300
301         /* Checks for struct struct lnet_hdr */
302         CLASSERT((int)sizeof(struct lnet_hdr) == 72);
303         CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
304         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
305         CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
306         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
307         CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
308         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
309         CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
310         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
311         CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
312         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
313         CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
314         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
315         CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
316         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
317
318         /* Ack */
319         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
320         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
321         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
322         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
323         CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
324         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
325
326         /* Put */
327         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
328         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
329         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
330         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
331         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
332         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
333         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
334         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
335         CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
336         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
337
338         /* Get */
339         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
340         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
341         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
342         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
343         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
344         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
345         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
346         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
347         CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
348         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
349
350         /* Reply */
351         CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
352         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
353
354         /* Hello */
355         CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
356         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
357         CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
358         CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
359
360         /* Checks for struct lnet_ni_status and related constants */
361         CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
362         CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
363         CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
364
365         /* Checks for struct lnet_ni_status */
366         CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
367         CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
368         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
369         CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
370         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
371         CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
372         CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
373
374         /* Checks for struct lnet_ping_info and related constants */
375         CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
376         CLASSERT(LNET_PING_FEAT_INVAL == 0);
377         CLASSERT(LNET_PING_FEAT_BASE == 1);
378         CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
379         CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
380         CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
381         CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
382         CLASSERT(LNET_PING_FEAT_BITS == 31);
383
384         /* Checks for struct lnet_ping_info */
385         CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
386         CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
387         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
388         CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
389         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
390         CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
391         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
392         CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
393         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
394         CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
395         CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
396 }
397
398 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
399 {
400         struct lnet_lnd *lnd;
401         struct list_head *tmp;
402
403         /* holding lnd mutex */
404         list_for_each(tmp, &the_lnet.ln_lnds) {
405                 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
406
407                 if (lnd->lnd_type == type)
408                         return lnd;
409         }
410         return NULL;
411 }
412
413 void
414 lnet_register_lnd(struct lnet_lnd *lnd)
415 {
416         mutex_lock(&the_lnet.ln_lnd_mutex);
417
418         LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
419         LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
420
421         list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
422         lnd->lnd_refcount = 0;
423
424         CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
425
426         mutex_unlock(&the_lnet.ln_lnd_mutex);
427 }
428 EXPORT_SYMBOL(lnet_register_lnd);
429
430 void
431 lnet_unregister_lnd(struct lnet_lnd *lnd)
432 {
433         mutex_lock(&the_lnet.ln_lnd_mutex);
434
435         LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
436         LASSERT(lnd->lnd_refcount == 0);
437
438         list_del(&lnd->lnd_list);
439         CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
440
441         mutex_unlock(&the_lnet.ln_lnd_mutex);
442 }
443 EXPORT_SYMBOL(lnet_unregister_lnd);
444
445 void
446 lnet_counters_get(struct lnet_counters *counters)
447 {
448         struct lnet_counters *ctr;
449         int             i;
450
451         memset(counters, 0, sizeof(*counters));
452
453         lnet_net_lock(LNET_LOCK_EX);
454
455         cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
456                 counters->msgs_max     += ctr->msgs_max;
457                 counters->msgs_alloc   += ctr->msgs_alloc;
458                 counters->errors       += ctr->errors;
459                 counters->send_count   += ctr->send_count;
460                 counters->recv_count   += ctr->recv_count;
461                 counters->route_count  += ctr->route_count;
462                 counters->drop_count   += ctr->drop_count;
463                 counters->send_length  += ctr->send_length;
464                 counters->recv_length  += ctr->recv_length;
465                 counters->route_length += ctr->route_length;
466                 counters->drop_length  += ctr->drop_length;
467
468         }
469         lnet_net_unlock(LNET_LOCK_EX);
470 }
471 EXPORT_SYMBOL(lnet_counters_get);
472
473 void
474 lnet_counters_reset(void)
475 {
476         struct lnet_counters *counters;
477         int             i;
478
479         lnet_net_lock(LNET_LOCK_EX);
480
481         cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
482                 memset(counters, 0, sizeof(struct lnet_counters));
483
484         lnet_net_unlock(LNET_LOCK_EX);
485 }
486
487 static char *
488 lnet_res_type2str(int type)
489 {
490         switch (type) {
491         default:
492                 LBUG();
493         case LNET_COOKIE_TYPE_MD:
494                 return "MD";
495         case LNET_COOKIE_TYPE_ME:
496                 return "ME";
497         case LNET_COOKIE_TYPE_EQ:
498                 return "EQ";
499         }
500 }
501
502 static void
503 lnet_res_container_cleanup(struct lnet_res_container *rec)
504 {
505         int     count = 0;
506
507         if (rec->rec_type == 0) /* not set yet, it's uninitialized */
508                 return;
509
510         while (!list_empty(&rec->rec_active)) {
511                 struct list_head *e = rec->rec_active.next;
512
513                 list_del_init(e);
514                 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
515                         lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
516
517                 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
518                         lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
519
520                 } else { /* NB: Active MEs should be attached on portals */
521                         LBUG();
522                 }
523                 count++;
524         }
525
526         if (count > 0) {
527                 /* Found alive MD/ME/EQ, user really should unlink/free
528                  * all of them before finalize LNet, but if someone didn't,
529                  * we have to recycle garbage for him */
530                 CERROR("%d active elements on exit of %s container\n",
531                        count, lnet_res_type2str(rec->rec_type));
532         }
533
534         if (rec->rec_lh_hash != NULL) {
535                 LIBCFS_FREE(rec->rec_lh_hash,
536                             LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
537                 rec->rec_lh_hash = NULL;
538         }
539
540         rec->rec_type = 0; /* mark it as finalized */
541 }
542
543 static int
544 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
545 {
546         int     rc = 0;
547         int     i;
548
549         LASSERT(rec->rec_type == 0);
550
551         rec->rec_type = type;
552         INIT_LIST_HEAD(&rec->rec_active);
553
554         rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
555
556         /* Arbitrary choice of hash table size */
557         LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
558                          LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
559         if (rec->rec_lh_hash == NULL) {
560                 rc = -ENOMEM;
561                 goto out;
562         }
563
564         for (i = 0; i < LNET_LH_HASH_SIZE; i++)
565                 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
566
567         return 0;
568
569 out:
570         CERROR("Failed to setup %s resource container\n",
571                lnet_res_type2str(type));
572         lnet_res_container_cleanup(rec);
573         return rc;
574 }
575
576 static void
577 lnet_res_containers_destroy(struct lnet_res_container **recs)
578 {
579         struct lnet_res_container       *rec;
580         int                             i;
581
582         cfs_percpt_for_each(rec, i, recs)
583                 lnet_res_container_cleanup(rec);
584
585         cfs_percpt_free(recs);
586 }
587
588 static struct lnet_res_container **
589 lnet_res_containers_create(int type)
590 {
591         struct lnet_res_container       **recs;
592         struct lnet_res_container       *rec;
593         int                             rc;
594         int                             i;
595
596         recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
597         if (recs == NULL) {
598                 CERROR("Failed to allocate %s resource containers\n",
599                        lnet_res_type2str(type));
600                 return NULL;
601         }
602
603         cfs_percpt_for_each(rec, i, recs) {
604                 rc = lnet_res_container_setup(rec, i, type);
605                 if (rc != 0) {
606                         lnet_res_containers_destroy(recs);
607                         return NULL;
608                 }
609         }
610
611         return recs;
612 }
613
614 struct lnet_libhandle *
615 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
616 {
617         /* ALWAYS called with lnet_res_lock held */
618         struct list_head        *head;
619         struct lnet_libhandle   *lh;
620         unsigned int            hash;
621
622         if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
623                 return NULL;
624
625         hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
626         head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
627
628         list_for_each_entry(lh, head, lh_hash_chain) {
629                 if (lh->lh_cookie == cookie)
630                         return lh;
631         }
632
633         return NULL;
634 }
635
636 void
637 lnet_res_lh_initialize(struct lnet_res_container *rec,
638                        struct lnet_libhandle *lh)
639 {
640         /* ALWAYS called with lnet_res_lock held */
641         unsigned int    ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
642         unsigned int    hash;
643
644         lh->lh_cookie = rec->rec_lh_cookie;
645         rec->rec_lh_cookie += 1 << ibits;
646
647         hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
648
649         list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
650 }
651
652 static int lnet_unprepare(void);
653
654 static int
655 lnet_prepare(lnet_pid_t requested_pid)
656 {
657         /* Prepare to bring up the network */
658         struct lnet_res_container **recs;
659         int                       rc = 0;
660
661         if (requested_pid == LNET_PID_ANY) {
662                 /* Don't instantiate LNET just for me */
663                 return -ENETDOWN;
664         }
665
666         LASSERT(the_lnet.ln_refcount == 0);
667
668         the_lnet.ln_routing = 0;
669
670         LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
671         the_lnet.ln_pid = requested_pid;
672
673         INIT_LIST_HEAD(&the_lnet.ln_test_peers);
674         INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
675         INIT_LIST_HEAD(&the_lnet.ln_nets);
676         INIT_LIST_HEAD(&the_lnet.ln_routers);
677         INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
678         INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
679
680         rc = lnet_descriptor_setup();
681         if (rc != 0)
682                 goto failed;
683
684         rc = lnet_create_remote_nets_table();
685         if (rc != 0)
686                 goto failed;
687
688         /*
689          * NB the interface cookie in wire handles guards against delayed
690          * replies and ACKs appearing valid after reboot.
691          */
692         the_lnet.ln_interface_cookie = ktime_get_real_ns();
693
694         the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
695                                                 sizeof(struct lnet_counters));
696         if (the_lnet.ln_counters == NULL) {
697                 CERROR("Failed to allocate counters for LNet\n");
698                 rc = -ENOMEM;
699                 goto failed;
700         }
701
702         rc = lnet_peer_tables_create();
703         if (rc != 0)
704                 goto failed;
705
706         rc = lnet_msg_containers_create();
707         if (rc != 0)
708                 goto failed;
709
710         rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
711                                       LNET_COOKIE_TYPE_EQ);
712         if (rc != 0)
713                 goto failed;
714
715         recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
716         if (recs == NULL) {
717                 rc = -ENOMEM;
718                 goto failed;
719         }
720
721         the_lnet.ln_me_containers = recs;
722
723         recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
724         if (recs == NULL) {
725                 rc = -ENOMEM;
726                 goto failed;
727         }
728
729         the_lnet.ln_md_containers = recs;
730
731         rc = lnet_portals_create();
732         if (rc != 0) {
733                 CERROR("Failed to create portals for LNet: %d\n", rc);
734                 goto failed;
735         }
736
737         return 0;
738
739  failed:
740         lnet_unprepare();
741         return rc;
742 }
743
744 static int
745 lnet_unprepare (void)
746 {
747         /* NB no LNET_LOCK since this is the last reference.  All LND instances
748          * have shut down already, so it is safe to unlink and free all
749          * descriptors, even those that appear committed to a network op (eg MD
750          * with non-zero pending count) */
751
752         lnet_fail_nid(LNET_NID_ANY, 0);
753
754         LASSERT(the_lnet.ln_refcount == 0);
755         LASSERT(list_empty(&the_lnet.ln_test_peers));
756         LASSERT(list_empty(&the_lnet.ln_nets));
757
758         lnet_portals_destroy();
759
760         if (the_lnet.ln_md_containers != NULL) {
761                 lnet_res_containers_destroy(the_lnet.ln_md_containers);
762                 the_lnet.ln_md_containers = NULL;
763         }
764
765         if (the_lnet.ln_me_containers != NULL) {
766                 lnet_res_containers_destroy(the_lnet.ln_me_containers);
767                 the_lnet.ln_me_containers = NULL;
768         }
769
770         lnet_res_container_cleanup(&the_lnet.ln_eq_container);
771
772         lnet_msg_containers_destroy();
773         lnet_peer_uninit();
774         lnet_rtrpools_free(0);
775
776         if (the_lnet.ln_counters != NULL) {
777                 cfs_percpt_free(the_lnet.ln_counters);
778                 the_lnet.ln_counters = NULL;
779         }
780         lnet_destroy_remote_nets_table();
781         lnet_descriptor_cleanup();
782
783         return 0;
784 }
785
786 struct lnet_ni  *
787 lnet_net2ni_locked(__u32 net_id, int cpt)
788 {
789         struct lnet_ni   *ni;
790         struct lnet_net  *net;
791
792         LASSERT(cpt != LNET_LOCK_EX);
793
794         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
795                 if (net->net_id == net_id) {
796                         ni = list_entry(net->net_ni_list.next, struct lnet_ni,
797                                         ni_netlist);
798                         return ni;
799                 }
800         }
801
802         return NULL;
803 }
804
805 struct lnet_ni *
806 lnet_net2ni_addref(__u32 net)
807 {
808         struct lnet_ni *ni;
809
810         lnet_net_lock(0);
811         ni = lnet_net2ni_locked(net, 0);
812         if (ni)
813                 lnet_ni_addref_locked(ni, 0);
814         lnet_net_unlock(0);
815
816         return ni;
817 }
818 EXPORT_SYMBOL(lnet_net2ni_addref);
819
820 struct lnet_net *
821 lnet_get_net_locked(__u32 net_id)
822 {
823         struct lnet_net  *net;
824
825         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
826                 if (net->net_id == net_id)
827                         return net;
828         }
829
830         return NULL;
831 }
832
833 unsigned int
834 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
835 {
836         __u64           key = nid;
837         unsigned int    val;
838
839         LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
840
841         if (number == 1)
842                 return 0;
843
844         val = hash_long(key, LNET_CPT_BITS);
845         /* NB: LNET_CP_NUMBER doesn't have to be PO2 */
846         if (val < number)
847                 return val;
848
849         return (unsigned int)(key + val + (val >> 1)) % number;
850 }
851
852 int
853 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
854 {
855         struct lnet_net *net;
856
857         /* must called with hold of lnet_net_lock */
858         if (LNET_CPT_NUMBER == 1)
859                 return 0; /* the only one */
860
861         /*
862          * If NI is provided then use the CPT identified in the NI cpt
863          * list if one exists. If one doesn't exist, then that NI is
864          * associated with all CPTs and it follows that the net it belongs
865          * to is implicitly associated with all CPTs, so just hash the nid
866          * and return that.
867          */
868         if (ni != NULL) {
869                 if (ni->ni_cpts != NULL)
870                         return ni->ni_cpts[lnet_nid_cpt_hash(nid,
871                                                              ni->ni_ncpts)];
872                 else
873                         return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
874         }
875
876         /* no NI provided so look at the net */
877         net = lnet_get_net_locked(LNET_NIDNET(nid));
878
879         if (net != NULL && net->net_cpts != NULL) {
880                 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
881         }
882
883         return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
884 }
885
886 int
887 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
888 {
889         int     cpt;
890         int     cpt2;
891
892         if (LNET_CPT_NUMBER == 1)
893                 return 0; /* the only one */
894
895         cpt = lnet_net_lock_current();
896
897         cpt2 = lnet_cpt_of_nid_locked(nid, ni);
898
899         lnet_net_unlock(cpt);
900
901         return cpt2;
902 }
903 EXPORT_SYMBOL(lnet_cpt_of_nid);
904
905 int
906 lnet_islocalnet(__u32 net_id)
907 {
908         struct lnet_net *net;
909         int             cpt;
910         bool            local;
911
912         cpt = lnet_net_lock_current();
913
914         net = lnet_get_net_locked(net_id);
915
916         local = net != NULL;
917
918         lnet_net_unlock(cpt);
919
920         return local;
921 }
922
923 bool
924 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
925 {
926         if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
927             ni->ni_state == LNET_NI_STATE_DEGRADED)
928                 return true;
929
930         return false;
931 }
932
933 struct lnet_ni  *
934 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
935 {
936         struct lnet_net  *net;
937         struct lnet_ni   *ni;
938
939         LASSERT(cpt != LNET_LOCK_EX);
940
941         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
942                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
943                         if (ni->ni_nid == nid)
944                                 return ni;
945                 }
946         }
947
948         return NULL;
949 }
950
951 struct lnet_ni *
952 lnet_nid2ni_addref(lnet_nid_t nid)
953 {
954         struct lnet_ni *ni;
955
956         lnet_net_lock(0);
957         ni = lnet_nid2ni_locked(nid, 0);
958         if (ni)
959                 lnet_ni_addref_locked(ni, 0);
960         lnet_net_unlock(0);
961
962         return ni;
963 }
964 EXPORT_SYMBOL(lnet_nid2ni_addref);
965
966 int
967 lnet_islocalnid(lnet_nid_t nid)
968 {
969         struct lnet_ni  *ni;
970         int             cpt;
971
972         cpt = lnet_net_lock_current();
973         ni = lnet_nid2ni_locked(nid, cpt);
974         lnet_net_unlock(cpt);
975
976         return ni != NULL;
977 }
978
979 int
980 lnet_count_acceptor_nets(void)
981 {
982         /* Return the # of NIs that need the acceptor. */
983         int              count = 0;
984         struct lnet_net  *net;
985         int              cpt;
986
987         cpt = lnet_net_lock_current();
988         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
989                 /* all socklnd type networks should have the acceptor
990                  * thread started */
991                 if (net->net_lnd->lnd_accept != NULL)
992                         count++;
993         }
994
995         lnet_net_unlock(cpt);
996
997         return count;
998 }
999
1000 struct lnet_ping_buffer *
1001 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1002 {
1003         struct lnet_ping_buffer *pbuf;
1004
1005         LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1006         if (pbuf) {
1007                 pbuf->pb_nnis = nnis;
1008                 atomic_set(&pbuf->pb_refcnt, 1);
1009         }
1010
1011         return pbuf;
1012 }
1013
1014 void
1015 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1016 {
1017         LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1018         LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1019 }
1020
1021 static struct lnet_ping_buffer *
1022 lnet_ping_target_create(int nnis)
1023 {
1024         struct lnet_ping_buffer *pbuf;
1025
1026         pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1027         if (pbuf == NULL) {
1028                 CERROR("Can't allocate ping source [%d]\n", nnis);
1029                 return NULL;
1030         }
1031
1032         pbuf->pb_info.pi_nnis = nnis;
1033         pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1034         pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1035         pbuf->pb_info.pi_features = LNET_PING_FEAT_NI_STATUS;
1036
1037         return pbuf;
1038 }
1039
1040 static inline int
1041 lnet_get_net_ni_count_locked(struct lnet_net *net)
1042 {
1043         struct lnet_ni  *ni;
1044         int             count = 0;
1045
1046         list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1047                 count++;
1048
1049         return count;
1050 }
1051
1052 static inline int
1053 lnet_get_net_ni_count_pre(struct lnet_net *net)
1054 {
1055         struct lnet_ni  *ni;
1056         int             count = 0;
1057
1058         list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1059                 count++;
1060
1061         return count;
1062 }
1063
1064 static inline int
1065 lnet_get_ni_count(void)
1066 {
1067         struct lnet_ni  *ni;
1068         struct lnet_net *net;
1069         int             count = 0;
1070
1071         lnet_net_lock(0);
1072
1073         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1074                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1075                         count++;
1076         }
1077
1078         lnet_net_unlock(0);
1079
1080         return count;
1081 }
1082
1083 int
1084 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1085 {
1086         if (!pinfo)
1087                 return -EINVAL;
1088         if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1089                 return -EPROTO;
1090         if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1091                 return -EPROTO;
1092         /* Loopback is guaranteed to be present */
1093         if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1094                 return -ERANGE;
1095         if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1096                 return -EPROTO;
1097         return 0;
1098 }
1099
1100 static void
1101 lnet_ping_target_destroy(void)
1102 {
1103         struct lnet_net *net;
1104         struct lnet_ni  *ni;
1105
1106         lnet_net_lock(LNET_LOCK_EX);
1107
1108         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1109                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1110                         lnet_ni_lock(ni);
1111                         ni->ni_status = NULL;
1112                         lnet_ni_unlock(ni);
1113                 }
1114         }
1115
1116         lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1117         the_lnet.ln_ping_target = NULL;
1118
1119         lnet_net_unlock(LNET_LOCK_EX);
1120 }
1121
1122 static void
1123 lnet_ping_target_event_handler(struct lnet_event *event)
1124 {
1125         struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1126
1127         if (event->unlinked)
1128                 lnet_ping_buffer_decref(pbuf);
1129 }
1130
1131 static int
1132 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1133                        struct lnet_handle_md *ping_mdh,
1134                        int ni_count, bool set_eq)
1135 {
1136         struct lnet_process_id id = {
1137                 .nid = LNET_NID_ANY,
1138                 .pid = LNET_PID_ANY
1139         };
1140         struct lnet_handle_me me_handle;
1141         struct lnet_md md = { NULL };
1142         int rc, rc2;
1143
1144         if (set_eq) {
1145                 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1146                                  &the_lnet.ln_ping_target_eq);
1147                 if (rc != 0) {
1148                         CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1149                         return rc;
1150                 }
1151         }
1152
1153         *ppbuf = lnet_ping_target_create(ni_count);
1154         if (*ppbuf == NULL) {
1155                 rc = -ENOMEM;
1156                 goto fail_free_eq;
1157         }
1158
1159         /* Ping target ME/MD */
1160         rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1161                           LNET_PROTO_PING_MATCHBITS, 0,
1162                           LNET_UNLINK, LNET_INS_AFTER,
1163                           &me_handle);
1164         if (rc != 0) {
1165                 CERROR("Can't create ping target ME: %d\n", rc);
1166                 goto fail_decref_ping_buffer;
1167         }
1168
1169         /* initialize md content */
1170         md.start     = &(*ppbuf)->pb_info;
1171         md.length    = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1172         md.threshold = LNET_MD_THRESH_INF;
1173         md.max_size  = 0;
1174         md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1175                        LNET_MD_MANAGE_REMOTE;
1176         md.eq_handle = the_lnet.ln_ping_target_eq;
1177         md.user_ptr  = *ppbuf;
1178
1179         rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1180         if (rc != 0) {
1181                 CERROR("Can't attach ping target MD: %d\n", rc);
1182                 goto fail_unlink_ping_me;
1183         }
1184         lnet_ping_buffer_addref(*ppbuf);
1185
1186         return 0;
1187
1188 fail_unlink_ping_me:
1189         rc2 = LNetMEUnlink(me_handle);
1190         LASSERT(rc2 == 0);
1191 fail_decref_ping_buffer:
1192         LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1193         lnet_ping_buffer_decref(*ppbuf);
1194         *ppbuf = NULL;
1195 fail_free_eq:
1196         if (set_eq) {
1197                 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1198                 LASSERT(rc2 == 0);
1199         }
1200         return rc;
1201 }
1202
1203 static void
1204 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1205                     struct lnet_handle_md *ping_mdh)
1206 {
1207         sigset_t        blocked = cfs_block_allsigs();
1208
1209         LNetMDUnlink(*ping_mdh);
1210         LNetInvalidateMDHandle(ping_mdh);
1211
1212         /* NB the MD could be busy; this just starts the unlink */
1213         while (lnet_ping_buffer_numref(pbuf) > 1) {
1214                 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1215                 set_current_state(TASK_UNINTERRUPTIBLE);
1216                 schedule_timeout(cfs_time_seconds(1));
1217         }
1218
1219         cfs_restore_sigs(blocked);
1220 }
1221
1222 static void
1223 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1224 {
1225         struct lnet_ni          *ni;
1226         struct lnet_net         *net;
1227         struct lnet_ni_status *ns;
1228         int                     i;
1229         int                     rc;
1230
1231         i = 0;
1232         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1233                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1234                         LASSERT(i < pbuf->pb_nnis);
1235
1236                         ns = &pbuf->pb_info.pi_ni[i];
1237
1238                         ns->ns_nid = ni->ni_nid;
1239
1240                         lnet_ni_lock(ni);
1241                         ns->ns_status = (ni->ni_status != NULL) ?
1242                                          ni->ni_status->ns_status :
1243                                                 LNET_NI_STATUS_UP;
1244                         ni->ni_status = ns;
1245                         lnet_ni_unlock(ni);
1246
1247                         i++;
1248                 }
1249         }
1250         /*
1251          * We (ab)use the ns_status of the loopback interface to
1252          * transmit the sequence number. The first interface listed
1253          * must be the loopback interface.
1254          */
1255         rc = lnet_ping_info_validate(&pbuf->pb_info);
1256         if (rc) {
1257                 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1258                 LBUG();
1259         }
1260         LNET_PING_BUFFER_SEQNO(pbuf) =
1261                 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1262 }
1263
1264 static void
1265 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1266                         struct lnet_handle_md ping_mdh)
1267 {
1268         struct lnet_ping_buffer *old_pbuf = NULL;
1269         struct lnet_handle_md old_ping_md;
1270
1271         /* switch the NIs to point to the new ping info created */
1272         lnet_net_lock(LNET_LOCK_EX);
1273
1274         if (!the_lnet.ln_routing)
1275                 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1276
1277         /* Ensure only known feature bits have been set. */
1278         LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1279         LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1280
1281         lnet_ping_target_install_locked(pbuf);
1282
1283         if (the_lnet.ln_ping_target) {
1284                 old_pbuf = the_lnet.ln_ping_target;
1285                 old_ping_md = the_lnet.ln_ping_target_md;
1286         }
1287         the_lnet.ln_ping_target_md = ping_mdh;
1288         the_lnet.ln_ping_target = pbuf;
1289
1290         lnet_net_unlock(LNET_LOCK_EX);
1291
1292         if (old_pbuf) {
1293                 /* unlink and free the old ping info */
1294                 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1295                 lnet_ping_buffer_decref(old_pbuf);
1296         }
1297 }
1298
1299 static void
1300 lnet_ping_target_fini(void)
1301 {
1302         int             rc;
1303
1304         lnet_ping_md_unlink(the_lnet.ln_ping_target,
1305                             &the_lnet.ln_ping_target_md);
1306
1307         rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1308         LASSERT(rc == 0);
1309
1310         lnet_ping_target_destroy();
1311 }
1312
1313 static int
1314 lnet_ni_tq_credits(struct lnet_ni *ni)
1315 {
1316         int     credits;
1317
1318         LASSERT(ni->ni_ncpts >= 1);
1319
1320         if (ni->ni_ncpts == 1)
1321                 return ni->ni_net->net_tunables.lct_max_tx_credits;
1322
1323         credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1324         credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1325         credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
1326
1327         return credits;
1328 }
1329
1330 static void
1331 lnet_ni_unlink_locked(struct lnet_ni *ni)
1332 {
1333         if (!list_empty(&ni->ni_cptlist)) {
1334                 list_del_init(&ni->ni_cptlist);
1335                 lnet_ni_decref_locked(ni, 0);
1336         }
1337
1338         /* move it to zombie list and nobody can find it anymore */
1339         LASSERT(!list_empty(&ni->ni_netlist));
1340         list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1341         lnet_ni_decref_locked(ni, 0);
1342 }
1343
1344 static void
1345 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1346 {
1347         int             i;
1348         int             islo;
1349         struct lnet_ni  *ni;
1350         struct list_head *zombie_list = &net->net_ni_zombie;
1351
1352         /*
1353          * Now wait for the NIs I just nuked to show up on the zombie
1354          * list and shut them down in guaranteed thread context
1355          */
1356         i = 2;
1357         while (!list_empty(zombie_list)) {
1358                 int     *ref;
1359                 int     j;
1360
1361                 ni = list_entry(zombie_list->next,
1362                                 struct lnet_ni, ni_netlist);
1363                 list_del_init(&ni->ni_netlist);
1364                 /* the ni should be in deleting state. If it's not it's
1365                  * a bug */
1366                 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1367                 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1368                         if (*ref == 0)
1369                                 continue;
1370                         /* still busy, add it back to zombie list */
1371                         list_add(&ni->ni_netlist, zombie_list);
1372                         break;
1373                 }
1374
1375                 if (!list_empty(&ni->ni_netlist)) {
1376                         lnet_net_unlock(LNET_LOCK_EX);
1377                         ++i;
1378                         if ((i & (-i)) == i) {
1379                                 CDEBUG(D_WARNING,
1380                                        "Waiting for zombie LNI %s\n",
1381                                        libcfs_nid2str(ni->ni_nid));
1382                         }
1383                         set_current_state(TASK_UNINTERRUPTIBLE);
1384                         schedule_timeout(cfs_time_seconds(1));
1385                         lnet_net_lock(LNET_LOCK_EX);
1386                         continue;
1387                 }
1388
1389                 lnet_net_unlock(LNET_LOCK_EX);
1390
1391                 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1392
1393                 LASSERT(!in_interrupt());
1394                 (net->net_lnd->lnd_shutdown)(ni);
1395
1396                 if (!islo)
1397                         CDEBUG(D_LNI, "Removed LNI %s\n",
1398                               libcfs_nid2str(ni->ni_nid));
1399
1400                 lnet_ni_free(ni);
1401                 i = 2;
1402                 lnet_net_lock(LNET_LOCK_EX);
1403         }
1404 }
1405
1406 /* shutdown down the NI and release refcount */
1407 static void
1408 lnet_shutdown_lndni(struct lnet_ni *ni)
1409 {
1410         int i;
1411         struct lnet_net *net = ni->ni_net;
1412
1413         lnet_net_lock(LNET_LOCK_EX);
1414         ni->ni_state = LNET_NI_STATE_DELETING;
1415         lnet_ni_unlink_locked(ni);
1416         lnet_incr_dlc_seq();
1417         lnet_net_unlock(LNET_LOCK_EX);
1418
1419         /* clear messages for this NI on the lazy portal */
1420         for (i = 0; i < the_lnet.ln_nportals; i++)
1421                 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1422
1423         lnet_net_lock(LNET_LOCK_EX);
1424         lnet_clear_zombies_nis_locked(net);
1425         lnet_net_unlock(LNET_LOCK_EX);
1426 }
1427
1428 static void
1429 lnet_shutdown_lndnet(struct lnet_net *net)
1430 {
1431         struct lnet_ni *ni;
1432
1433         lnet_net_lock(LNET_LOCK_EX);
1434
1435         net->net_state = LNET_NET_STATE_DELETING;
1436
1437         list_del_init(&net->net_list);
1438
1439         while (!list_empty(&net->net_ni_list)) {
1440                 ni = list_entry(net->net_ni_list.next,
1441                                 struct lnet_ni, ni_netlist);
1442                 lnet_net_unlock(LNET_LOCK_EX);
1443                 lnet_shutdown_lndni(ni);
1444                 lnet_net_lock(LNET_LOCK_EX);
1445         }
1446
1447         lnet_net_unlock(LNET_LOCK_EX);
1448
1449         /* Do peer table cleanup for this net */
1450         lnet_peer_tables_cleanup(net);
1451
1452         lnet_net_lock(LNET_LOCK_EX);
1453         /*
1454          * decrement ref count on lnd only when the entire network goes
1455          * away
1456          */
1457         net->net_lnd->lnd_refcount--;
1458
1459         lnet_net_unlock(LNET_LOCK_EX);
1460
1461         lnet_net_free(net);
1462 }
1463
1464 static void
1465 lnet_shutdown_lndnets(void)
1466 {
1467         struct lnet_net *net;
1468
1469         /* NB called holding the global mutex */
1470
1471         /* All quiet on the API front */
1472         LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1473         LASSERT(the_lnet.ln_refcount == 0);
1474
1475         lnet_net_lock(LNET_LOCK_EX);
1476         the_lnet.ln_state = LNET_STATE_STOPPING;
1477
1478         while (!list_empty(&the_lnet.ln_nets)) {
1479                 /*
1480                  * move the nets to the zombie list to avoid them being
1481                  * picked up for new work. LONET is also included in the
1482                  * Nets that will be moved to the zombie list
1483                  */
1484                 net = list_entry(the_lnet.ln_nets.next,
1485                                  struct lnet_net, net_list);
1486                 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1487         }
1488
1489         /* Drop the cached loopback Net. */
1490         if (the_lnet.ln_loni != NULL) {
1491                 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1492                 the_lnet.ln_loni = NULL;
1493         }
1494         lnet_net_unlock(LNET_LOCK_EX);
1495
1496         /* iterate through the net zombie list and delete each net */
1497         while (!list_empty(&the_lnet.ln_net_zombie)) {
1498                 net = list_entry(the_lnet.ln_net_zombie.next,
1499                                  struct lnet_net, net_list);
1500                 lnet_shutdown_lndnet(net);
1501         }
1502
1503         lnet_net_lock(LNET_LOCK_EX);
1504         the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1505         lnet_net_unlock(LNET_LOCK_EX);
1506 }
1507
1508 static int
1509 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1510 {
1511         int                     rc = -EINVAL;
1512         struct lnet_tx_queue    *tq;
1513         int                     i;
1514         struct lnet_net         *net = ni->ni_net;
1515
1516         mutex_lock(&the_lnet.ln_lnd_mutex);
1517
1518         if (tun) {
1519                 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1520                 ni->ni_lnd_tunables_set = true;
1521         }
1522
1523         rc = (net->net_lnd->lnd_startup)(ni);
1524
1525         mutex_unlock(&the_lnet.ln_lnd_mutex);
1526
1527         if (rc != 0) {
1528                 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1529                                    rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1530                 lnet_net_lock(LNET_LOCK_EX);
1531                 net->net_lnd->lnd_refcount--;
1532                 lnet_net_unlock(LNET_LOCK_EX);
1533                 goto failed0;
1534         }
1535
1536         ni->ni_state = LNET_NI_STATE_ACTIVE;
1537
1538         /* We keep a reference on the loopback net through the loopback NI */
1539         if (net->net_lnd->lnd_type == LOLND) {
1540                 lnet_ni_addref(ni);
1541                 LASSERT(the_lnet.ln_loni == NULL);
1542                 the_lnet.ln_loni = ni;
1543                 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1544                 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1545                 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1546                 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1547                 return 0;
1548         }
1549
1550         if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1551             ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1552                 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1553                                    libcfs_lnd2str(net->net_lnd->lnd_type),
1554                                    ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1555                                         "" : "per-peer ");
1556                 /* shutdown the NI since if we get here then it must've already
1557                  * been started
1558                  */
1559                 lnet_shutdown_lndni(ni);
1560                 return -EINVAL;
1561         }
1562
1563         cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1564                 tq->tq_credits_min =
1565                 tq->tq_credits_max =
1566                 tq->tq_credits = lnet_ni_tq_credits(ni);
1567         }
1568
1569         atomic_set(&ni->ni_tx_credits,
1570                    lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1571
1572         CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1573                 libcfs_nid2str(ni->ni_nid),
1574                 ni->ni_net->net_tunables.lct_peer_tx_credits,
1575                 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1576                 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1577                 ni->ni_net->net_tunables.lct_peer_timeout);
1578
1579         return 0;
1580 failed0:
1581         lnet_ni_free(ni);
1582         return rc;
1583 }
1584
1585 static int
1586 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1587 {
1588         struct lnet_ni *ni;
1589         struct lnet_net *net_l = NULL;
1590         struct list_head        local_ni_list;
1591         int                     rc;
1592         int                     ni_count = 0;
1593         __u32                   lnd_type;
1594         struct lnet_lnd *lnd;
1595         int                     peer_timeout =
1596                 net->net_tunables.lct_peer_timeout;
1597         int                     maxtxcredits =
1598                 net->net_tunables.lct_max_tx_credits;
1599         int                     peerrtrcredits =
1600                 net->net_tunables.lct_peer_rtr_credits;
1601
1602         INIT_LIST_HEAD(&local_ni_list);
1603
1604         /*
1605          * make sure that this net is unique. If it isn't then
1606          * we are adding interfaces to an already existing network, and
1607          * 'net' is just a convenient way to pass in the list.
1608          * if it is unique we need to find the LND and load it if
1609          * necessary.
1610          */
1611         if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1612                 lnd_type = LNET_NETTYP(net->net_id);
1613
1614                 LASSERT(libcfs_isknown_lnd(lnd_type));
1615
1616                 mutex_lock(&the_lnet.ln_lnd_mutex);
1617                 lnd = lnet_find_lnd_by_type(lnd_type);
1618
1619                 if (lnd == NULL) {
1620                         mutex_unlock(&the_lnet.ln_lnd_mutex);
1621                         rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1622                         mutex_lock(&the_lnet.ln_lnd_mutex);
1623
1624                         lnd = lnet_find_lnd_by_type(lnd_type);
1625                         if (lnd == NULL) {
1626                                 mutex_unlock(&the_lnet.ln_lnd_mutex);
1627                                 CERROR("Can't load LND %s, module %s, rc=%d\n",
1628                                 libcfs_lnd2str(lnd_type),
1629                                 libcfs_lnd2modname(lnd_type), rc);
1630 #ifndef HAVE_MODULE_LOADING_SUPPORT
1631                                 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1632                                                 "compiled with kernel module "
1633                                                 "loading support.");
1634 #endif
1635                                 rc = -EINVAL;
1636                                 goto failed0;
1637                         }
1638                 }
1639
1640                 lnet_net_lock(LNET_LOCK_EX);
1641                 lnd->lnd_refcount++;
1642                 lnet_net_unlock(LNET_LOCK_EX);
1643
1644                 net->net_lnd = lnd;
1645
1646                 mutex_unlock(&the_lnet.ln_lnd_mutex);
1647
1648                 net_l = net;
1649         }
1650
1651         /*
1652          * net_l: if the network being added is unique then net_l
1653          *        will point to that network
1654          *        if the network being added is not unique then
1655          *        net_l points to the existing network.
1656          *
1657          * When we enter the loop below, we'll pick NIs off he
1658          * network beign added and start them up, then add them to
1659          * a local ni list. Once we've successfully started all
1660          * the NIs then we join the local NI list (of started up
1661          * networks) with the net_l->net_ni_list, which should
1662          * point to the correct network to add the new ni list to
1663          *
1664          * If any of the new NIs fail to start up, then we want to
1665          * iterate through the local ni list, which should include
1666          * any NIs which were successfully started up, and shut
1667          * them down.
1668          *
1669          * After than we want to delete the network being added,
1670          * to avoid a memory leak.
1671          */
1672
1673         /*
1674          * When a network uses TCP bonding then all its interfaces
1675          * must be specified when the network is first defined: the
1676          * TCP bonding code doesn't allow for interfaces to be added
1677          * or removed.
1678          */
1679         if (net_l != net && net_l != NULL && use_tcp_bonding &&
1680             LNET_NETTYP(net_l->net_id) == SOCKLND) {
1681                 rc = -EINVAL;
1682                 goto failed0;
1683         }
1684
1685         while (!list_empty(&net->net_ni_added)) {
1686                 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1687                                 ni_netlist);
1688                 list_del_init(&ni->ni_netlist);
1689
1690                 /* make sure that the the NI we're about to start
1691                  * up is actually unique. if it's not fail. */
1692                 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1693                                         ni->ni_interfaces[0])) {
1694                         rc = -EINVAL;
1695                         goto failed1;
1696                 }
1697
1698                 /* adjust the pointer the parent network, just in case it
1699                  * the net is a duplicate */
1700                 ni->ni_net = net_l;
1701
1702                 rc = lnet_startup_lndni(ni, tun);
1703
1704                 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1705                         ni->ni_net->net_lnd->lnd_query != NULL);
1706
1707                 if (rc < 0)
1708                         goto failed1;
1709
1710                 lnet_ni_addref(ni);
1711                 list_add_tail(&ni->ni_netlist, &local_ni_list);
1712
1713                 ni_count++;
1714         }
1715
1716         lnet_net_lock(LNET_LOCK_EX);
1717         list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1718         lnet_incr_dlc_seq();
1719         lnet_net_unlock(LNET_LOCK_EX);
1720
1721         /* if the network is not unique then we don't want to keep
1722          * it around after we're done. Free it. Otherwise add that
1723          * net to the global the_lnet.ln_nets */
1724         if (net_l != net && net_l != NULL) {
1725                 /*
1726                  * TODO - note. currently the tunables can not be updated
1727                  * once added
1728                  */
1729                 lnet_net_free(net);
1730         } else {
1731                 net->net_state = LNET_NET_STATE_ACTIVE;
1732                 /*
1733                  * restore tunables after it has been overwitten by the
1734                  * lnd
1735                  */
1736                 if (peer_timeout != -1)
1737                         net->net_tunables.lct_peer_timeout = peer_timeout;
1738                 if (maxtxcredits != -1)
1739                         net->net_tunables.lct_max_tx_credits = maxtxcredits;
1740                 if (peerrtrcredits != -1)
1741                         net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1742
1743                 lnet_net_lock(LNET_LOCK_EX);
1744                 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1745                 lnet_net_unlock(LNET_LOCK_EX);
1746         }
1747
1748         return ni_count;
1749
1750 failed1:
1751         /*
1752          * shutdown the new NIs that are being started up
1753          * free the NET being started
1754          */
1755         while (!list_empty(&local_ni_list)) {
1756                 ni = list_entry(local_ni_list.next, struct lnet_ni,
1757                                 ni_netlist);
1758
1759                 lnet_shutdown_lndni(ni);
1760         }
1761
1762 failed0:
1763         lnet_net_free(net);
1764
1765         return rc;
1766 }
1767
1768 static int
1769 lnet_startup_lndnets(struct list_head *netlist)
1770 {
1771         struct lnet_net         *net;
1772         int                     rc;
1773         int                     ni_count = 0;
1774
1775         /*
1776          * Change to running state before bringing up the LNDs. This
1777          * allows lnet_shutdown_lndnets() to assert that we've passed
1778          * through here.
1779          */
1780         lnet_net_lock(LNET_LOCK_EX);
1781         the_lnet.ln_state = LNET_STATE_RUNNING;
1782         lnet_net_unlock(LNET_LOCK_EX);
1783
1784         while (!list_empty(netlist)) {
1785                 net = list_entry(netlist->next, struct lnet_net, net_list);
1786                 list_del_init(&net->net_list);
1787
1788                 rc = lnet_startup_lndnet(net, NULL);
1789
1790                 if (rc < 0)
1791                         goto failed;
1792
1793                 ni_count += rc;
1794         }
1795
1796         return ni_count;
1797 failed:
1798         lnet_shutdown_lndnets();
1799
1800         return rc;
1801 }
1802
1803 /**
1804  * Initialize LNet library.
1805  *
1806  * Automatically called at module loading time. Caller has to call
1807  * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
1808  * latter returned 0. It must be called exactly once.
1809  *
1810  * \retval 0 on success
1811  * \retval -ve on failures.
1812  */
1813 int lnet_lib_init(void)
1814 {
1815         int rc;
1816
1817         lnet_assert_wire_constants();
1818
1819         memset(&the_lnet, 0, sizeof(the_lnet));
1820
1821         /* refer to global cfs_cpt_table for now */
1822         the_lnet.ln_cpt_table   = cfs_cpt_table;
1823         the_lnet.ln_cpt_number  = cfs_cpt_number(cfs_cpt_table);
1824
1825         LASSERT(the_lnet.ln_cpt_number > 0);
1826         if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1827                 /* we are under risk of consuming all lh_cookie */
1828                 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
1829                        "please change setting of CPT-table and retry\n",
1830                        the_lnet.ln_cpt_number, LNET_CPT_MAX);
1831                 return -E2BIG;
1832         }
1833
1834         while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
1835                 the_lnet.ln_cpt_bits++;
1836
1837         rc = lnet_create_locks();
1838         if (rc != 0) {
1839                 CERROR("Can't create LNet global locks: %d\n", rc);
1840                 return rc;
1841         }
1842
1843         the_lnet.ln_refcount = 0;
1844         LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
1845         INIT_LIST_HEAD(&the_lnet.ln_lnds);
1846         INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
1847         INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
1848         INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
1849
1850         /* The hash table size is the number of bits it takes to express the set
1851          * ln_num_routes, minus 1 (better to under estimate than over so we
1852          * don't waste memory). */
1853         if (rnet_htable_size <= 0)
1854                 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
1855         else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
1856                 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
1857         the_lnet.ln_remote_nets_hbits = max_t(int, 1,
1858                                            order_base_2(rnet_htable_size) - 1);
1859
1860         /* All LNDs apart from the LOLND are in separate modules.  They
1861          * register themselves when their module loads, and unregister
1862          * themselves when their module is unloaded. */
1863         lnet_register_lnd(&the_lolnd);
1864         return 0;
1865 }
1866
1867 /**
1868  * Finalize LNet library.
1869  *
1870  * \pre lnet_lib_init() called with success.
1871  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
1872  */
1873 void lnet_lib_exit(void)
1874 {
1875         LASSERT(the_lnet.ln_refcount == 0);
1876
1877         while (!list_empty(&the_lnet.ln_lnds))
1878                 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
1879                                                struct lnet_lnd, lnd_list));
1880         lnet_destroy_locks();
1881 }
1882
1883 /**
1884  * Set LNet PID and start LNet interfaces, routing, and forwarding.
1885  *
1886  * Users must call this function at least once before any other functions.
1887  * For each successful call there must be a corresponding call to
1888  * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
1889  * ignored.
1890  *
1891  * The PID used by LNet may be different from the one requested.
1892  * See LNetGetId().
1893  *
1894  * \param requested_pid PID requested by the caller.
1895  *
1896  * \return >= 0 on success, and < 0 error code on failures.
1897  */
1898 int
1899 LNetNIInit(lnet_pid_t requested_pid)
1900 {
1901         int                     im_a_router = 0;
1902         int                     rc;
1903         int                     ni_count;
1904         struct lnet_ping_buffer *pbuf;
1905         struct lnet_handle_md   ping_mdh;
1906         struct list_head        net_head;
1907         struct lnet_net         *net;
1908
1909         INIT_LIST_HEAD(&net_head);
1910
1911         mutex_lock(&the_lnet.ln_api_mutex);
1912
1913         CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
1914
1915         if (the_lnet.ln_refcount > 0) {
1916                 rc = the_lnet.ln_refcount++;
1917                 mutex_unlock(&the_lnet.ln_api_mutex);
1918                 return rc;
1919         }
1920
1921         rc = lnet_prepare(requested_pid);
1922         if (rc != 0) {
1923                 mutex_unlock(&the_lnet.ln_api_mutex);
1924                 return rc;
1925         }
1926
1927         /* create a network for Loopback network */
1928         net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
1929         if (net == NULL) {
1930                 rc = -ENOMEM;
1931                 goto err_empty_list;
1932         }
1933
1934         /* Add in the loopback NI */
1935         if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
1936                 rc = -ENOMEM;
1937                 goto err_empty_list;
1938         }
1939
1940         /* If LNet is being initialized via DLC it is possible
1941          * that the user requests not to load module parameters (ones which
1942          * are supported by DLC) on initialization.  Therefore, make sure not
1943          * to load networks, routes and forwarding from module parameters
1944          * in this case.  On cleanup in case of failure only clean up
1945          * routes if it has been loaded */
1946         if (!the_lnet.ln_nis_from_mod_params) {
1947                 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
1948                                          use_tcp_bonding);
1949                 if (rc < 0)
1950                         goto err_empty_list;
1951         }
1952
1953         ni_count = lnet_startup_lndnets(&net_head);
1954         if (ni_count < 0) {
1955                 rc = ni_count;
1956                 goto err_empty_list;
1957         }
1958
1959         if (!the_lnet.ln_nis_from_mod_params) {
1960                 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
1961                 if (rc != 0)
1962                         goto err_shutdown_lndnis;
1963
1964                 rc = lnet_check_routes();
1965                 if (rc != 0)
1966                         goto err_destroy_routes;
1967
1968                 rc = lnet_rtrpools_alloc(im_a_router);
1969                 if (rc != 0)
1970                         goto err_destroy_routes;
1971         }
1972
1973         rc = lnet_acceptor_start();
1974         if (rc != 0)
1975                 goto err_destroy_routes;
1976
1977         the_lnet.ln_refcount = 1;
1978         /* Now I may use my own API functions... */
1979
1980         rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
1981         if (rc != 0)
1982                 goto err_acceptor_stop;
1983
1984         lnet_ping_target_update(pbuf, ping_mdh);
1985
1986         rc = lnet_router_checker_start();
1987         if (rc != 0)
1988                 goto err_stop_ping;
1989
1990         lnet_fault_init();
1991         lnet_proc_init();
1992
1993         mutex_unlock(&the_lnet.ln_api_mutex);
1994
1995         return 0;
1996
1997 err_stop_ping:
1998         lnet_ping_target_fini();
1999 err_acceptor_stop:
2000         the_lnet.ln_refcount = 0;
2001         lnet_acceptor_stop();
2002 err_destroy_routes:
2003         if (!the_lnet.ln_nis_from_mod_params)
2004                 lnet_destroy_routes();
2005 err_shutdown_lndnis:
2006         lnet_shutdown_lndnets();
2007 err_empty_list:
2008         lnet_unprepare();
2009         LASSERT(rc < 0);
2010         mutex_unlock(&the_lnet.ln_api_mutex);
2011         while (!list_empty(&net_head)) {
2012                 struct lnet_net *net;
2013
2014                 net = list_entry(net_head.next, struct lnet_net, net_list);
2015                 list_del_init(&net->net_list);
2016                 lnet_net_free(net);
2017         }
2018         return rc;
2019 }
2020 EXPORT_SYMBOL(LNetNIInit);
2021
2022 /**
2023  * Stop LNet interfaces, routing, and forwarding.
2024  *
2025  * Users must call this function once for each successful call to LNetNIInit().
2026  * Once the LNetNIFini() operation has been started, the results of pending
2027  * API operations are undefined.
2028  *
2029  * \return always 0 for current implementation.
2030  */
2031 int
2032 LNetNIFini()
2033 {
2034         mutex_lock(&the_lnet.ln_api_mutex);
2035
2036         LASSERT(the_lnet.ln_refcount > 0);
2037
2038         if (the_lnet.ln_refcount != 1) {
2039                 the_lnet.ln_refcount--;
2040         } else {
2041                 LASSERT(!the_lnet.ln_niinit_self);
2042
2043                 lnet_fault_fini();
2044
2045                 lnet_proc_fini();
2046                 lnet_router_checker_stop();
2047                 lnet_ping_target_fini();
2048
2049                 /* Teardown fns that use my own API functions BEFORE here */
2050                 the_lnet.ln_refcount = 0;
2051
2052                 lnet_acceptor_stop();
2053                 lnet_destroy_routes();
2054                 lnet_shutdown_lndnets();
2055                 lnet_unprepare();
2056         }
2057
2058         mutex_unlock(&the_lnet.ln_api_mutex);
2059         return 0;
2060 }
2061 EXPORT_SYMBOL(LNetNIFini);
2062
2063 /**
2064  * Grabs the ni data from the ni structure and fills the out
2065  * parameters
2066  *
2067  * \param[in] ni network        interface structure
2068  * \param[out] cfg_ni           NI config information
2069  * \param[out] tun              network and LND tunables
2070  */
2071 static void
2072 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2073                    struct lnet_ioctl_config_lnd_tunables *tun,
2074                    struct lnet_ioctl_element_stats *stats,
2075                    __u32 tun_size)
2076 {
2077         size_t min_size = 0;
2078         int i;
2079
2080         if (!ni || !cfg_ni || !tun)
2081                 return;
2082
2083         if (ni->ni_interfaces[0] != NULL) {
2084                 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2085                         if (ni->ni_interfaces[i] != NULL) {
2086                                 strncpy(cfg_ni->lic_ni_intf[i],
2087                                         ni->ni_interfaces[i],
2088                                         sizeof(cfg_ni->lic_ni_intf[i]));
2089                         }
2090                 }
2091         }
2092
2093         cfg_ni->lic_nid = ni->ni_nid;
2094         if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2095                 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2096         else
2097                 cfg_ni->lic_status = ni->ni_status->ns_status;
2098         cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2099         cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2100
2101         memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2102
2103         if (stats) {
2104                 stats->iel_send_count = atomic_read(&ni->ni_stats.send_count);
2105                 stats->iel_recv_count = atomic_read(&ni->ni_stats.recv_count);
2106         }
2107
2108         /*
2109          * tun->lt_tun will always be present, but in order to be
2110          * backwards compatible, we need to deal with the cases when
2111          * tun->lt_tun is smaller than what the kernel has, because it
2112          * comes from an older version of a userspace program, then we'll
2113          * need to copy as much information as we have available space.
2114          */
2115         min_size = tun_size - sizeof(tun->lt_cmn);
2116         memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2117
2118         /* copy over the cpts */
2119         if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2120             ni->ni_cpts == NULL)  {
2121                 for (i = 0; i < ni->ni_ncpts; i++)
2122                         cfg_ni->lic_cpts[i] = i;
2123         } else {
2124                 for (i = 0;
2125                      ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2126                      i < LNET_MAX_SHOW_NUM_CPT;
2127                      i++)
2128                         cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2129         }
2130         cfg_ni->lic_ncpts = ni->ni_ncpts;
2131 }
2132
2133 /**
2134  * NOTE: This is a legacy function left in the code to be backwards
2135  * compatible with older userspace programs. It should eventually be
2136  * removed.
2137  *
2138  * Grabs the ni data from the ni structure and fills the out
2139  * parameters
2140  *
2141  * \param[in] ni network        interface structure
2142  * \param[out] config           config information
2143  */
2144 static void
2145 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2146                          struct lnet_ioctl_config_data *config)
2147 {
2148         struct lnet_ioctl_net_config *net_config;
2149         struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2150         size_t min_size, tunable_size = 0;
2151         int i;
2152
2153         if (!ni || !config)
2154                 return;
2155
2156         net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2157         if (!net_config)
2158                 return;
2159
2160         BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2161                      ARRAY_SIZE(net_config->ni_interfaces));
2162
2163         for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2164                 if (!ni->ni_interfaces[i])
2165                         break;
2166
2167                 strncpy(net_config->ni_interfaces[i],
2168                         ni->ni_interfaces[i],
2169                         sizeof(net_config->ni_interfaces[i]));
2170         }
2171
2172         config->cfg_nid = ni->ni_nid;
2173         config->cfg_config_u.cfg_net.net_peer_timeout =
2174                 ni->ni_net->net_tunables.lct_peer_timeout;
2175         config->cfg_config_u.cfg_net.net_max_tx_credits =
2176                 ni->ni_net->net_tunables.lct_max_tx_credits;
2177         config->cfg_config_u.cfg_net.net_peer_tx_credits =
2178                 ni->ni_net->net_tunables.lct_peer_tx_credits;
2179         config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2180                 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2181
2182         if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2183                 net_config->ni_status = LNET_NI_STATUS_UP;
2184         else
2185                 net_config->ni_status = ni->ni_status->ns_status;
2186
2187         if (ni->ni_cpts) {
2188                 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2189
2190                 for (i = 0; i < num_cpts; i++)
2191                         net_config->ni_cpts[i] = ni->ni_cpts[i];
2192
2193                 config->cfg_ncpts = num_cpts;
2194         }
2195
2196         /*
2197          * See if user land tools sent in a newer and larger version
2198          * of struct lnet_tunables than what the kernel uses.
2199          */
2200         min_size = sizeof(*config) + sizeof(*net_config);
2201
2202         if (config->cfg_hdr.ioc_len > min_size)
2203                 tunable_size = config->cfg_hdr.ioc_len - min_size;
2204
2205         /* Don't copy too much data to user space */
2206         min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2207         lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2208
2209         if (lnd_cfg && min_size) {
2210                 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2211                 config->cfg_config_u.cfg_net.net_interface_count = 1;
2212
2213                 /* Tell user land that kernel side has less data */
2214                 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2215                         min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2216                         config->cfg_hdr.ioc_len -= min_size;
2217                 }
2218         }
2219 }
2220
2221 struct lnet_ni *
2222 lnet_get_ni_idx_locked(int idx)
2223 {
2224         struct lnet_ni          *ni;
2225         struct lnet_net         *net;
2226
2227         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2228                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2229                         if (idx-- == 0)
2230                                 return ni;
2231                 }
2232         }
2233
2234         return NULL;
2235 }
2236
2237 struct lnet_ni *
2238 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2239 {
2240         struct lnet_ni          *ni;
2241         struct lnet_net         *net = mynet;
2242
2243         if (prev == NULL) {
2244                 if (net == NULL)
2245                         net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2246                                         net_list);
2247                 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2248                                 ni_netlist);
2249
2250                 return ni;
2251         }
2252
2253         if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2254                 /* if you reached the end of the ni list and the net is
2255                  * specified, then there are no more nis in that net */
2256                 if (net != NULL)
2257                         return NULL;
2258
2259                 /* we reached the end of this net ni list. move to the
2260                  * next net */
2261                 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2262                         /* no more nets and no more NIs. */
2263                         return NULL;
2264
2265                 /* get the next net */
2266                 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2267                                  net_list);
2268                 /* get the ni on it */
2269                 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2270                                 ni_netlist);
2271
2272                 return ni;
2273         }
2274
2275         /* there are more nis left */
2276         ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2277
2278         return ni;
2279 }
2280
2281 int
2282 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2283 {
2284         struct lnet_ni *ni;
2285         int cpt;
2286         int rc = -ENOENT;
2287         int idx = config->cfg_count;
2288
2289         cpt = lnet_net_lock_current();
2290
2291         ni = lnet_get_ni_idx_locked(idx);
2292
2293         if (ni != NULL) {
2294                 rc = 0;
2295                 lnet_ni_lock(ni);
2296                 lnet_fill_ni_info_legacy(ni, config);
2297                 lnet_ni_unlock(ni);
2298         }
2299
2300         lnet_net_unlock(cpt);
2301         return rc;
2302 }
2303
2304 int
2305 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2306                    struct lnet_ioctl_config_lnd_tunables *tun,
2307                    struct lnet_ioctl_element_stats *stats,
2308                    __u32 tun_size)
2309 {
2310         struct lnet_ni          *ni;
2311         int                     cpt;
2312         int                     rc = -ENOENT;
2313
2314         if (!cfg_ni || !tun || !stats)
2315                 return -EINVAL;
2316
2317         cpt = lnet_net_lock_current();
2318
2319         ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2320
2321         if (ni) {
2322                 rc = 0;
2323                 lnet_ni_lock(ni);
2324                 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2325                 lnet_ni_unlock(ni);
2326         }
2327
2328         lnet_net_unlock(cpt);
2329         return rc;
2330 }
2331
2332 static int lnet_add_net_common(struct lnet_net *net,
2333                                struct lnet_ioctl_config_lnd_tunables *tun)
2334 {
2335         __u32                   net_id;
2336         struct lnet_ping_buffer *pbuf;
2337         struct lnet_handle_md   ping_mdh;
2338         int                     rc;
2339         struct lnet_remotenet *rnet;
2340         int                     net_ni_count;
2341         int                     num_acceptor_nets;
2342
2343         lnet_net_lock(LNET_LOCK_EX);
2344         rnet = lnet_find_rnet_locked(net->net_id);
2345         lnet_net_unlock(LNET_LOCK_EX);
2346         /*
2347          * make sure that the net added doesn't invalidate the current
2348          * configuration LNet is keeping
2349          */
2350         if (rnet) {
2351                 CERROR("Adding net %s will invalidate routing configuration\n",
2352                        libcfs_net2str(net->net_id));
2353                 lnet_net_free(net);
2354                 return -EUSERS;
2355         }
2356
2357         /*
2358          * make sure you calculate the correct number of slots in the ping
2359          * buffer. Since the ping info is a flattened list of all the NIs,
2360          * we should allocate enough slots to accomodate the number of NIs
2361          * which will be added.
2362          *
2363          * since ni hasn't been configured yet, use
2364          * lnet_get_net_ni_count_pre() which checks the net_ni_added list
2365          */
2366         net_ni_count = lnet_get_net_ni_count_pre(net);
2367
2368         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2369                                     net_ni_count + lnet_get_ni_count(),
2370                                     false);
2371         if (rc < 0) {
2372                 lnet_net_free(net);
2373                 return rc;
2374         }
2375
2376         if (tun)
2377                 memcpy(&net->net_tunables,
2378                        &tun->lt_cmn, sizeof(net->net_tunables));
2379         else
2380                 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
2381
2382         /*
2383          * before starting this network get a count of the current TCP
2384          * networks which require the acceptor thread running. If that
2385          * count is == 0 before we start up this network, then we'd want to
2386          * start up the acceptor thread after starting up this network
2387          */
2388         num_acceptor_nets = lnet_count_acceptor_nets();
2389
2390         net_id = net->net_id;
2391
2392         rc = lnet_startup_lndnet(net,
2393                                  (tun) ? &tun->lt_tun : NULL);
2394         if (rc < 0)
2395                 goto failed;
2396
2397         lnet_net_lock(LNET_LOCK_EX);
2398         net = lnet_get_net_locked(net_id);
2399         lnet_net_unlock(LNET_LOCK_EX);
2400
2401         LASSERT(net);
2402
2403         /*
2404          * Start the acceptor thread if this is the first network
2405          * being added that requires the thread.
2406          */
2407         if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2408                 rc = lnet_acceptor_start();
2409                 if (rc < 0) {
2410                         /* shutdown the net that we just started */
2411                         CERROR("Failed to start up acceptor thread\n");
2412                         lnet_shutdown_lndnet(net);
2413                         goto failed;
2414                 }
2415         }
2416
2417         lnet_net_lock(LNET_LOCK_EX);
2418         lnet_peer_net_added(net);
2419         lnet_net_unlock(LNET_LOCK_EX);
2420
2421         lnet_ping_target_update(pbuf, ping_mdh);
2422
2423         return 0;
2424
2425 failed:
2426         lnet_ping_md_unlink(pbuf, &ping_mdh);
2427         lnet_ping_buffer_decref(pbuf);
2428         return rc;
2429 }
2430
2431 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2432                                       struct lnet_ioctl_config_lnd_tunables *tun)
2433 {
2434         struct lnet_net *net;
2435         char *nets;
2436         int rc;
2437         struct list_head net_head;
2438
2439         INIT_LIST_HEAD(&net_head);
2440
2441         rc = lnet_parse_ip2nets(&nets, ip2nets);
2442         if (rc < 0)
2443                 return rc;
2444
2445         rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2446         if (rc < 0)
2447                 return rc;
2448
2449         mutex_lock(&the_lnet.ln_api_mutex);
2450         while (!list_empty(&net_head)) {
2451                 net = list_entry(net_head.next, struct lnet_net, net_list);
2452                 list_del_init(&net->net_list);
2453                 rc = lnet_add_net_common(net, tun);
2454                 if (rc < 0)
2455                         goto out;
2456         }
2457
2458 out:
2459         mutex_unlock(&the_lnet.ln_api_mutex);
2460
2461         while (!list_empty(&net_head)) {
2462                 net = list_entry(net_head.next, struct lnet_net, net_list);
2463                 list_del_init(&net->net_list);
2464                 lnet_net_free(net);
2465         }
2466         return rc;
2467 }
2468
2469 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2470 {
2471         struct lnet_net *net;
2472         struct lnet_ni *ni;
2473         struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2474         int rc, i;
2475         __u32 net_id;
2476
2477         /* get the tunables if they are available */
2478         if (conf->lic_cfg_hdr.ioc_len >=
2479             sizeof(*conf) + sizeof(*tun))
2480                 tun = (struct lnet_ioctl_config_lnd_tunables *)
2481                         conf->lic_bulk;
2482
2483         /* handle legacy ip2nets from DLC */
2484         if (conf->lic_legacy_ip2nets[0] != '\0')
2485                 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
2486                                                   tun);
2487
2488         net_id = LNET_NIDNET(conf->lic_nid);
2489
2490         net = lnet_net_alloc(net_id, NULL);
2491         if (!net)
2492                 return -ENOMEM;
2493
2494         for (i = 0; i < conf->lic_ncpts; i++) {
2495                 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
2496                         return -EINVAL;
2497         }
2498
2499         ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
2500                                        conf->lic_ni_intf[0]);
2501         if (!ni)
2502                 return -ENOMEM;
2503
2504         mutex_lock(&the_lnet.ln_api_mutex);
2505
2506         rc = lnet_add_net_common(net, tun);
2507
2508         mutex_unlock(&the_lnet.ln_api_mutex);
2509
2510         return rc;
2511 }
2512
2513 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
2514 {
2515         struct lnet_net  *net;
2516         struct lnet_ni *ni;
2517         __u32 net_id = LNET_NIDNET(conf->lic_nid);
2518         struct lnet_ping_buffer *pbuf;
2519         struct lnet_handle_md  ping_mdh;
2520         int               rc;
2521         int               net_count;
2522         __u32             addr;
2523
2524         /* don't allow userspace to shutdown the LOLND */
2525         if (LNET_NETTYP(net_id) == LOLND)
2526                 return -EINVAL;
2527
2528         mutex_lock(&the_lnet.ln_api_mutex);
2529
2530         lnet_net_lock(0);
2531
2532         net = lnet_get_net_locked(net_id);
2533         if (!net) {
2534                 CERROR("net %s not found\n",
2535                        libcfs_net2str(net_id));
2536                 rc = -ENOENT;
2537                 goto unlock_net;
2538         }
2539
2540         addr = LNET_NIDADDR(conf->lic_nid);
2541         if (addr == 0) {
2542                 /* remove the entire net */
2543                 net_count = lnet_get_net_ni_count_locked(net);
2544
2545                 lnet_net_unlock(0);
2546
2547                 /* create and link a new ping info, before removing the old one */
2548                 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2549                                         lnet_get_ni_count() - net_count,
2550                                         false);
2551                 if (rc != 0)
2552                         goto unlock_api_mutex;
2553
2554                 lnet_shutdown_lndnet(net);
2555
2556                 if (lnet_count_acceptor_nets() == 0)
2557                         lnet_acceptor_stop();
2558
2559                 lnet_ping_target_update(pbuf, ping_mdh);
2560
2561                 goto unlock_api_mutex;
2562         }
2563
2564         ni = lnet_nid2ni_locked(conf->lic_nid, 0);
2565         if (!ni) {
2566                 CERROR("nid %s not found\n",
2567                        libcfs_nid2str(conf->lic_nid));
2568                 rc = -ENOENT;
2569                 goto unlock_net;
2570         }
2571
2572         net_count = lnet_get_net_ni_count_locked(net);
2573
2574         lnet_net_unlock(0);
2575
2576         /* create and link a new ping info, before removing the old one */
2577         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2578                                   lnet_get_ni_count() - 1, false);
2579         if (rc != 0)
2580                 goto unlock_api_mutex;
2581
2582         lnet_shutdown_lndni(ni);
2583
2584         if (lnet_count_acceptor_nets() == 0)
2585                 lnet_acceptor_stop();
2586
2587         lnet_ping_target_update(pbuf, ping_mdh);
2588
2589         /* check if the net is empty and remove it if it is */
2590         if (net_count == 1)
2591                 lnet_shutdown_lndnet(net);
2592
2593         goto unlock_api_mutex;
2594
2595 unlock_net:
2596         lnet_net_unlock(0);
2597 unlock_api_mutex:
2598         mutex_unlock(&the_lnet.ln_api_mutex);
2599
2600         return rc;
2601 }
2602
2603 /*
2604  * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
2605  * They are only expected to be called for unique networks.
2606  * That can be as a result of older DLC library
2607  * calls. Multi-Rail DLC and beyond no longer uses these APIs.
2608  */
2609 int
2610 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
2611 {
2612         struct lnet_net         *net;
2613         struct list_head        net_head;
2614         int                     rc;
2615         struct lnet_ioctl_config_lnd_tunables tun;
2616         char *nets = conf->cfg_config_u.cfg_net.net_intf;
2617
2618         INIT_LIST_HEAD(&net_head);
2619
2620         /* Create a net/ni structures for the network string */
2621         rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2622         if (rc <= 0)
2623                 return rc == 0 ? -EINVAL : rc;
2624
2625         mutex_lock(&the_lnet.ln_api_mutex);
2626
2627         if (rc > 1) {
2628                 rc = -EINVAL; /* only add one network per call */
2629                 goto out_unlock_clean;
2630         }
2631
2632         net = list_entry(net_head.next, struct lnet_net, net_list);
2633         list_del_init(&net->net_list);
2634
2635         LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
2636
2637         memset(&tun, 0, sizeof(tun));
2638
2639         tun.lt_cmn.lct_peer_timeout =
2640           conf->cfg_config_u.cfg_net.net_peer_timeout;
2641         tun.lt_cmn.lct_peer_tx_credits =
2642           conf->cfg_config_u.cfg_net.net_peer_tx_credits;
2643         tun.lt_cmn.lct_peer_rtr_credits =
2644           conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
2645         tun.lt_cmn.lct_max_tx_credits =
2646           conf->cfg_config_u.cfg_net.net_max_tx_credits;
2647
2648         rc = lnet_add_net_common(net, &tun);
2649
2650 out_unlock_clean:
2651         mutex_unlock(&the_lnet.ln_api_mutex);
2652         while (!list_empty(&net_head)) {
2653                 /* net_head list is empty in success case */
2654                 net = list_entry(net_head.next, struct lnet_net, net_list);
2655                 list_del_init(&net->net_list);
2656                 lnet_net_free(net);
2657         }
2658         return rc;
2659 }
2660
2661 int
2662 lnet_dyn_del_net(__u32 net_id)
2663 {
2664         struct lnet_net  *net;
2665         struct lnet_ping_buffer *pbuf;
2666         struct lnet_handle_md ping_mdh;
2667         int               rc;
2668         int               net_ni_count;
2669
2670         /* don't allow userspace to shutdown the LOLND */
2671         if (LNET_NETTYP(net_id) == LOLND)
2672                 return -EINVAL;
2673
2674         mutex_lock(&the_lnet.ln_api_mutex);
2675
2676         lnet_net_lock(0);
2677
2678         net = lnet_get_net_locked(net_id);
2679         if (net == NULL) {
2680                 lnet_net_unlock(0);
2681                 rc = -EINVAL;
2682                 goto out;
2683         }
2684
2685         net_ni_count = lnet_get_net_ni_count_locked(net);
2686
2687         lnet_net_unlock(0);
2688
2689         /* create and link a new ping info, before removing the old one */
2690         rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2691                                     lnet_get_ni_count() - net_ni_count, false);
2692         if (rc != 0)
2693                 goto out;
2694
2695         lnet_shutdown_lndnet(net);
2696
2697         if (lnet_count_acceptor_nets() == 0)
2698                 lnet_acceptor_stop();
2699
2700         lnet_ping_target_update(pbuf, ping_mdh);
2701
2702 out:
2703         mutex_unlock(&the_lnet.ln_api_mutex);
2704
2705         return rc;
2706 }
2707
2708 void lnet_incr_dlc_seq(void)
2709 {
2710         atomic_inc(&lnet_dlc_seq_no);
2711 }
2712
2713 __u32 lnet_get_dlc_seq_locked(void)
2714 {
2715         return atomic_read(&lnet_dlc_seq_no);
2716 }
2717
2718 /**
2719  * LNet ioctl handler.
2720  *
2721  */
2722 int
2723 LNetCtl(unsigned int cmd, void *arg)
2724 {
2725         struct libcfs_ioctl_data *data = arg;
2726         struct lnet_ioctl_config_data *config;
2727         struct lnet_process_id    id = {0};
2728         struct lnet_ni           *ni;
2729         int                       rc;
2730
2731         BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
2732                      sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
2733
2734         switch (cmd) {
2735         case IOC_LIBCFS_GET_NI:
2736                 rc = LNetGetId(data->ioc_count, &id);
2737                 data->ioc_nid = id.nid;
2738                 return rc;
2739
2740         case IOC_LIBCFS_FAIL_NID:
2741                 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
2742
2743         case IOC_LIBCFS_ADD_ROUTE:
2744                 config = arg;
2745
2746                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2747                         return -EINVAL;
2748
2749                 mutex_lock(&the_lnet.ln_api_mutex);
2750                 rc = lnet_add_route(config->cfg_net,
2751                                     config->cfg_config_u.cfg_route.rtr_hop,
2752                                     config->cfg_nid,
2753                                     config->cfg_config_u.cfg_route.
2754                                         rtr_priority);
2755                 if (rc == 0) {
2756                         rc = lnet_check_routes();
2757                         if (rc != 0)
2758                                 lnet_del_route(config->cfg_net,
2759                                                config->cfg_nid);
2760                 }
2761                 mutex_unlock(&the_lnet.ln_api_mutex);
2762                 return rc;
2763
2764         case IOC_LIBCFS_DEL_ROUTE:
2765                 config = arg;
2766
2767                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2768                         return -EINVAL;
2769
2770                 mutex_lock(&the_lnet.ln_api_mutex);
2771                 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
2772                 mutex_unlock(&the_lnet.ln_api_mutex);
2773                 return rc;
2774
2775         case IOC_LIBCFS_GET_ROUTE:
2776                 config = arg;
2777
2778                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2779                         return -EINVAL;
2780
2781                 mutex_lock(&the_lnet.ln_api_mutex);
2782                 rc = lnet_get_route(config->cfg_count,
2783                                     &config->cfg_net,
2784                                     &config->cfg_config_u.cfg_route.rtr_hop,
2785                                     &config->cfg_nid,
2786                                     &config->cfg_config_u.cfg_route.rtr_flags,
2787                                     &config->cfg_config_u.cfg_route.
2788                                         rtr_priority);
2789                 mutex_unlock(&the_lnet.ln_api_mutex);
2790                 return rc;
2791
2792         case IOC_LIBCFS_GET_LOCAL_NI: {
2793                 struct lnet_ioctl_config_ni *cfg_ni;
2794                 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2795                 struct lnet_ioctl_element_stats *stats;
2796                 __u32 tun_size;
2797
2798                 cfg_ni = arg;
2799                 /* get the tunables if they are available */
2800                 if (cfg_ni->lic_cfg_hdr.ioc_len <
2801                     sizeof(*cfg_ni) + sizeof(*stats)+ sizeof(*tun))
2802                         return -EINVAL;
2803
2804                 stats = (struct lnet_ioctl_element_stats *)
2805                         cfg_ni->lic_bulk;
2806                 tun = (struct lnet_ioctl_config_lnd_tunables *)
2807                                 (cfg_ni->lic_bulk + sizeof(*stats));
2808
2809                 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
2810                         sizeof(*stats);
2811
2812                 mutex_lock(&the_lnet.ln_api_mutex);
2813                 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
2814                 mutex_unlock(&the_lnet.ln_api_mutex);
2815                 return rc;
2816         }
2817
2818         case IOC_LIBCFS_GET_NET: {
2819                 size_t total = sizeof(*config) +
2820                                sizeof(struct lnet_ioctl_net_config);
2821                 config = arg;
2822
2823                 if (config->cfg_hdr.ioc_len < total)
2824                         return -EINVAL;
2825
2826                 mutex_lock(&the_lnet.ln_api_mutex);
2827                 rc = lnet_get_net_config(config);
2828                 mutex_unlock(&the_lnet.ln_api_mutex);
2829                 return rc;
2830         }
2831
2832         case IOC_LIBCFS_GET_LNET_STATS:
2833         {
2834                 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
2835
2836                 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
2837                         return -EINVAL;
2838
2839                 mutex_lock(&the_lnet.ln_api_mutex);
2840                 lnet_counters_get(&lnet_stats->st_cntrs);
2841                 mutex_unlock(&the_lnet.ln_api_mutex);
2842                 return 0;
2843         }
2844
2845         case IOC_LIBCFS_CONFIG_RTR:
2846                 config = arg;
2847
2848                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2849                         return -EINVAL;
2850
2851                 mutex_lock(&the_lnet.ln_api_mutex);
2852                 if (config->cfg_config_u.cfg_buffers.buf_enable) {
2853                         rc = lnet_rtrpools_enable();
2854                         mutex_unlock(&the_lnet.ln_api_mutex);
2855                         return rc;
2856                 }
2857                 lnet_rtrpools_disable();
2858                 mutex_unlock(&the_lnet.ln_api_mutex);
2859                 return 0;
2860
2861         case IOC_LIBCFS_ADD_BUF:
2862                 config = arg;
2863
2864                 if (config->cfg_hdr.ioc_len < sizeof(*config))
2865                         return -EINVAL;
2866
2867                 mutex_lock(&the_lnet.ln_api_mutex);
2868                 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
2869                                                 buf_tiny,
2870                                           config->cfg_config_u.cfg_buffers.
2871                                                 buf_small,
2872                                           config->cfg_config_u.cfg_buffers.
2873                                                 buf_large);
2874                 mutex_unlock(&the_lnet.ln_api_mutex);
2875                 return rc;
2876
2877         case IOC_LIBCFS_SET_NUMA_RANGE: {
2878                 struct lnet_ioctl_set_value *numa;
2879                 numa = arg;
2880                 if (numa->sv_hdr.ioc_len != sizeof(*numa))
2881                         return -EINVAL;
2882                 lnet_net_lock(LNET_LOCK_EX);
2883                 lnet_numa_range = numa->sv_value;
2884                 lnet_net_unlock(LNET_LOCK_EX);
2885                 return 0;
2886         }
2887
2888         case IOC_LIBCFS_GET_NUMA_RANGE: {
2889                 struct lnet_ioctl_set_value *numa;
2890                 numa = arg;
2891                 if (numa->sv_hdr.ioc_len != sizeof(*numa))
2892                         return -EINVAL;
2893                 numa->sv_value = lnet_numa_range;
2894                 return 0;
2895         }
2896
2897         case IOC_LIBCFS_GET_BUF: {
2898                 struct lnet_ioctl_pool_cfg *pool_cfg;
2899                 size_t total = sizeof(*config) + sizeof(*pool_cfg);
2900
2901                 config = arg;
2902
2903                 if (config->cfg_hdr.ioc_len < total)
2904                         return -EINVAL;
2905
2906                 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
2907
2908                 mutex_lock(&the_lnet.ln_api_mutex);
2909                 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
2910                 mutex_unlock(&the_lnet.ln_api_mutex);
2911                 return rc;
2912         }
2913
2914         case IOC_LIBCFS_ADD_PEER_NI: {
2915                 struct lnet_ioctl_peer_cfg *cfg = arg;
2916
2917                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
2918                         return -EINVAL;
2919
2920                 mutex_lock(&the_lnet.ln_api_mutex);
2921                 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
2922                                       cfg->prcfg_cfg_nid,
2923                                       cfg->prcfg_mr);
2924                 mutex_unlock(&the_lnet.ln_api_mutex);
2925                 return rc;
2926         }
2927
2928         case IOC_LIBCFS_DEL_PEER_NI: {
2929                 struct lnet_ioctl_peer_cfg *cfg = arg;
2930
2931                 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
2932                         return -EINVAL;
2933
2934                 mutex_lock(&the_lnet.ln_api_mutex);
2935                 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
2936                                       cfg->prcfg_cfg_nid);
2937                 mutex_unlock(&the_lnet.ln_api_mutex);
2938                 return rc;
2939         }
2940
2941         case IOC_LIBCFS_GET_PEER_INFO: {
2942                 struct lnet_ioctl_peer *peer_info = arg;
2943
2944                 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
2945                         return -EINVAL;
2946
2947                 mutex_lock(&the_lnet.ln_api_mutex);
2948                 rc = lnet_get_peer_ni_info(
2949                    peer_info->pr_count,
2950                    &peer_info->pr_nid,
2951                    peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
2952                    &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
2953                    &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
2954                    &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
2955                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
2956                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
2957                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
2958                    &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
2959                 mutex_unlock(&the_lnet.ln_api_mutex);
2960                 return rc;
2961         }
2962
2963         case IOC_LIBCFS_GET_PEER_NI: {
2964                 struct lnet_ioctl_peer_cfg *cfg = arg;
2965                 struct lnet_peer_ni_credit_info __user *lpni_cri;
2966                 struct lnet_ioctl_element_stats __user *lpni_stats;
2967                 size_t usr_size = sizeof(*lpni_cri) + sizeof(*lpni_stats);
2968
2969                 if ((cfg->prcfg_hdr.ioc_len != sizeof(*cfg)) ||
2970                     (cfg->prcfg_size != usr_size))
2971                         return -EINVAL;
2972
2973                 lpni_cri = cfg->prcfg_bulk;
2974                 lpni_stats = cfg->prcfg_bulk + sizeof(*lpni_cri);
2975
2976                 mutex_lock(&the_lnet.ln_api_mutex);
2977                 rc = lnet_get_peer_info(cfg->prcfg_count, &cfg->prcfg_prim_nid,
2978                                         &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
2979                                         lpni_cri, lpni_stats);
2980                 mutex_unlock(&the_lnet.ln_api_mutex);
2981                 return rc;
2982         }
2983
2984         case IOC_LIBCFS_NOTIFY_ROUTER: {
2985                 unsigned long jiffies_passed;
2986
2987                 jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
2988                 jiffies_passed = cfs_time_seconds(jiffies_passed);
2989
2990                 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
2991                                    jiffies - jiffies_passed);
2992         }
2993
2994         case IOC_LIBCFS_LNET_DIST:
2995                 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
2996                 if (rc < 0 && rc != -EHOSTUNREACH)
2997                         return rc;
2998
2999                 data->ioc_u32[0] = rc;
3000                 return 0;
3001
3002         case IOC_LIBCFS_TESTPROTOCOMPAT:
3003                 lnet_net_lock(LNET_LOCK_EX);
3004                 the_lnet.ln_testprotocompat = data->ioc_flags;
3005                 lnet_net_unlock(LNET_LOCK_EX);
3006                 return 0;
3007
3008         case IOC_LIBCFS_LNET_FAULT:
3009                 return lnet_fault_ctl(data->ioc_flags, data);
3010
3011         case IOC_LIBCFS_PING: {
3012                 signed long timeout;
3013
3014                 id.nid = data->ioc_nid;
3015                 id.pid = data->ioc_u32[0];
3016
3017                 /* Don't block longer than 2 minutes */
3018                 if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
3019                         return -EINVAL;
3020
3021                 /* If timestamp is negative then disable timeout */
3022                 if ((s32)data->ioc_u32[1] < 0)
3023                         timeout = MAX_SCHEDULE_TIMEOUT;
3024                 else
3025                         timeout = msecs_to_jiffies(data->ioc_u32[1]);
3026
3027                 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3028                                data->ioc_plen1 / sizeof(struct lnet_process_id));
3029                 if (rc < 0)
3030                         return rc;
3031                 data->ioc_count = rc;
3032                 return 0;
3033         }
3034
3035         default:
3036                 ni = lnet_net2ni_addref(data->ioc_net);
3037                 if (ni == NULL)
3038                         return -EINVAL;
3039
3040                 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3041                         rc = -EINVAL;
3042                 else
3043                         rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3044
3045                 lnet_ni_decref(ni);
3046                 return rc;
3047         }
3048         /* not reached */
3049 }
3050 EXPORT_SYMBOL(LNetCtl);
3051
3052 void LNetDebugPeer(struct lnet_process_id id)
3053 {
3054         lnet_debug_peer(id.nid);
3055 }
3056 EXPORT_SYMBOL(LNetDebugPeer);
3057
3058 /**
3059  * Determine if the specified peer \a nid is on the local node.
3060  *
3061  * \param nid   peer nid to check
3062  *
3063  * \retval true         If peer NID is on the local node.
3064  * \retval false        If peer NID is not on the local node.
3065  */
3066 bool LNetIsPeerLocal(lnet_nid_t nid)
3067 {
3068         struct lnet_net *net;
3069         struct lnet_ni *ni;
3070         int cpt;
3071
3072         cpt = lnet_net_lock_current();
3073         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3074                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3075                         if (ni->ni_nid == nid) {
3076                                 lnet_net_unlock(cpt);
3077                                 return true;
3078                         }
3079                 }
3080         }
3081         lnet_net_unlock(cpt);
3082
3083         return false;
3084 }
3085 EXPORT_SYMBOL(LNetIsPeerLocal);
3086
3087 /**
3088  * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3089  * Note that all interfaces share a same PID, as requested by LNetNIInit().
3090  *
3091  * \param index Index of the interface to look up.
3092  * \param id On successful return, this location will hold the
3093  * struct lnet_process_id ID of the interface.
3094  *
3095  * \retval 0 If an interface exists at \a index.
3096  * \retval -ENOENT If no interface has been found.
3097  */
3098 int
3099 LNetGetId(unsigned int index, struct lnet_process_id *id)
3100 {
3101         struct lnet_ni   *ni;
3102         struct lnet_net  *net;
3103         int               cpt;
3104         int               rc = -ENOENT;
3105
3106         LASSERT(the_lnet.ln_refcount > 0);
3107
3108         cpt = lnet_net_lock_current();
3109
3110         list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3111                 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3112                         if (index-- != 0)
3113                                 continue;
3114
3115                         id->nid = ni->ni_nid;
3116                         id->pid = the_lnet.ln_pid;
3117                         rc = 0;
3118                         break;
3119                 }
3120         }
3121
3122         lnet_net_unlock(cpt);
3123         return rc;
3124 }
3125 EXPORT_SYMBOL(LNetGetId);
3126
3127 static int lnet_ping(struct lnet_process_id id, signed long timeout,
3128                      struct lnet_process_id __user *ids, int n_ids)
3129 {
3130         struct lnet_handle_eq eqh;
3131         struct lnet_handle_md mdh;
3132         struct lnet_event event;
3133         struct lnet_md md = { NULL };
3134         int which;
3135         int unlinked = 0;
3136         int replied = 0;
3137         const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
3138         struct lnet_ping_buffer *pbuf;
3139         struct lnet_process_id tmpid;
3140         int i;
3141         int nob;
3142         int rc;
3143         int rc2;
3144         sigset_t blocked;
3145
3146         /* n_ids limit is arbitrary */
3147         if (n_ids <= 0 || n_ids > lnet_interfaces_max || id.nid == LNET_NID_ANY)
3148                 return -EINVAL;
3149
3150         if (id.pid == LNET_PID_ANY)
3151                 id.pid = LNET_PID_LUSTRE;
3152
3153         pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
3154         if (!pbuf)
3155                 return -ENOMEM;
3156
3157         /* NB 2 events max (including any unlink event) */
3158         rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
3159         if (rc != 0) {
3160                 CERROR("Can't allocate EQ: %d\n", rc);
3161                 goto fail_ping_buffer_decref;
3162         }
3163
3164         /* initialize md content */
3165         md.start     = &pbuf->pb_info;
3166         md.length    = LNET_PING_INFO_SIZE(n_ids);
3167         md.threshold = 2; /*GET/REPLY*/
3168         md.max_size  = 0;
3169         md.options   = LNET_MD_TRUNCATE;
3170         md.user_ptr  = NULL;
3171         md.eq_handle = eqh;
3172
3173         rc = LNetMDBind(md, LNET_UNLINK, &mdh);
3174         if (rc != 0) {
3175                 CERROR("Can't bind MD: %d\n", rc);
3176                 goto fail_free_eq;
3177         }
3178
3179         rc = LNetGet(LNET_NID_ANY, mdh, id,
3180                      LNET_RESERVED_PORTAL,
3181                      LNET_PROTO_PING_MATCHBITS, 0);
3182
3183         if (rc != 0) {
3184                 /* Don't CERROR; this could be deliberate! */
3185
3186                 rc2 = LNetMDUnlink(mdh);
3187                 LASSERT(rc2 == 0);
3188
3189                 /* NB must wait for the UNLINK event below... */
3190                 unlinked = 1;
3191                 timeout = a_long_time;
3192         }
3193
3194         do {
3195                 /* MUST block for unlink to complete */
3196                 if (unlinked)
3197                         blocked = cfs_block_allsigs();
3198
3199                 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
3200
3201                 if (unlinked)
3202                         cfs_restore_sigs(blocked);
3203
3204                 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
3205                        (rc2 <= 0) ? -1 : event.type,
3206                        (rc2 <= 0) ? -1 : event.status,
3207                        (rc2 > 0 && event.unlinked) ? " unlinked" : "");
3208
3209                 LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */
3210
3211                 if (rc2 <= 0 || event.status != 0) {
3212                         /* timeout or error */
3213                         if (!replied && rc == 0)
3214                                 rc = (rc2 < 0) ? rc2 :
3215                                      (rc2 == 0) ? -ETIMEDOUT :
3216                                      event.status;
3217
3218                         if (!unlinked) {
3219                                 /* Ensure completion in finite time... */
3220                                 LNetMDUnlink(mdh);
3221                                 /* No assertion (racing with network) */
3222                                 unlinked = 1;
3223                                 timeout = a_long_time;
3224                         } else if (rc2 == 0) {
3225                                 /* timed out waiting for unlink */
3226                                 CWARN("ping %s: late network completion\n",
3227                                       libcfs_id2str(id));
3228                         }
3229                 } else if (event.type == LNET_EVENT_REPLY) {
3230                         replied = 1;
3231                         rc = event.mlength;
3232                 }
3233
3234         } while (rc2 <= 0 || !event.unlinked);
3235
3236         if (!replied) {
3237                 if (rc >= 0)
3238                         CWARN("%s: Unexpected rc >= 0 but no reply!\n",
3239                               libcfs_id2str(id));
3240                 rc = -EIO;
3241                 goto fail_free_eq;
3242         }
3243
3244         nob = rc;
3245         LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
3246
3247         rc = -EPROTO;                           /* if I can't parse... */
3248
3249         if (nob < 8) {
3250                 /* can't check magic/version */
3251                 CERROR("%s: ping info too short %d\n",
3252                        libcfs_id2str(id), nob);
3253                 goto fail_free_eq;
3254         }
3255
3256         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
3257                 lnet_swap_pinginfo(pbuf);
3258         } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
3259                 CERROR("%s: Unexpected magic %08x\n",
3260                        libcfs_id2str(id), pbuf->pb_info.pi_magic);
3261                 goto fail_free_eq;
3262         }
3263
3264         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
3265                 CERROR("%s: ping w/o NI status: 0x%x\n",
3266                        libcfs_id2str(id), pbuf->pb_info.pi_features);
3267                 goto fail_free_eq;
3268         }
3269
3270         if (nob < LNET_PING_INFO_SIZE(0)) {
3271                 CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
3272                        nob, (int)LNET_PING_INFO_SIZE(0));
3273                 goto fail_free_eq;
3274         }
3275
3276         if (pbuf->pb_info.pi_nnis < n_ids)
3277                 n_ids = pbuf->pb_info.pi_nnis;
3278
3279         if (nob < LNET_PING_INFO_SIZE(n_ids)) {
3280                 CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
3281                        nob, (int)LNET_PING_INFO_SIZE(n_ids));
3282                 goto fail_free_eq;
3283         }
3284
3285         rc = -EFAULT;                           /* If I SEGV... */
3286
3287         memset(&tmpid, 0, sizeof(tmpid));
3288         for (i = 0; i < n_ids; i++) {
3289                 tmpid.pid = pbuf->pb_info.pi_pid;
3290                 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
3291                 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
3292                         goto fail_free_eq;
3293         }
3294         rc = pbuf->pb_info.pi_nnis;
3295
3296  fail_free_eq:
3297         rc2 = LNetEQFree(eqh);
3298         if (rc2 != 0)
3299                 CERROR("rc2 %d\n", rc2);
3300         LASSERT(rc2 == 0);
3301
3302  fail_ping_buffer_decref:
3303         lnet_ping_buffer_decref(pbuf);
3304         return rc;
3305 }