LU-11893 ksocklnd: add secondary IP address handling
lnet/klnds/socklnd/socklnd.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include "socklnd.h"
41 #include <linux/inetdevice.h>
42
43 static struct lnet_lnd the_ksocklnd;
44 struct ksock_nal_data ksocknal_data;
45
46 static struct ksock_interface *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
48 {
49         struct ksock_net *net = ni->ni_data;
50         int i;
51         struct ksock_interface *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_INTERFACES_NUM);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return iface;
59         }
60
61         return NULL;
62 }
63
64 static struct ksock_route *
65 ksocknal_create_route(__u32 ipaddr, int port)
66 {
67         struct ksock_route *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route(struct ksock_route *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
99 static int
100 ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
101                      struct lnet_process_id id)
102 {
103         int cpt = lnet_cpt_of_nid(id.nid, ni);
104         struct ksock_net *net = ni->ni_data;
105         struct ksock_peer_ni *peer_ni;
106
107         LASSERT(id.nid != LNET_NID_ANY);
108         LASSERT(id.pid != LNET_PID_ANY);
109         LASSERT(!in_interrupt());
110
111         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
112         if (peer_ni == NULL)
113                 return -ENOMEM;
114
115         peer_ni->ksnp_ni = ni;
116         peer_ni->ksnp_id = id;
117         atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
118         peer_ni->ksnp_closing = 0;
119         peer_ni->ksnp_accepting = 0;
120         peer_ni->ksnp_proto = NULL;
121         peer_ni->ksnp_last_alive = 0;
122         peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
123
124         INIT_LIST_HEAD(&peer_ni->ksnp_conns);
125         INIT_LIST_HEAD(&peer_ni->ksnp_routes);
126         INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
127         INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
128         spin_lock_init(&peer_ni->ksnp_lock);
129
130         spin_lock_bh(&net->ksnn_lock);
131
132         if (net->ksnn_shutdown) {
133                 spin_unlock_bh(&net->ksnn_lock);
134
135                 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
136                 CERROR("Can't create peer_ni: network shutdown\n");
137                 return -ESHUTDOWN;
138         }
139
140         net->ksnn_npeers++;
141
142         spin_unlock_bh(&net->ksnn_lock);
143
144         *peerp = peer_ni;
145         return 0;
146 }
147
148 void
149 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
150 {
151         struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
152
153         CDEBUG (D_NET, "peer_ni %s %p deleted\n",
154                 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
155
156         LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
157         LASSERT(peer_ni->ksnp_accepting == 0);
158         LASSERT(list_empty(&peer_ni->ksnp_conns));
159         LASSERT(list_empty(&peer_ni->ksnp_routes));
160         LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
161         LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
162
163         LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
164
165         /* NB a peer_ni's connections and routes keep a reference on their peer_ni
166          * until they are destroyed, so we can be assured that _all_ state to
167          * do with this peer_ni has been cleaned up when its refcount drops to
168          * zero. */
169         spin_lock_bh(&net->ksnn_lock);
170         net->ksnn_npeers--;
171         spin_unlock_bh(&net->ksnn_lock);
172 }
173
174 struct ksock_peer_ni *
175 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
176 {
177         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178         struct list_head *tmp;
179         struct ksock_peer_ni *peer_ni;
180
181         list_for_each(tmp, peer_list) {
182                 peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
183
184                 LASSERT(!peer_ni->ksnp_closing);
185
186                 if (peer_ni->ksnp_ni != ni)
187                         continue;
188
189                 if (peer_ni->ksnp_id.nid != id.nid ||
190                     peer_ni->ksnp_id.pid != id.pid)
191                         continue;
192
193                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
194                        peer_ni, libcfs_id2str(id),
195                        atomic_read(&peer_ni->ksnp_refcount));
196                 return peer_ni;
197         }
198         return NULL;
199 }
200
201 struct ksock_peer_ni *
202 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
203 {
204         struct ksock_peer_ni *peer_ni;
205
206         read_lock(&ksocknal_data.ksnd_global_lock);
207         peer_ni = ksocknal_find_peer_locked(ni, id);
208         if (peer_ni != NULL)                    /* +1 ref for caller? */
209                 ksocknal_peer_addref(peer_ni);
210         read_unlock(&ksocknal_data.ksnd_global_lock);
211
212         return (peer_ni);
213 }
214
215 static void
216 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
217 {
218         int i;
219         __u32 ip;
220         struct ksock_interface *iface;
221
222         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
223                 LASSERT(i < LNET_INTERFACES_NUM);
224                 ip = peer_ni->ksnp_passive_ips[i];
225
226                 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
227                 /*
228                  * All IPs in peer_ni->ksnp_passive_ips[] come from the
229                  * interface list, therefore the call must succeed.
230                  */
231                 LASSERT(iface != NULL);
232
233                 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
234                        peer_ni, iface, iface->ksni_nroutes);
235                 iface->ksni_npeers--;
236         }
237
238         LASSERT(list_empty(&peer_ni->ksnp_conns));
239         LASSERT(list_empty(&peer_ni->ksnp_routes));
240         LASSERT(!peer_ni->ksnp_closing);
241         peer_ni->ksnp_closing = 1;
242         list_del(&peer_ni->ksnp_list);
243         /* lose peerlist's ref */
244         ksocknal_peer_decref(peer_ni);
245 }
246
247 static int
248 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
249                        struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
250                        int *port, int *conn_count, int *share_count)
251 {
252         struct ksock_peer_ni *peer_ni;
253         struct list_head *ptmp;
254         struct ksock_route *route;
255         struct list_head *rtmp;
256         int i;
257         int j;
258         int rc = -ENOENT;
259
260         read_lock(&ksocknal_data.ksnd_global_lock);
261
262         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
263                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
264                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
265
266                         if (peer_ni->ksnp_ni != ni)
267                                 continue;
268
269                         if (peer_ni->ksnp_n_passive_ips == 0 &&
270                             list_empty(&peer_ni->ksnp_routes)) {
271                                 if (index-- > 0)
272                                         continue;
273
274                                 *id = peer_ni->ksnp_id;
275                                 *myip = 0;
276                                 *peer_ip = 0;
277                                 *port = 0;
278                                 *conn_count = 0;
279                                 *share_count = 0;
280                                 rc = 0;
281                                 goto out;
282                         }
283
284                         for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
285                                 if (index-- > 0)
286                                         continue;
287
288                                 *id = peer_ni->ksnp_id;
289                                 *myip = peer_ni->ksnp_passive_ips[j];
290                                 *peer_ip = 0;
291                                 *port = 0;
292                                 *conn_count = 0;
293                                 *share_count = 0;
294                                 rc = 0;
295                                 goto out;
296                         }
297
298                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
299                                 if (index-- > 0)
300                                         continue;
301
302                                 route = list_entry(rtmp, struct ksock_route,
303                                                    ksnr_list);
304
305                                 *id = peer_ni->ksnp_id;
306                                 *myip = route->ksnr_myipaddr;
307                                 *peer_ip = route->ksnr_ipaddr;
308                                 *port = route->ksnr_port;
309                                 *conn_count = route->ksnr_conn_count;
310                                 *share_count = route->ksnr_share_count;
311                                 rc = 0;
312                                 goto out;
313                         }
314                 }
315         }
316 out:
317         read_unlock(&ksocknal_data.ksnd_global_lock);
318         return rc;
319 }
320
321 static void
322 ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
323 {
324         struct ksock_peer_ni *peer_ni = route->ksnr_peer;
325         int type = conn->ksnc_type;
326         struct ksock_interface *iface;
327
328         conn->ksnc_route = route;
329         ksocknal_route_addref(route);
330
331         if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
332                 if (route->ksnr_myipaddr == 0) {
333                         /* route wasn't bound locally yet (the initial route) */
334                         CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
335                                libcfs_id2str(peer_ni->ksnp_id),
336                                &route->ksnr_ipaddr,
337                                &conn->ksnc_myipaddr);
338                 } else {
339                         CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
340                                "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
341                                &route->ksnr_ipaddr,
342                                &route->ksnr_myipaddr,
343                                &conn->ksnc_myipaddr);
344
345                         iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
346                                                   route->ksnr_myipaddr);
347                         if (iface != NULL)
348                                 iface->ksni_nroutes--;
349                 }
350                 route->ksnr_myipaddr = conn->ksnc_myipaddr;
351                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
352                                           route->ksnr_myipaddr);
353                 if (iface != NULL)
354                         iface->ksni_nroutes++;
355         }
356
357         route->ksnr_connected |= (1<<type);
358         route->ksnr_conn_count++;
359
360         /* Successful connection => further attempts can
361          * proceed immediately */
362         route->ksnr_retry_interval = 0;
363 }
364
365 static void
366 ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
367 {
368         struct list_head *tmp;
369         struct ksock_conn *conn;
370         struct ksock_route *route2;
371
372         LASSERT(!peer_ni->ksnp_closing);
373         LASSERT(route->ksnr_peer == NULL);
374         LASSERT(!route->ksnr_scheduled);
375         LASSERT(!route->ksnr_connecting);
376         LASSERT(route->ksnr_connected == 0);
377
378         /* LASSERT(unique) */
379         list_for_each(tmp, &peer_ni->ksnp_routes) {
380                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
381
382                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
383                         CERROR("Duplicate route %s %pI4h\n",
384                                libcfs_id2str(peer_ni->ksnp_id),
385                                &route->ksnr_ipaddr);
386                         LBUG();
387                 }
388         }
389
390         route->ksnr_peer = peer_ni;
391         ksocknal_peer_addref(peer_ni);
392         /* peer_ni's routelist takes over my ref on 'route' */
393         list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
394
395         list_for_each(tmp, &peer_ni->ksnp_conns) {
396                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
397
398                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
399                         continue;
400
401                 ksocknal_associate_route_conn_locked(route, conn);
402                 /* keep going (typed routes) */
403         }
404 }
405
406 static void
407 ksocknal_del_route_locked(struct ksock_route *route)
408 {
409         struct ksock_peer_ni *peer_ni = route->ksnr_peer;
410         struct ksock_interface *iface;
411         struct ksock_conn *conn;
412         struct list_head *ctmp;
413         struct list_head *cnxt;
414
415         LASSERT(!route->ksnr_deleted);
416
417         /* Close associated conns */
418         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
419                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
420
421                 if (conn->ksnc_route != route)
422                         continue;
423
424                 ksocknal_close_conn_locked(conn, 0);
425         }
426
427         if (route->ksnr_myipaddr != 0) {
428                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
429                                           route->ksnr_myipaddr);
430                 if (iface != NULL)
431                         iface->ksni_nroutes--;
432         }
433
434         route->ksnr_deleted = 1;
435         list_del(&route->ksnr_list);
436         ksocknal_route_decref(route);           /* drop peer_ni's ref */
437
438         if (list_empty(&peer_ni->ksnp_routes) &&
439             list_empty(&peer_ni->ksnp_conns)) {
440                 /* I've just removed the last route to a peer_ni with no active
441                  * connections */
442                 ksocknal_unlink_peer_locked(peer_ni);
443         }
444 }
445
446 int
447 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
448                   int port)
449 {
450         struct list_head *tmp;
451         struct ksock_peer_ni *peer_ni;
452         struct ksock_peer_ni *peer2;
453         struct ksock_route *route;
454         struct ksock_route *route2;
455         int rc;
456
457         if (id.nid == LNET_NID_ANY ||
458             id.pid == LNET_PID_ANY)
459                 return (-EINVAL);
460
461         /* Have a brand new peer_ni ready... */
462         rc = ksocknal_create_peer(&peer_ni, ni, id);
463         if (rc != 0)
464                 return rc;
465
466         route = ksocknal_create_route (ipaddr, port);
467         if (route == NULL) {
468                 ksocknal_peer_decref(peer_ni);
469                 return (-ENOMEM);
470         }
471
472         write_lock_bh(&ksocknal_data.ksnd_global_lock);
473
474         /* always called with a ref on ni, so shutdown can't have started */
475         LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
476
477         peer2 = ksocknal_find_peer_locked(ni, id);
478         if (peer2 != NULL) {
479                 ksocknal_peer_decref(peer_ni);
480                 peer_ni = peer2;
481         } else {
482                 /* peer_ni table takes my ref on peer_ni */
483                 list_add_tail(&peer_ni->ksnp_list,
484                               ksocknal_nid2peerlist(id.nid));
485         }
486
487         route2 = NULL;
488         list_for_each(tmp, &peer_ni->ksnp_routes) {
489                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
490
491                 if (route2->ksnr_ipaddr == ipaddr)
492                         break;
493
494                 route2 = NULL;
495         }
496         if (route2 == NULL) {
497                 ksocknal_add_route_locked(peer_ni, route);
498                 route->ksnr_share_count++;
499         } else {
500                 ksocknal_route_decref(route);
501                 route2->ksnr_share_count++;
502         }
503
504         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
505
506         return 0;
507 }
508
509 static void
510 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
511 {
512         struct ksock_conn *conn;
513         struct ksock_route *route;
514         struct list_head *tmp;
515         struct list_head *nxt;
516         int nshared;
517
518         LASSERT(!peer_ni->ksnp_closing);
519
520         /* Extra ref prevents peer_ni disappearing until I'm done with it */
521         ksocknal_peer_addref(peer_ni);
522
523         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
524                 route = list_entry(tmp, struct ksock_route, ksnr_list);
525
526                 /* no match */
527                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
528                         continue;
529
530                 route->ksnr_share_count = 0;
531                 /* This deletes associated conns too */
532                 ksocknal_del_route_locked(route);
533         }
534
535         nshared = 0;
536         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
537                 route = list_entry(tmp, struct ksock_route, ksnr_list);
538                 nshared += route->ksnr_share_count;
539         }
540
541         if (nshared == 0) {
542                 /* remove everything else if there are no explicit entries
543                  * left */
544
545                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
546                         route = list_entry(tmp, struct ksock_route, ksnr_list);
547
548                         /* we should only be removing auto-entries */
549                         LASSERT(route->ksnr_share_count == 0);
550                         ksocknal_del_route_locked(route);
551                 }
552
553                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
554                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
555
556                         ksocknal_close_conn_locked(conn, 0);
557                 }
558         }
559
560         ksocknal_peer_decref(peer_ni);
561         /* NB peer_ni unlinks itself when last conn/route is removed */
562 }
563
564 static int
565 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
566 {
567         struct list_head zombies = LIST_HEAD_INIT(zombies);
568         struct list_head *ptmp;
569         struct list_head *pnxt;
570         struct ksock_peer_ni *peer_ni;
571         int lo;
572         int hi;
573         int i;
574         int rc = -ENOENT;
575
576         write_lock_bh(&ksocknal_data.ksnd_global_lock);
577
578         if (id.nid != LNET_NID_ANY) {
579                 hi = (int)(ksocknal_nid2peerlist(id.nid) -
580                            ksocknal_data.ksnd_peers);
581                 lo = hi;
582         } else {
583                 lo = 0;
584                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
585         }
586
587         for (i = lo; i <= hi; i++) {
588                 list_for_each_safe(ptmp, pnxt,
589                                    &ksocknal_data.ksnd_peers[i]) {
590                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
591
592                         if (peer_ni->ksnp_ni != ni)
593                                 continue;
594
595                         if (!((id.nid == LNET_NID_ANY ||
596                                peer_ni->ksnp_id.nid == id.nid) &&
597                               (id.pid == LNET_PID_ANY ||
598                                peer_ni->ksnp_id.pid == id.pid)))
599                                 continue;
600
601                         ksocknal_peer_addref(peer_ni);  /* a ref for me... */
602
603                         ksocknal_del_peer_locked(peer_ni, ip);
604
605                         if (peer_ni->ksnp_closing &&
606                             !list_empty(&peer_ni->ksnp_tx_queue)) {
607                                 LASSERT(list_empty(&peer_ni->ksnp_conns));
608                                 LASSERT(list_empty(&peer_ni->ksnp_routes));
609
610                                 list_splice_init(&peer_ni->ksnp_tx_queue,
611                                                  &zombies);
612                         }
613
614                         ksocknal_peer_decref(peer_ni);  /* ...till here */
615
616                         rc = 0;                         /* matched! */
617                 }
618         }
619
620         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
621
622         ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
623
624         return rc;
625 }
626
627 static struct ksock_conn *
628 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
629 {
630         struct ksock_peer_ni *peer_ni;
631         struct list_head *ptmp;
632         struct ksock_conn *conn;
633         struct list_head *ctmp;
634         int i;
635
636         read_lock(&ksocknal_data.ksnd_global_lock);
637
638         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
639                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
640                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
641
642                         LASSERT(!peer_ni->ksnp_closing);
643
644                         if (peer_ni->ksnp_ni != ni)
645                                 continue;
646
647                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
648                                 if (index-- > 0)
649                                         continue;
650
651                                 conn = list_entry(ctmp, struct ksock_conn,
652                                                   ksnc_list);
653                                 ksocknal_conn_addref(conn);
654                                 read_unlock(&ksocknal_data. \
655                                             ksnd_global_lock);
656                                 return conn;
657                         }
658                 }
659         }
660
661         read_unlock(&ksocknal_data.ksnd_global_lock);
662         return NULL;
663 }
664
665 static struct ksock_sched *
666 ksocknal_choose_scheduler_locked(unsigned int cpt)
667 {
668         struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
669         int i;
670
671         if (sched->kss_nthreads == 0) {
672                 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
673                         if (sched->kss_nthreads > 0) {
674                                 CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
675                                        cpt, sched->kss_cpt);
676                                 return sched;
677                         }
678                 }
679                 return NULL;
680         }
681
682         return sched;
683 }
684
685 static int
686 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
687 {
688         struct ksock_net *net = ni->ni_data;
689         int i;
690         int nip;
691
692         read_lock(&ksocknal_data.ksnd_global_lock);
693
694         nip = net->ksnn_ninterfaces;
695         LASSERT(nip <= LNET_INTERFACES_NUM);
696
697         /*
698          * Only offer interfaces for additional connections if I have
699          * more than one.
700          */
701         if (nip < 2) {
702                 read_unlock(&ksocknal_data.ksnd_global_lock);
703                 return 0;
704         }
705
706         for (i = 0; i < nip; i++) {
707                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
708                 LASSERT(ipaddrs[i] != 0);
709         }
710
711         read_unlock(&ksocknal_data.ksnd_global_lock);
712         return nip;
713 }
714
715 static int
716 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
717 {
718         int best_netmatch = 0;
719         int best_xor = 0;
720         int best = -1;
721         int this_xor;
722         int this_netmatch;
723         int i;
724
725         for (i = 0; i < nips; i++) {
726                 if (ips[i] == 0)
727                         continue;
728
729                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
730                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
731
732                 if (!(best < 0 ||
733                       best_netmatch < this_netmatch ||
734                       (best_netmatch == this_netmatch &&
735                        best_xor > this_xor)))
736                         continue;
737
738                 best = i;
739                 best_netmatch = this_netmatch;
740                 best_xor = this_xor;
741         }
742
743         LASSERT (best >= 0);
744         return (best);
745 }
746
747 static int
748 ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
749 {
750         rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
751         struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
752         struct ksock_interface *iface;
753         struct ksock_interface *best_iface;
754         int n_ips;
755         int i;
756         int j;
757         int k;
758         u32 ip;
759         u32 xor;
760         int this_netmatch;
761         int best_netmatch;
762         int best_npeers;
763
764         /* CAVEAT EMPTOR: We do all our interface matching with an
765          * exclusive hold of global lock at IRQ priority.  We're only
766          * expecting to be dealing with small numbers of interfaces, so the
767          * O(n**3)-ness shouldn't matter */
768
769         /* Also note that I'm not going to return more than n_peerips
770          * interfaces, even if I have more myself */
771
772         write_lock_bh(global_lock);
773
774         LASSERT(n_peerips <= LNET_INTERFACES_NUM);
775         LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
776
777         /* Only match interfaces for additional connections
778          * if I have > 1 interface */
779         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
780                 MIN(n_peerips, net->ksnn_ninterfaces);
781
782         for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
783                 /*              ^ yes really... */
784
785                 /* If we have any new interfaces, first tick off all the
786                  * peer_ni IPs that match old interfaces, then choose new
787                  * interfaces to match the remaining peer_ni IPs.
788                  * We don't forget interfaces we've stopped using; we might
789                  * start using them again... */
790
791                 if (i < peer_ni->ksnp_n_passive_ips) {
792                         /* Old interface. */
793                         ip = peer_ni->ksnp_passive_ips[i];
794                         best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
795
796                         /* peer_ni passive ips are kept up to date */
797                         LASSERT(best_iface != NULL);
798                 } else {
799                         /* choose a new interface */
800                         LASSERT (i == peer_ni->ksnp_n_passive_ips);
801
802                         best_iface = NULL;
803                         best_netmatch = 0;
804                         best_npeers = 0;
805
806                         for (j = 0; j < net->ksnn_ninterfaces; j++) {
807                                 iface = &net->ksnn_interfaces[j];
808                                 ip = iface->ksni_ipaddr;
809
810                                 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
811                                         if (peer_ni->ksnp_passive_ips[k] == ip)
812                                                 break;
813
814                                 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
815                                         continue;
816
817                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
818                                 xor = (ip ^ peerips[k]);
819                                 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
820
821                                 if (!(best_iface == NULL ||
822                                       best_netmatch < this_netmatch ||
823                                       (best_netmatch == this_netmatch &&
824                                        best_npeers > iface->ksni_npeers)))
825                                         continue;
826
827                                 best_iface = iface;
828                                 best_netmatch = this_netmatch;
829                                 best_npeers = iface->ksni_npeers;
830                         }
831
832                         LASSERT(best_iface != NULL);
833
834                         best_iface->ksni_npeers++;
835                         ip = best_iface->ksni_ipaddr;
836                         peer_ni->ksnp_passive_ips[i] = ip;
837                         peer_ni->ksnp_n_passive_ips = i+1;
838                 }
839
840                 /* mark the best matching peer_ni IP used */
841                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
842                 peerips[j] = 0;
843         }
844
845         /* Overwrite input peer_ni IP addresses */
846         memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
847
848         write_unlock_bh(global_lock);
849
850         return (n_ips);
851 }
852
853 static void
854 ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
855                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
856 {
857         struct ksock_route              *newroute = NULL;
858         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
859         struct lnet_ni *ni = peer_ni->ksnp_ni;
860         struct ksock_net                *net = ni->ni_data;
861         struct list_head        *rtmp;
862         struct ksock_route              *route;
863         struct ksock_interface  *iface;
864         struct ksock_interface  *best_iface;
865         int                     best_netmatch;
866         int                     this_netmatch;
867         int                     best_nroutes;
868         int                     i;
869         int                     j;
870
871         /* CAVEAT EMPTOR: We do all our interface matching with an
872          * exclusive hold of global lock at IRQ priority.  We're only
873          * expecting to be dealing with small numbers of interfaces, so the
874          * O(n**3)-ness here shouldn't matter */
875
876         write_lock_bh(global_lock);
877
878         if (net->ksnn_ninterfaces < 2) {
879                 /* Only create additional connections
880                  * if I have > 1 interface */
881                 write_unlock_bh(global_lock);
882                 return;
883         }
884
885         LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);
886
887         for (i = 0; i < npeer_ipaddrs; i++) {
888                 if (newroute != NULL) {
889                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
890                 } else {
891                         write_unlock_bh(global_lock);
892
893                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
894                         if (newroute == NULL)
895                                 return;
896
897                         write_lock_bh(global_lock);
898                 }
899
900                 if (peer_ni->ksnp_closing) {
901                         /* peer_ni got closed under me */
902                         break;
903                 }
904
905                 /* Already got a route? */
906                 route = NULL;
907                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
908                         route = list_entry(rtmp, struct ksock_route, ksnr_list);
909
910                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
911                                 break;
912
913                         route = NULL;
914                 }
915                 if (route != NULL)
916                         continue;
917
918                 best_iface = NULL;
919                 best_nroutes = 0;
920                 best_netmatch = 0;
921
922                 LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
923
924                 /* Select interface to connect from */
925                 for (j = 0; j < net->ksnn_ninterfaces; j++) {
926                         iface = &net->ksnn_interfaces[j];
927
928                         /* Using this interface already? */
929                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
930                                 route = list_entry(rtmp, struct ksock_route,
931                                                    ksnr_list);
932
933                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
934                                         break;
935
936                                 route = NULL;
937                         }
938                         if (route != NULL)
939                                 continue;
940
941                         this_netmatch = (((iface->ksni_ipaddr ^
942                                            newroute->ksnr_ipaddr) &
943                                            iface->ksni_netmask) == 0) ? 1 : 0;
944
945                         if (!(best_iface == NULL ||
946                               best_netmatch < this_netmatch ||
947                               (best_netmatch == this_netmatch &&
948                                best_nroutes > iface->ksni_nroutes)))
949                                 continue;
950
951                         best_iface = iface;
952                         best_netmatch = this_netmatch;
953                         best_nroutes = iface->ksni_nroutes;
954                 }
955
956                 if (best_iface == NULL)
957                         continue;
958
959                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
960                 best_iface->ksni_nroutes++;
961
962                 ksocknal_add_route_locked(peer_ni, newroute);
963                 newroute = NULL;
964         }
965
966         write_unlock_bh(global_lock);
967         if (newroute != NULL)
968                 ksocknal_route_decref(newroute);
969 }
970
971 int
972 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
973 {
974         struct ksock_connreq *cr;
975         int rc;
976         u32 peer_ip;
977         int peer_port;
978
979         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
980         LASSERT(rc == 0);               /* we succeeded before */
981
982         LIBCFS_ALLOC(cr, sizeof(*cr));
983         if (cr == NULL) {
984                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
985                                    "%pI4h: memory exhausted\n", &peer_ip);
986                 return -ENOMEM;
987         }
988
989         lnet_ni_addref(ni);
990         cr->ksncr_ni   = ni;
991         cr->ksncr_sock = sock;
992
993         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
994
995         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
996         wake_up(&ksocknal_data.ksnd_connd_waitq);
997
998         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
999         return 0;
1000 }
1001
1002 static int
1003 ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
1004 {
1005         struct ksock_route *route;
1006
1007         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1008                 if (route->ksnr_ipaddr == ipaddr)
1009                         return route->ksnr_connecting;
1010         }
1011         return 0;
1012 }
1013
1014 int
1015 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
1016                      struct socket *sock, int type)
1017 {
1018         rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1019         struct list_head zombies = LIST_HEAD_INIT(zombies);
1020         struct lnet_process_id peerid;
1021         struct list_head *tmp;
1022         u64 incarnation;
1023         struct ksock_conn *conn;
1024         struct ksock_conn *conn2;
1025         struct ksock_peer_ni *peer_ni = NULL;
1026         struct ksock_peer_ni *peer2;
1027         struct ksock_sched *sched;
1028         struct ksock_hello_msg *hello;
1029         int cpt;
1030         struct ksock_tx *tx;
1031         struct ksock_tx *txtmp;
1032         int rc;
1033         int rc2;
1034         int active;
1035         char *warn = NULL;
1036
1037         active = (route != NULL);
1038
1039         LASSERT (active == (type != SOCKLND_CONN_NONE));
1040
1041         LIBCFS_ALLOC(conn, sizeof(*conn));
1042         if (conn == NULL) {
1043                 rc = -ENOMEM;
1044                 goto failed_0;
1045         }
1046
1047         conn->ksnc_peer = NULL;
1048         conn->ksnc_route = NULL;
1049         conn->ksnc_sock = sock;
1050         /* 2 refs: 1 for conn, another extra ref prevents the socket
1051          * being closed before establishment of connection */
1052         atomic_set (&conn->ksnc_sock_refcount, 2);
1053         conn->ksnc_type = type;
1054         ksocknal_lib_save_callback(sock, conn);
1055         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1056
1057         conn->ksnc_rx_ready = 0;
1058         conn->ksnc_rx_scheduled = 0;
1059
1060         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1061         conn->ksnc_tx_ready = 0;
1062         conn->ksnc_tx_scheduled = 0;
1063         conn->ksnc_tx_carrier = NULL;
1064         atomic_set (&conn->ksnc_tx_nob, 0);
1065
1066         LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1067                                      kshm_ips[LNET_INTERFACES_NUM]));
1068         if (hello == NULL) {
1069                 rc = -ENOMEM;
1070                 goto failed_1;
1071         }
1072
1073         /* stash conn's local and remote addrs */
1074         rc = ksocknal_lib_get_conn_addrs (conn);
1075         if (rc != 0)
1076                 goto failed_1;
1077
1078         /* Find out/confirm peer_ni's NID and connection type and get the
1079          * vector of interfaces she's willing to let me connect to.
1080          * Passive connections use the listener timeout since the peer_ni sends
1081          * eagerly */
1082
1083         if (active) {
1084                 peer_ni = route->ksnr_peer;
1085                 LASSERT(ni == peer_ni->ksnp_ni);
1086
1087                 /* Active connection sends HELLO eagerly */
1088                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1089                 peerid = peer_ni->ksnp_id;
1090
1091                 write_lock_bh(global_lock);
1092                 conn->ksnc_proto = peer_ni->ksnp_proto;
1093                 write_unlock_bh(global_lock);
1094
1095                 if (conn->ksnc_proto == NULL) {
1096                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1097 #if SOCKNAL_VERSION_DEBUG
1098                          if (*ksocknal_tunables.ksnd_protocol == 2)
1099                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1100                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1101                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1102 #endif
1103                 }
1104
1105                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1106                 if (rc != 0)
1107                         goto failed_1;
1108         } else {
1109                 peerid.nid = LNET_NID_ANY;
1110                 peerid.pid = LNET_PID_ANY;
1111
1112                 /* Passive, get protocol from peer_ni */
1113                 conn->ksnc_proto = NULL;
1114         }
1115
1116         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1117         if (rc < 0)
1118                 goto failed_1;
1119
1120         LASSERT (rc == 0 || active);
1121         LASSERT (conn->ksnc_proto != NULL);
1122         LASSERT (peerid.nid != LNET_NID_ANY);
1123
1124         cpt = lnet_cpt_of_nid(peerid.nid, ni);
1125
1126         if (active) {
1127                 ksocknal_peer_addref(peer_ni);
1128                 write_lock_bh(global_lock);
1129         } else {
1130                 rc = ksocknal_create_peer(&peer_ni, ni, peerid);
1131                 if (rc != 0)
1132                         goto failed_1;
1133
1134                 write_lock_bh(global_lock);
1135
1136                 /* called with a ref on ni, so shutdown can't have started */
1137                 LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
1138
1139                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1140                 if (peer2 == NULL) {
1141                         /* NB this puts an "empty" peer_ni in the peer_ni
1142                          * table (which takes my ref) */
1143                         list_add_tail(&peer_ni->ksnp_list,
1144                                       ksocknal_nid2peerlist(peerid.nid));
1145                 } else {
1146                         ksocknal_peer_decref(peer_ni);
1147                         peer_ni = peer2;
1148                 }
1149
1150                 /* +1 ref for me */
1151                 ksocknal_peer_addref(peer_ni);
1152                 peer_ni->ksnp_accepting++;
1153
1154                 /* Am I already connecting to this guy?  Resolve in
1155                  * favour of higher NID... */
1156                 if (peerid.nid < ni->ni_nid &&
1157                     ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1158                         rc = EALREADY;
1159                         warn = "connection race resolution";
1160                         goto failed_2;
1161                 }
1162         }
1163
1164         if (peer_ni->ksnp_closing ||
1165             (active && route->ksnr_deleted)) {
1166                 /* peer_ni/route got closed under me */
1167                 rc = -ESTALE;
1168                 warn = "peer_ni/route removed";
1169                 goto failed_2;
1170         }
1171
1172         if (peer_ni->ksnp_proto == NULL) {
1173                 /* Never connected before.
1174                  * NB recv_hello may have returned EPROTO to signal my peer_ni
1175                  * wants a different protocol than the one I asked for.
1176                  */
1177                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1178
1179                 peer_ni->ksnp_proto = conn->ksnc_proto;
1180                 peer_ni->ksnp_incarnation = incarnation;
1181         }
1182
1183         if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1184             peer_ni->ksnp_incarnation != incarnation) {
1185                 /* peer_ni rebooted or I've got the wrong protocol version */
1186                 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1187
1188                 peer_ni->ksnp_proto = NULL;
1189                 rc = ESTALE;
1190                 warn = peer_ni->ksnp_incarnation != incarnation ?
1191                        "peer_ni rebooted" :
1192                        "wrong proto version";
1193                 goto failed_2;
1194         }
1195
1196         switch (rc) {
1197         default:
1198                 LBUG();
1199         case 0:
1200                 break;
1201         case EALREADY:
1202                 warn = "lost conn race";
1203                 goto failed_2;
1204         case EPROTO:
1205                 warn = "retry with different protocol version";
1206                 goto failed_2;
1207         }
1208
1209         /* Refuse to duplicate an existing connection, unless this is a
1210          * loopback connection */
1211         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1212                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1213                         conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1214
1215                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1216                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1217                             conn2->ksnc_type != conn->ksnc_type)
1218                                 continue;
1219
1220                         /* Reply on a passive connection attempt so the peer_ni
1221                          * realises we're connected. */
1222                         LASSERT (rc == 0);
1223                         if (!active)
1224                                 rc = EALREADY;
1225
1226                         warn = "duplicate";
1227                         goto failed_2;
1228                 }
1229         }
1230
1231         /* If the connection created by this route didn't bind to the IP
1232          * address the route connected to, the connection/route matching
1233          * code below probably isn't going to work. */
1234         if (active &&
1235             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1236                 CERROR("Route %s %pI4h connected to %pI4h\n",
1237                        libcfs_id2str(peer_ni->ksnp_id),
1238                        &route->ksnr_ipaddr,
1239                        &conn->ksnc_ipaddr);
1240         }
1241
1242         /* Search for a route corresponding to the new connection and
1243          * create an association.  This allows incoming connections created
1244          * by routes in my peer_ni to match my own route entries so I don't
1245          * continually create duplicate routes. */
1246         list_for_each(tmp, &peer_ni->ksnp_routes) {
1247                 route = list_entry(tmp, struct ksock_route, ksnr_list);
1248
1249                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1250                         continue;
1251
1252                 ksocknal_associate_route_conn_locked(route, conn);
1253                 break;
1254         }
1255
1256         conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
1257         peer_ni->ksnp_last_alive = ktime_get_seconds();
1258         peer_ni->ksnp_send_keepalive = 0;
1259         peer_ni->ksnp_error = 0;
1260
1261         sched = ksocknal_choose_scheduler_locked(cpt);
1262         if (!sched) {
1263                 CERROR("no schedulers available. node is unhealthy\n");
1264                 goto failed_2;
1265         }
1266         /*
1267          * The cpt might have changed if we ended up selecting a non cpt
1268          * native scheduler. So use the scheduler's cpt instead.
1269          */
1270         cpt = sched->kss_cpt;
1271         sched->kss_nconns++;
1272         conn->ksnc_scheduler = sched;
1273
1274         conn->ksnc_tx_last_post = ktime_get_seconds();
1275         /* Set the deadline for the outgoing HELLO to drain */
1276         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1277         conn->ksnc_tx_deadline = ktime_get_seconds() +
1278                                  lnet_get_lnd_timeout();
1279         smp_mb();   /* order with adding to peer_ni's conn list */
1280
1281         list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1282         ksocknal_conn_addref(conn);
1283
1284         ksocknal_new_packet(conn, 0);
1285
1286         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1287
1288         /* Take packets blocking for this connection. */
1289         list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1290                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1291                     SOCKNAL_MATCH_NO)
1292                         continue;
1293
1294                 list_del(&tx->tx_list);
1295                 ksocknal_queue_tx_locked(tx, conn);
1296         }
1297
1298         write_unlock_bh(global_lock);
1299
1300         /* We've now got a new connection.  Any errors from here on are just
1301          * like "normal" comms errors and we close the connection normally.
1302          * NB (a) we still have to send the reply HELLO for passive
1303          *        connections,
1304          *    (b) normal I/O on the conn is blocked until I setup and call the
1305          *        socket callbacks.
1306          */
1307
1308         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1309                " incarnation:%lld sched[%d]\n",
1310                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1311                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1312                conn->ksnc_port, incarnation, cpt);
1313
1314         if (active) {
1315                 /* additional routes after interface exchange? */
1316                 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1317                                        hello->kshm_ips, hello->kshm_nips);
1318         } else {
1319                 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1320                                                        hello->kshm_nips);
1321                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1322         }
1323
1324         LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1325                                     kshm_ips[LNET_INTERFACES_NUM]));
1326
1327         /* setup the socket AFTER I've received hello (it disables
1328          * SO_LINGER).  I might call back to the acceptor who may want
1329          * to send a protocol version response and then close the
1330          * socket; this ensures the socket only tears down after the
1331          * response has been sent. */
1332         if (rc == 0)
1333                 rc = ksocknal_lib_setup_sock(sock);
1334
1335         write_lock_bh(global_lock);
1336
1337         /* NB my callbacks block while I hold ksnd_global_lock */
1338         ksocknal_lib_set_callback(sock, conn);
1339
1340         if (!active)
1341                 peer_ni->ksnp_accepting--;
1342
1343         write_unlock_bh(global_lock);
1344
1345         if (rc != 0) {
1346                 write_lock_bh(global_lock);
1347                 if (!conn->ksnc_closing) {
1348                         /* could be closed by another thread */
1349                         ksocknal_close_conn_locked(conn, rc);
1350                 }
1351                 write_unlock_bh(global_lock);
1352         } else if (ksocknal_connsock_addref(conn) == 0) {
1353                 /* Allow I/O to proceed. */
1354                 ksocknal_read_callback(conn);
1355                 ksocknal_write_callback(conn);
1356                 ksocknal_connsock_decref(conn);
1357         }
1358
1359         ksocknal_connsock_decref(conn);
1360         ksocknal_conn_decref(conn);
1361         return rc;
1362
1363 failed_2:
1364         if (!peer_ni->ksnp_closing &&
1365             list_empty(&peer_ni->ksnp_conns) &&
1366             list_empty(&peer_ni->ksnp_routes)) {
1367                 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1368                 list_del_init(&peer_ni->ksnp_tx_queue);
1369                 ksocknal_unlink_peer_locked(peer_ni);
1370         }
1371
1372         write_unlock_bh(global_lock);
1373
1374         if (warn != NULL) {
1375                 if (rc < 0)
1376                         CERROR("Not creating conn %s type %d: %s\n",
1377                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1378                 else
1379                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1380                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1381         }
1382
1383         if (!active) {
1384                 if (rc > 0) {
1385                         /* Request retry by replying with CONN_NONE
1386                          * ksnc_proto has been set already */
1387                         conn->ksnc_type = SOCKLND_CONN_NONE;
1388                         hello->kshm_nips = 0;
1389                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1390                 }
1391
1392                 write_lock_bh(global_lock);
1393                 peer_ni->ksnp_accepting--;
1394                 write_unlock_bh(global_lock);
1395         }
1396
1397         /*
1398          * If we get here without an error code, just use -EALREADY.
1399          * Depending on how we got here, the error may be positive
1400          * or negative. Normalize the value for ksocknal_txlist_done().
1401          */
1402         rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1403         ksocknal_txlist_done(ni, &zombies, rc2);
1404         ksocknal_peer_decref(peer_ni);
1405
1406 failed_1:
1407         if (hello != NULL)
1408                 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1409                                             kshm_ips[LNET_INTERFACES_NUM]));
1410
1411         LIBCFS_FREE(conn, sizeof(*conn));
1412
1413 failed_0:
1414         sock_release(sock);
1415         return rc;
1416 }
1417
1418 void
1419 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1420 {
1421         /* This just does the immediate housekeeping, and queues the
1422          * connection for the reaper to terminate.
1423          * Caller holds ksnd_global_lock exclusively in irq context */
1424         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1425         struct ksock_route *route;
1426         struct ksock_conn *conn2;
1427         struct list_head *tmp;
1428
1429         LASSERT(peer_ni->ksnp_error == 0);
1430         LASSERT(!conn->ksnc_closing);
1431         conn->ksnc_closing = 1;
1432
1433         /* ksnd_deathrow_conns takes over peer_ni's ref */
1434         list_del(&conn->ksnc_list);
1435
1436         route = conn->ksnc_route;
1437         if (route != NULL) {
1438                 /* dissociate conn from route... */
1439                 LASSERT(!route->ksnr_deleted);
1440                 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1441
1442                 conn2 = NULL;
1443                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1444                         conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1445
1446                         if (conn2->ksnc_route == route &&
1447                             conn2->ksnc_type == conn->ksnc_type)
1448                                 break;
1449
1450                         conn2 = NULL;
1451                 }
1452                 if (conn2 == NULL)
1453                         route->ksnr_connected &= ~(1 << conn->ksnc_type);
1454
1455                 conn->ksnc_route = NULL;
1456
1457                 ksocknal_route_decref(route);   /* drop conn's ref on route */
1458         }
1459
1460         if (list_empty(&peer_ni->ksnp_conns)) {
1461                 /* No more connections to this peer_ni */
1462
1463                 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1464                         struct ksock_tx *tx;
1465
1466                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1467
1468                         /* throw them to the last connection...,
1469                          * these TXs will be sent to /dev/null by the scheduler */
1470                         list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1471                                             tx_list)
1472                                 ksocknal_tx_prep(conn, tx);
1473
1474                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1475                         list_splice_init(&peer_ni->ksnp_tx_queue,
1476                                          &conn->ksnc_tx_queue);
1477                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1478                 }
1479
1480                 /* renegotiate protocol version */
1481                 peer_ni->ksnp_proto = NULL;
1482                 /* stash last conn close reason */
1483                 peer_ni->ksnp_error = error;
1484
1485                 if (list_empty(&peer_ni->ksnp_routes)) {
1486                         /* I've just closed last conn belonging to a
1487                          * peer_ni with no routes to it */
1488                         ksocknal_unlink_peer_locked(peer_ni);
1489                 }
1490         }
1491
1492         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1493
1494         list_add_tail(&conn->ksnc_list,
1495                       &ksocknal_data.ksnd_deathrow_conns);
1496         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1497
1498         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1499 }
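/*
 * Usage sketch (illustrative only): ksocknal_close_conn_locked() expects the
 * caller to hold ksnd_global_lock for write, as the callers later in this
 * file do; the conn is merely queued here and the reaper thread performs the
 * actual termination and destruction.  -EIO below is just an example error.
 *
 *	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 *	ksocknal_close_conn_locked(conn, -EIO);
 *	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 */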
1500
1501 void
1502 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1503 {
1504         int notify = 0;
1505         time64_t last_alive = 0;
1506
1507         /* There has been a connection failure or comms error; but I'll only
1508          * tell LNET I think the peer_ni is dead if it's to another kernel and
1509          * there are no connections or connection attempts in existence. */
1510
1511         read_lock(&ksocknal_data.ksnd_global_lock);
1512
1513         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1514              list_empty(&peer_ni->ksnp_conns) &&
1515              peer_ni->ksnp_accepting == 0 &&
1516              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1517                 notify = 1;
1518                 last_alive = peer_ni->ksnp_last_alive;
1519         }
1520
1521         read_unlock(&ksocknal_data.ksnd_global_lock);
1522
1523         if (notify)
1524                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1525                             last_alive);
1526 }
1527
1528 void
1529 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1530 {
1531         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1532         struct ksock_tx *tx;
1533         struct ksock_tx *tmp;
1534         struct list_head zlist = LIST_HEAD_INIT(zlist);
1535
1536         /* NB safe to finalize TXs because closing of socket will
1537          * abort all buffered data */
1538         LASSERT(conn->ksnc_sock == NULL);
1539
1540         spin_lock(&peer_ni->ksnp_lock);
1541
1542         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1543                 if (tx->tx_conn != conn)
1544                         continue;
1545
1546                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1547
1548                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1549                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1550                 list_del(&tx->tx_zc_list);
1551                 list_add(&tx->tx_zc_list, &zlist);
1552         }
1553
1554         spin_unlock(&peer_ni->ksnp_lock);
1555
1556         while (!list_empty(&zlist)) {
1557                 tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
1558
1559                 list_del(&tx->tx_zc_list);
1560                 ksocknal_tx_decref(tx);
1561         }
1562 }
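/*
 * Note: the aborted zero-copy TXs are first moved onto the private zlist
 * under ksnp_lock and only ksocknal_tx_decref()'d after the lock is dropped,
 * so that the final reference drop (which may finalize the message back into
 * LNet) happens outside the spinlock.
 */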
1563
1564 void
1565 ksocknal_terminate_conn(struct ksock_conn *conn)
1566 {
1567         /* This gets called by the reaper (guaranteed thread context) to
1568          * disengage the socket from its callbacks and close it.
1569          * ksnc_refcount will eventually hit zero, and then the reaper will
1570          * destroy it. */
1571         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1572         struct ksock_sched *sched = conn->ksnc_scheduler;
1573         int failed = 0;
1574
1575         LASSERT(conn->ksnc_closing);
1576
1577         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1578         spin_lock_bh(&sched->kss_lock);
1579
1580         /* a closing conn is always ready to tx */
1581         conn->ksnc_tx_ready = 1;
1582
1583         if (!conn->ksnc_tx_scheduled &&
1584             !list_empty(&conn->ksnc_tx_queue)) {
1585                 list_add_tail(&conn->ksnc_tx_list,
1586                                &sched->kss_tx_conns);
1587                 conn->ksnc_tx_scheduled = 1;
1588                 /* extra ref for scheduler */
1589                 ksocknal_conn_addref(conn);
1590
1591                 wake_up (&sched->kss_waitq);
1592         }
1593
1594         spin_unlock_bh(&sched->kss_lock);
1595
1596         /* serialise with callbacks */
1597         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1598
1599         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1600
1601         /* OK, so this conn may not be completely disengaged from its
1602          * scheduler yet, but it _has_ committed to terminate... */
1603         conn->ksnc_scheduler->kss_nconns--;
1604
1605         if (peer_ni->ksnp_error != 0) {
1606                 /* peer_ni's last conn closed in error */
1607                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1608                 failed = 1;
1609                 peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
1610         }
1611
1612         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1613
1614         if (failed)
1615                 ksocknal_peer_failed(peer_ni);
1616
1617         /* The socket is closed on the final put; either here, or in
1618          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1619          * when the connection was established, this will close the socket
1620          * immediately, aborting anything buffered in it. Any hung
1621          * zero-copy transmits will therefore complete in finite time. */
1622         ksocknal_connsock_decref(conn);
1623 }
1624
1625 void
1626 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1627 {
1628         /* Queue the conn for the reaper to destroy */
1629         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1630         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1631
1632         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1633         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1634
1635         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1636 }
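/*
 * Connection teardown in summary: ksocknal_close_conn_locked() marks the conn
 * closing and queues it on ksnd_deathrow_conns; the reaper then calls
 * ksocknal_terminate_conn() to reset the socket callbacks and drop the socket
 * ref; once ksnc_conn_refcount reaches zero, ksocknal_queue_zombie_conn()
 * moves the conn to ksnd_zombie_conns and the reaper frees it in
 * ksocknal_destroy_conn() below.
 */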
1637
1638 void
1639 ksocknal_destroy_conn(struct ksock_conn *conn)
1640 {
1641         time64_t last_rcv;
1642
1643         /* Final coup-de-grace of the reaper */
1644         CDEBUG (D_NET, "connection %p\n", conn);
1645
1646         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1647         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1648         LASSERT (conn->ksnc_sock == NULL);
1649         LASSERT (conn->ksnc_route == NULL);
1650         LASSERT (!conn->ksnc_tx_scheduled);
1651         LASSERT (!conn->ksnc_rx_scheduled);
1652         LASSERT(list_empty(&conn->ksnc_tx_queue));
1653
1654         /* complete current receive if any */
1655         switch (conn->ksnc_rx_state) {
1656         case SOCKNAL_RX_LNET_PAYLOAD:
1657                 last_rcv = conn->ksnc_rx_deadline -
1658                            lnet_get_lnd_timeout();
1659                 CERROR("Completing partial receive from %s[%d], "
1660                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1661                        "last alive is %lld secs ago\n",
1662                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1663                        &conn->ksnc_ipaddr, conn->ksnc_port,
1664                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1665                        ktime_get_seconds() - last_rcv);
1666                 if (conn->ksnc_lnet_msg)
1667                         conn->ksnc_lnet_msg->msg_health_status =
1668                                 LNET_MSG_STATUS_REMOTE_ERROR;
1669                 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1670                 break;
1671         case SOCKNAL_RX_LNET_HEADER:
1672                 if (conn->ksnc_rx_started)
1673                         CERROR("Incomplete receive of lnet header from %s, "
1674                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1675                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1676                                &conn->ksnc_ipaddr, conn->ksnc_port,
1677                                conn->ksnc_proto->pro_version);
1678                 break;
1679         case SOCKNAL_RX_KSM_HEADER:
1680                 if (conn->ksnc_rx_started)
1681                         CERROR("Incomplete receive of ksock message from %s, "
1682                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1683                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1684                                &conn->ksnc_ipaddr, conn->ksnc_port,
1685                                conn->ksnc_proto->pro_version);
1686                 break;
1687         case SOCKNAL_RX_SLOP:
1688                 if (conn->ksnc_rx_started)
1689                         CERROR("Incomplete receive of slops from %s, "
1690                                "ip %pI4h:%d, with error\n",
1691                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1692                                &conn->ksnc_ipaddr, conn->ksnc_port);
1693                 break;
1694         default:
1695                 LBUG ();
1696                 break;
1697         }
1698
1699         ksocknal_peer_decref(conn->ksnc_peer);
1700
1701         LIBCFS_FREE (conn, sizeof (*conn));
1702 }
1703
1704 int
1705 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
1706 {
1707         struct ksock_conn *conn;
1708         struct list_head *ctmp;
1709         struct list_head *cnxt;
1710         int count = 0;
1711
1712         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1713                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1714
1715                 if (ipaddr == 0 ||
1716                     conn->ksnc_ipaddr == ipaddr) {
1717                         count++;
1718                         ksocknal_close_conn_locked (conn, why);
1719                 }
1720         }
1721
1722         return (count);
1723 }
1724
1725 int
1726 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1727 {
1728         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1729         u32 ipaddr = conn->ksnc_ipaddr;
1730         int count;
1731
1732         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1733
1734         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1735
1736         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1737
1738         return (count);
1739 }
1740
1741 int
1742 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1743 {
1744         struct ksock_peer_ni *peer_ni;
1745         struct list_head *ptmp;
1746         struct list_head *pnxt;
1747         int lo;
1748         int hi;
1749         int i;
1750         int count = 0;
1751
1752         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1753
1754         if (id.nid != LNET_NID_ANY)
1755                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1756         else {
1757                 lo = 0;
1758                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1759         }
1760
1761         for (i = lo; i <= hi; i++) {
1762                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1763
1764                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
1765
1766                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1767                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1768                                 continue;
1769
1770                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1771                 }
1772         }
1773
1774         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1775
1776         /* wildcards always succeed */
1777         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1778                 return (0);
1779
1780         return (count == 0 ? -ENOENT : 0);
1781 }
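/*
 * Usage sketch (illustrative only): close every connection to a given NID,
 * whatever the local IP.  With any wildcard (LNET_NID_ANY, LNET_PID_ANY or
 * ipaddr == 0) the call reports success even if nothing matched.
 *
 *	struct lnet_process_id id = {
 *		.nid = nid,
 *		.pid = LNET_PID_ANY,
 *	};
 *
 *	rc = ksocknal_close_matching_conns(id, 0);
 */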
1782
1783 void
1784 ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
1785 {
1786         /* The router is telling me she's been notified of a change in
1787          * gateway state....
1788          */
1789         struct lnet_process_id id = {
1790                 .nid    = gw_nid,
1791                 .pid    = LNET_PID_ANY,
1792         };
1793
1794         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1795                 alive ? "up" : "down");
1796
1797         if (!alive) {
1798                 /* If the gateway crashed, close all open connections... */
1799                 ksocknal_close_matching_conns (id, 0);
1800                 return;
1801         }
1802
1803         /* ...otherwise do nothing.  We can only establish new connections
1804          * if we have autoroutes, and these connect on demand. */
1805 }
1806
1807 void
1808 ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
1809 {
1810         int connect = 1;
1811         time64_t last_alive = 0;
1812         time64_t now = ktime_get_seconds();
1813         struct ksock_peer_ni *peer_ni = NULL;
1814         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1815         struct lnet_process_id id = {
1816                 .nid = nid,
1817                 .pid = LNET_PID_LUSTRE,
1818         };
1819
1820         read_lock(glock);
1821
1822         peer_ni = ksocknal_find_peer_locked(ni, id);
1823         if (peer_ni != NULL) {
1824                 struct list_head *tmp;
1825                 struct ksock_conn *conn;
1826                 int bufnob;
1827
1828                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1829                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
1830                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1831
1832                         if (bufnob < conn->ksnc_tx_bufnob) {
1833                                 /* something got ACKed */
1834                                 conn->ksnc_tx_deadline = ktime_get_seconds() +
1835                                                          lnet_get_lnd_timeout();
1836                                 peer_ni->ksnp_last_alive = now;
1837                                 conn->ksnc_tx_bufnob = bufnob;
1838                         }
1839                 }
1840
1841                 last_alive = peer_ni->ksnp_last_alive;
1842                 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1843                         connect = 0;
1844         }
1845
1846         read_unlock(glock);
1847
1848         if (last_alive != 0)
1849                 *when = last_alive;
1850
1851         CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
1852                libcfs_nid2str(nid), peer_ni,
1853                last_alive ? now - last_alive : -1,
1854                connect);
1855
1856         if (!connect)
1857                 return;
1858
1859         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1860
1861         write_lock_bh(glock);
1862
1863         peer_ni = ksocknal_find_peer_locked(ni, id);
1864         if (peer_ni != NULL)
1865                 ksocknal_launch_all_connections_locked(peer_ni);
1866
1867         write_unlock_bh(glock);
1868         return;
1869 }
1870
1871 static void
1872 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1873 {
1874         int index;
1875         int i;
1876         struct list_head *tmp;
1877         struct ksock_conn *conn;
1878
1879         for (index = 0; ; index++) {
1880                 read_lock(&ksocknal_data.ksnd_global_lock);
1881
1882                 i = 0;
1883                 conn = NULL;
1884
1885                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1886                         if (i++ == index) {
1887                                 conn = list_entry(tmp, struct ksock_conn,
1888                                                   ksnc_list);
1889                                 ksocknal_conn_addref(conn);
1890                                 break;
1891                         }
1892                 }
1893
1894                 read_unlock(&ksocknal_data.ksnd_global_lock);
1895
1896                 if (conn == NULL)
1897                         break;
1898
1899                 ksocknal_lib_push_conn (conn);
1900                 ksocknal_conn_decref(conn);
1901         }
1902 }
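/*
 * Note the iteration pattern above: the conn list can change whenever the
 * global lock is dropped, so each pass re-walks ksnp_conns under a read lock
 * up to the current index, takes a ref on the conn found, and only then drops
 * the lock before pushing it.  ksocknal_push() below uses the same scheme for
 * the peer_ni hash chains.
 */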
1903
1904 static int
1905 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1906 {
1907         struct list_head *start;
1908         struct list_head *end;
1909         struct list_head *tmp;
1910         int               rc = -ENOENT;
1911         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1912
1913         if (id.nid == LNET_NID_ANY) {
1914                 start = &ksocknal_data.ksnd_peers[0];
1915                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1916         } else {
1917                 start = end = ksocknal_nid2peerlist(id.nid);
1918         }
1919
1920         for (tmp = start; tmp <= end; tmp++) {
1921                 int     peer_off; /* searching offset in peer_ni hash table */
1922
1923                 for (peer_off = 0; ; peer_off++) {
1924                         struct ksock_peer_ni *peer_ni;
1925                         int           i = 0;
1926
1927                         read_lock(&ksocknal_data.ksnd_global_lock);
1928                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1929                                 if (!((id.nid == LNET_NID_ANY ||
1930                                        id.nid == peer_ni->ksnp_id.nid) &&
1931                                       (id.pid == LNET_PID_ANY ||
1932                                        id.pid == peer_ni->ksnp_id.pid)))
1933                                         continue;
1934
1935                                 if (i++ == peer_off) {
1936                                         ksocknal_peer_addref(peer_ni);
1937                                         break;
1938                                 }
1939                         }
1940                         read_unlock(&ksocknal_data.ksnd_global_lock);
1941
1942                         if (i == 0) /* no match */
1943                                 break;
1944
1945                         rc = 0;
1946                         ksocknal_push_peer(peer_ni);
1947                         ksocknal_peer_decref(peer_ni);
1948                 }
1949         }
1950         return rc;
1951 }
1952
1953 static int
1954 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1955 {
1956         struct ksock_net *net = ni->ni_data;
1957         struct ksock_interface *iface;
1958         int rc;
1959         int i;
1960         int j;
1961         struct list_head *ptmp;
1962         struct ksock_peer_ni *peer_ni;
1963         struct list_head *rtmp;
1964         struct ksock_route *route;
1965
1966         if (ipaddress == 0 ||
1967             netmask == 0)
1968                 return -EINVAL;
1969
1970         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1971
1972         iface = ksocknal_ip2iface(ni, ipaddress);
1973         if (iface != NULL) {
1974                 /* silently ignore dups */
1975                 rc = 0;
1976         } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
1977                 rc = -ENOSPC;
1978         } else {
1979                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1980
1981                 iface->ksni_ipaddr = ipaddress;
1982                 iface->ksni_netmask = netmask;
1983                 iface->ksni_nroutes = 0;
1984                 iface->ksni_npeers = 0;
1985
1986                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1987                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1988                                 peer_ni = list_entry(ptmp, struct ksock_peer_ni,
1989                                                      ksnp_list);
1990
1991                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1992                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1993                                                 iface->ksni_npeers++;
1994
1995                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1996                                         route = list_entry(rtmp,
1997                                                            struct ksock_route,
1998                                                            ksnr_list);
1999
2000                                         if (route->ksnr_myipaddr == ipaddress)
2001                                                 iface->ksni_nroutes++;
2002                                 }
2003                         }
2004                 }
2005
2006                 rc = 0;
2007                 /* NB only new connections will pay attention to the new interface! */
2008         }
2009
2010         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2011
2012         return rc;
2013 }
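/*
 * Usage sketch (illustrative only): this is normally reached via
 * IOC_LIBCFS_ADD_INTERFACE in ksocknal_ctl() below, with the arguments
 * carried in the struct libcfs_ioctl_data from user space:
 *
 *	data->ioc_u32[0] = ipaddress;   /* IP address */
 *	data->ioc_u32[1] = netmask;     /* net mask */
 *	rc = ksocknal_ctl(ni, IOC_LIBCFS_ADD_INTERFACE, data);
 */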
2014
2015 static void
2016 ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
2017 {
2018         struct list_head *tmp;
2019         struct list_head *nxt;
2020         struct ksock_route *route;
2021         struct ksock_conn *conn;
2022         int i;
2023         int j;
2024
2025         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2026                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2027                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2028                                 peer_ni->ksnp_passive_ips[j-1] =
2029                                         peer_ni->ksnp_passive_ips[j];
2030                         peer_ni->ksnp_n_passive_ips--;
2031                         break;
2032                 }
2033
2034         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2035                 route = list_entry(tmp, struct ksock_route, ksnr_list);
2036
2037                 if (route->ksnr_myipaddr != ipaddr)
2038                         continue;
2039
2040                 if (route->ksnr_share_count != 0) {
2041                         /* Manually created; keep, but unbind */
2042                         route->ksnr_myipaddr = 0;
2043                 } else {
2044                         ksocknal_del_route_locked(route);
2045                 }
2046         }
2047
2048         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2049                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2050
2051                 if (conn->ksnc_myipaddr == ipaddr)
2052                         ksocknal_close_conn_locked (conn, 0);
2053         }
2054 }
2055
2056 static int
2057 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2058 {
2059         struct ksock_net *net = ni->ni_data;
2060         int rc = -ENOENT;
2061         struct list_head *tmp;
2062         struct list_head *nxt;
2063         struct ksock_peer_ni *peer_ni;
2064         u32 this_ip;
2065         int i;
2066         int j;
2067
2068         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2069
2070         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2071                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2072
2073                 if (!(ipaddress == 0 ||
2074                       ipaddress == this_ip))
2075                         continue;
2076
2077                 rc = 0;
2078
2079                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2080                         net->ksnn_interfaces[j-1] =
2081                                 net->ksnn_interfaces[j];
2082
2083                 net->ksnn_ninterfaces--;
2084
2085                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2086                         list_for_each_safe(tmp, nxt,
2087                                            &ksocknal_data.ksnd_peers[j]) {
2088                                 peer_ni = list_entry(tmp, struct ksock_peer_ni,
2089                                                      ksnp_list);
2090
2091                                 if (peer_ni->ksnp_ni != ni)
2092                                         continue;
2093
2094                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2095                         }
2096                 }
2097         }
2098
2099         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2100
2101         return (rc);
2102 }
2103
2104 int
2105 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
2106 {
2107         struct lnet_process_id id = {0};
2108         struct libcfs_ioctl_data *data = arg;
2109         int rc;
2110
2111         switch(cmd) {
2112         case IOC_LIBCFS_GET_INTERFACE: {
2113                 struct ksock_net *net = ni->ni_data;
2114                 struct ksock_interface *iface;
2115
2116                 read_lock(&ksocknal_data.ksnd_global_lock);
2117
2118                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2119                         rc = -ENOENT;
2120                 } else {
2121                         rc = 0;
2122                         iface = &net->ksnn_interfaces[data->ioc_count];
2123
2124                         data->ioc_u32[0] = iface->ksni_ipaddr;
2125                         data->ioc_u32[1] = iface->ksni_netmask;
2126                         data->ioc_u32[2] = iface->ksni_npeers;
2127                         data->ioc_u32[3] = iface->ksni_nroutes;
2128                 }
2129
2130                 read_unlock(&ksocknal_data.ksnd_global_lock);
2131                 return rc;
2132         }
2133
2134         case IOC_LIBCFS_ADD_INTERFACE:
2135                 return ksocknal_add_interface(ni,
2136                                               data->ioc_u32[0], /* IP address */
2137                                               data->ioc_u32[1]); /* net mask */
2138
2139         case IOC_LIBCFS_DEL_INTERFACE:
2140                 return ksocknal_del_interface(ni,
2141                                               data->ioc_u32[0]); /* IP address */
2142
2143         case IOC_LIBCFS_GET_PEER: {
2144                 __u32            myip = 0;
2145                 __u32            ip = 0;
2146                 int              port = 0;
2147                 int              conn_count = 0;
2148                 int              share_count = 0;
2149
2150                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2151                                             &id, &myip, &ip, &port,
2152                                             &conn_count,  &share_count);
2153                 if (rc != 0)
2154                         return rc;
2155
2156                 data->ioc_nid    = id.nid;
2157                 data->ioc_count  = share_count;
2158                 data->ioc_u32[0] = ip;
2159                 data->ioc_u32[1] = port;
2160                 data->ioc_u32[2] = myip;
2161                 data->ioc_u32[3] = conn_count;
2162                 data->ioc_u32[4] = id.pid;
2163                 return 0;
2164         }
2165
2166         case IOC_LIBCFS_ADD_PEER:
2167                 id.nid = data->ioc_nid;
2168                 id.pid = LNET_PID_LUSTRE;
2169                 return ksocknal_add_peer (ni, id,
2170                                           data->ioc_u32[0], /* IP */
2171                                           data->ioc_u32[1]); /* port */
2172
2173         case IOC_LIBCFS_DEL_PEER:
2174                 id.nid = data->ioc_nid;
2175                 id.pid = LNET_PID_ANY;
2176                 return ksocknal_del_peer (ni, id,
2177                                           data->ioc_u32[0]); /* IP */
2178
2179         case IOC_LIBCFS_GET_CONN: {
2180                 int           txmem;
2181                 int           rxmem;
2182                 int           nagle;
2183                 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2184
2185                 if (conn == NULL)
2186                         return -ENOENT;
2187
2188                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2189
2190                 data->ioc_count  = txmem;
2191                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2192                 data->ioc_flags  = nagle;
2193                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2194                 data->ioc_u32[1] = conn->ksnc_port;
2195                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2196                 data->ioc_u32[3] = conn->ksnc_type;
2197                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
2198                 data->ioc_u32[5] = rxmem;
2199                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2200                 ksocknal_conn_decref(conn);
2201                 return 0;
2202         }
2203
2204         case IOC_LIBCFS_CLOSE_CONNECTION:
2205                 id.nid = data->ioc_nid;
2206                 id.pid = LNET_PID_ANY;
2207                 return ksocknal_close_matching_conns (id,
2208                                                       data->ioc_u32[0]);
2209
2210         case IOC_LIBCFS_REGISTER_MYNID:
2211                 /* Ignore if this is a noop */
2212                 if (data->ioc_nid == ni->ni_nid)
2213                         return 0;
2214
2215                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2216                        libcfs_nid2str(data->ioc_nid),
2217                        libcfs_nid2str(ni->ni_nid));
2218                 return -EINVAL;
2219
2220         case IOC_LIBCFS_PUSH_CONNECTION:
2221                 id.nid = data->ioc_nid;
2222                 id.pid = LNET_PID_ANY;
2223                 return ksocknal_push(ni, id);
2224
2225         default:
2226                 return -EINVAL;
2227         }
2228         /* not reached */
2229 }
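/*
 * For reference, the IOC_LIBCFS_GET_CONN case above fills the reply as:
 * ioc_count = txmem, ioc_nid = peer NID, ioc_flags = nagle, ioc_u32[0] = peer
 * IP, [1] = peer port, [2] = local IP, [3] = conn type, [4] = scheduler CPT,
 * [5] = rxmem, [6] = peer PID.
 */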
2230
2231 static void
2232 ksocknal_free_buffers (void)
2233 {
2234         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2235
2236         if (ksocknal_data.ksnd_schedulers != NULL)
2237                 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
2238
2239         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2240                      sizeof(struct list_head) *
2241                      ksocknal_data.ksnd_peer_hash_size);
2242
2243         spin_lock(&ksocknal_data.ksnd_tx_lock);
2244
2245         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2246                 struct list_head zlist;
2247                 struct ksock_tx *tx;
2248
2249                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2250                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2251                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2252
2253                 while (!list_empty(&zlist)) {
2254                         tx = list_entry(zlist.next, struct ksock_tx, tx_list);
2255                         list_del(&tx->tx_list);
2256                         LIBCFS_FREE(tx, tx->tx_desc_size);
2257                 }
2258         } else {
2259                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2260         }
2261 }
2262
2263 static void
2264 ksocknal_base_shutdown(void)
2265 {
2266         struct ksock_sched *sched;
2267         int i;
2268
2269         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2270                atomic_read (&libcfs_kmemory));
2271         LASSERT (ksocknal_data.ksnd_nnets == 0);
2272
2273         switch (ksocknal_data.ksnd_init) {
2274         default:
2275                 LASSERT (0);
2276
2277         case SOCKNAL_INIT_ALL:
2278         case SOCKNAL_INIT_DATA:
2279                 LASSERT (ksocknal_data.ksnd_peers != NULL);
2280                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2281                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2282                 }
2283
2284                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2285                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2286                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2287                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2288                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2289
2290                 if (ksocknal_data.ksnd_schedulers != NULL) {
2291                         cfs_percpt_for_each(sched, i,
2292                                             ksocknal_data.ksnd_schedulers) {
2293
2294                                 LASSERT(list_empty(&sched->kss_tx_conns));
2295                                 LASSERT(list_empty(&sched->kss_rx_conns));
2296                                 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2297                                 LASSERT(sched->kss_nconns == 0);
2298                         }
2299                 }
2300
2301                 /* flag threads to terminate; wake and wait for them to die */
2302                 ksocknal_data.ksnd_shuttingdown = 1;
2303                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2304                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2305
2306                 if (ksocknal_data.ksnd_schedulers != NULL) {
2307                         cfs_percpt_for_each(sched, i,
2308                                             ksocknal_data.ksnd_schedulers)
2309                                         wake_up_all(&sched->kss_waitq);
2310                 }
2311
2312                 i = 4;
2313                 read_lock(&ksocknal_data.ksnd_global_lock);
2314                 while (ksocknal_data.ksnd_nthreads != 0) {
2315                         i++;
2316                         /* power of 2? */
2317                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2318                                 "waiting for %d threads to terminate\n",
2319                                 ksocknal_data.ksnd_nthreads);
2320                         read_unlock(&ksocknal_data.ksnd_global_lock);
2321                         set_current_state(TASK_UNINTERRUPTIBLE);
2322                         schedule_timeout(cfs_time_seconds(1));
2323                         read_lock(&ksocknal_data.ksnd_global_lock);
2324                 }
2325                 read_unlock(&ksocknal_data.ksnd_global_lock);
2326
2327                 ksocknal_free_buffers();
2328
2329                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2330                 break;
2331         }
2332
2333         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2334                atomic_read (&libcfs_kmemory));
2335
2336         module_put(THIS_MODULE);
2337 }
2338
2339 static int
2340 ksocknal_base_startup(void)
2341 {
2342         struct ksock_sched *sched;
2343         int rc;
2344         int i;
2345
2346         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2347         LASSERT (ksocknal_data.ksnd_nnets == 0);
2348
2349         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2350
2351         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2352         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2353                      sizeof(struct list_head) *
2354                      ksocknal_data.ksnd_peer_hash_size);
2355         if (ksocknal_data.ksnd_peers == NULL)
2356                 return -ENOMEM;
2357
2358         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2359                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2360
2361         rwlock_init(&ksocknal_data.ksnd_global_lock);
2362         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2363
2364         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2365         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2366         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2367         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2368         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2369
2370         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2371         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2372         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2373         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2374
2375         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2376         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2377
2378         /* NB the memset above zeros the whole of ksocknal_data */
2379
2380         /* flag lists/ptrs/locks initialised */
2381         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2382         try_module_get(THIS_MODULE);
2383
2384         /* Create a scheduler block per available CPT */
2385         ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2386                                                          sizeof(*sched));
2387         if (ksocknal_data.ksnd_schedulers == NULL)
2388                 goto failed;
2389
2390         cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2391                 int nthrs;
2392
2393                 /*
2394                  * make sure not to allocate more threads than there are
2395                  * cores/CPUs in the CPT
2396                  */
2397                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2398                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2399                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2400                 } else {
2401                         /*
2402                          * cap at half of the CPUs; assume the other half
2403                          * should be reserved for upper layer modules
2404                          */
2405                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2406                 }
2407
2408                 sched->kss_nthreads_max = nthrs;
2409                 sched->kss_cpt = i;
2410
2411                 spin_lock_init(&sched->kss_lock);
2412                 INIT_LIST_HEAD(&sched->kss_rx_conns);
2413                 INIT_LIST_HEAD(&sched->kss_tx_conns);
2414                 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2415                 init_waitqueue_head(&sched->kss_waitq);
2416         }
2417
2418         ksocknal_data.ksnd_connd_starting         = 0;
2419         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2420         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2421         /* must have at least 2 connds to remain responsive to accepts while
2422          * connecting */
2423         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2424                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2425
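        /* NB the assignment below copies the tunable pointers themselves
         * (no dereference): ksnd_nconnds_max is redirected to alias
         * ksnd_nconnds, so the effective maximum simply tracks the
         * configured connd count. */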
2426         if (*ksocknal_tunables.ksnd_nconnds_max <
2427             *ksocknal_tunables.ksnd_nconnds) {
2428                 ksocknal_tunables.ksnd_nconnds_max =
2429                         ksocknal_tunables.ksnd_nconnds;
2430         }
2431
2432         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2433                 char name[16];
2434                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2435                 ksocknal_data.ksnd_connd_starting++;
2436                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2437
2438
2439                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2440                 rc = ksocknal_thread_start(ksocknal_connd,
2441                                            (void *)((uintptr_t)i), name);
2442                 if (rc != 0) {
2443                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2444                         ksocknal_data.ksnd_connd_starting--;
2445                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2446                         CERROR("Can't spawn socknal connd: %d\n", rc);
2447                         goto failed;
2448                 }
2449         }
2450
2451         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2452         if (rc != 0) {
2453                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2454                 goto failed;
2455         }
2456
2457         /* flag everything initialised */
2458         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2459
2460         return 0;
2461
2462  failed:
2463         ksocknal_base_shutdown();
2464         return -ENETDOWN;
2465 }
2466
2467 static void
2468 ksocknal_debug_peerhash(struct lnet_ni *ni)
2469 {
2470         struct ksock_peer_ni *peer_ni = NULL;
2471         struct list_head *tmp;
2472         int i;
2473
2474         read_lock(&ksocknal_data.ksnd_global_lock);
2475
2476         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2477                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2478                         peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
2479
2480                         if (peer_ni->ksnp_ni == ni) break;
2481
2482                         peer_ni = NULL;
2483                 }
2484         }
2485
2486         if (peer_ni != NULL) {
2487                 struct ksock_route *route;
2488                 struct ksock_conn  *conn;
2489
2490                 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2491                        "closing %d, accepting %d, err %d, zcookie %llu, "
2492                        "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2493                        atomic_read(&peer_ni->ksnp_refcount),
2494                        peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2495                        peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2496                        peer_ni->ksnp_zc_next_cookie,
2497                        !list_empty(&peer_ni->ksnp_tx_queue),
2498                        !list_empty(&peer_ni->ksnp_zc_req_list));
2499
2500                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2501                         route = list_entry(tmp, struct ksock_route, ksnr_list);
2502                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2503                                "del %d\n", atomic_read(&route->ksnr_refcount),
2504                                route->ksnr_scheduled, route->ksnr_connecting,
2505                                route->ksnr_connected, route->ksnr_deleted);
2506                 }
2507
2508                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2509                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2510                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2511                                atomic_read(&conn->ksnc_conn_refcount),
2512                                atomic_read(&conn->ksnc_sock_refcount),
2513                                conn->ksnc_type, conn->ksnc_closing);
2514                 }
2515         }
2516
2517         read_unlock(&ksocknal_data.ksnd_global_lock);
2518         return;
2519 }
2520
2521 void
2522 ksocknal_shutdown(struct lnet_ni *ni)
2523 {
2524         struct ksock_net *net = ni->ni_data;
2525         struct lnet_process_id anyid = {
2526                 .nid = LNET_NID_ANY,
2527                 .pid = LNET_PID_ANY,
2528         };
2529         int i;
2530
2531         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2532         LASSERT(ksocknal_data.ksnd_nnets > 0);
2533
2534         spin_lock_bh(&net->ksnn_lock);
2535         net->ksnn_shutdown = 1;                 /* prevent new peers */
2536         spin_unlock_bh(&net->ksnn_lock);
2537
2538         /* Delete all peers */
2539         ksocknal_del_peer(ni, anyid, 0);
2540
2541         /* Wait for all peer_ni state to clean up */
2542         i = 2;
2543         spin_lock_bh(&net->ksnn_lock);
2544         while (net->ksnn_npeers != 0) {
2545                 spin_unlock_bh(&net->ksnn_lock);
2546
2547                 i++;
2548                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2549                        "waiting for %d peers to disconnect\n",
2550                        net->ksnn_npeers);
2551                 set_current_state(TASK_UNINTERRUPTIBLE);
2552                 schedule_timeout(cfs_time_seconds(1));
2553
2554                 ksocknal_debug_peerhash(ni);
2555
2556                 spin_lock_bh(&net->ksnn_lock);
2557         }
2558         spin_unlock_bh(&net->ksnn_lock);
2559
2560         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2561                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2562                 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2563         }
2564
2565         list_del(&net->ksnn_list);
2566         LIBCFS_FREE(net, sizeof(*net));
2567
2568         ksocknal_data.ksnd_nnets--;
2569         if (ksocknal_data.ksnd_nnets == 0)
2570                 ksocknal_base_shutdown();
2571 }
2572
2573 static int
2574 ksocknal_enumerate_interfaces(struct ksock_net *net, char *iname)
2575 {
2576         struct net_device *dev;
2577
2578         rtnl_lock();
2579         for_each_netdev(&init_net, dev) {
2580                 /* The iname specified by a userland configuration can
2581                  * map to an ifa_label so always treat iname as an ifa_label.
2582                  * If iname is NULL then fall back to the net device name.
2583                  */
2584                 const char *name = iname ? iname : dev->name;
2585                 struct in_device *in_dev;
2586
2587                 if (strcmp(dev->name, "lo") == 0) /* skip the loopback IF */
2588                         continue;
2589
2590                 if (!(dev_get_flags(dev) & IFF_UP)) {
2591                         CWARN("Ignoring interface %s (down)\n", dev->name);
2592                         continue;
2593                 }
2594
2595                 in_dev = __in_dev_get_rtnl(dev);
2596                 if (!in_dev) {
2597                         CWARN("Interface %s has no IPv4 status.\n", dev->name);
2598                         continue;
2599                 }
2600
2601                 for_ifa(in_dev)
2602                         if (strcmp(name, ifa->ifa_label) == 0) {
2603                                 int idx = net->ksnn_ninterfaces;
2604                                 struct ksock_interface *ksi;
2605
2606                                 if (idx >= ARRAY_SIZE(net->ksnn_interfaces)) {
2607                                         rtnl_unlock();
2608                                         return -E2BIG;
2609                                 }
2610
2611                                 ksi = &net->ksnn_interfaces[idx];
2612                                 ksi->ksni_ipaddr = ntohl(ifa->ifa_local);
2613                                 ksi->ksni_netmask = ifa->ifa_mask;
2614                                 strlcpy(ksi->ksni_name,
2615                                         name, sizeof(ksi->ksni_name));
2616                                 net->ksnn_ninterfaces++;
2617                                 break;
2618                         }
2619                 endfor_ifa(in_dev);
2620         }
2621         rtnl_unlock();
2622
2623         if (net->ksnn_ninterfaces == 0)
2624                 CERROR("Can't find any usable interfaces\n");
2625
2626         return net->ksnn_ninterfaces > 0 ? 0 : -ENOENT;
2627 }
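/*
 * Note: matching on ifa_label rather than on the net device name means an IP
 * alias such as "eth0:1" can be named directly in the LNet configuration,
 * e.g. (illustrative module option; interface and network names are examples
 * only):
 *
 *	options lnet networks="tcp0(eth0:1)"
 */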
2628
2629 static int
2630 ksocknal_search_new_ipif(struct ksock_net *net)
2631 {
2632         int new_ipif = 0;
2633         int i;
2634
2635         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2636                 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2637                 char *colon = strchr(ifnam, ':');
2638                 int found  = 0;
2639                 struct ksock_net *tmp;
2640                 int j;
2641
2642                 if (colon != NULL) /* ignore alias device */
2643                         *colon = 0;
2644
2645                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2646                                         ksnn_list) {
2647                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2648                                 char *ifnam2 = &tmp->ksnn_interfaces[j].\
2649                                              ksni_name[0];
2650                                 char *colon2 = strchr(ifnam2, ':');
2651
2652                                 if (colon2 != NULL)
2653                                         *colon2 = 0;
2654
2655                                 found = strcmp(ifnam, ifnam2) == 0;
2656                                 if (colon2 != NULL)
2657                                         *colon2 = ':';
2658                         }
2659                         if (found)
2660                                 break;
2661                 }
2662
2663                 new_ipif += !found;
2664                 if (colon != NULL)
2665                         *colon = ':';
2666         }
2667
2668         return new_ipif;
2669 }
2670
2671 static int
2672 ksocknal_start_schedulers(struct ksock_sched *sched)
2673 {
2674         int     nthrs;
2675         int     rc = 0;
2676         int     i;
2677
2678         if (sched->kss_nthreads == 0) {
2679                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2680                         nthrs = sched->kss_nthreads_max;
2681                 } else {
2682                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2683                                                sched->kss_cpt);
2684                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2685                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2686                 }
2687                 nthrs = min(nthrs, sched->kss_nthreads_max);
2688         } else {
2689                 LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
2690                 /* start up to two more threads if there is a new interface */
2691                 nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
2692         }
2693
2694         for (i = 0; i < nthrs; i++) {
2695                 long id;
2696                 char name[20];
2697
2698                 id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
2699                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2700                          sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
2701
2702                 rc = ksocknal_thread_start(ksocknal_scheduler,
2703                                            (void *)id, name);
2704                 if (rc == 0)
2705                         continue;
2706
2707                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2708                        sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
2709                 break;
2710         }
2711
2712         sched->kss_nthreads += i;
2713         return rc;
2714 }
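/*
 * Scheduler threads are named "socknal_sd<cpt>_<sid>".  KSOCK_THREAD_ID()
 * (defined in the socklnd headers) packs the CPT and the per-CPT scheduler
 * index into the single long passed to ksocknal_thread_start(), and
 * KSOCK_THREAD_SID() recovers the index, as used above.
 */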
2715
2716 static int
2717 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2718 {
2719         int newif = ksocknal_search_new_ipif(net);
2720         int rc;
2721         int i;
2722
2723         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2724                 return -EINVAL;
2725
2726         for (i = 0; i < ncpts; i++) {
2727                 struct ksock_sched *sched;
2728                 int cpt = (cpts == NULL) ? i : cpts[i];
2729
2730                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2731                 sched = ksocknal_data.ksnd_schedulers[cpt];
2732
2733                 if (!newif && sched->kss_nthreads > 0)
2734                         continue;
2735
2736                 rc = ksocknal_start_schedulers(sched);
2737                 if (rc != 0)
2738                         return rc;
2739         }
2740         return 0;
2741 }
2742
2743 int
2744 ksocknal_startup(struct lnet_ni *ni)
2745 {
2746         struct ksock_net *net;
2747         struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
2748         int rc;
2749         int i;
2750         struct net_device *net_dev;
2751         int node_id;
2752
2753         LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
2754
2755         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2756                 rc = ksocknal_base_startup();
2757                 if (rc != 0)
2758                         return rc;
2759         }
2760
2761         LIBCFS_ALLOC(net, sizeof(*net));
2762         if (net == NULL)
2763                 goto fail_0;
2764
2765         spin_lock_init(&net->ksnn_lock);
2766         net->ksnn_incarnation = ktime_get_real_ns();
2767         ni->ni_data = net;
2768         net_tunables = &ni->ni_net->net_tunables;
2769
2770         if (net_tunables->lct_peer_timeout == -1)
2771                 net_tunables->lct_peer_timeout =
2772                         *ksocknal_tunables.ksnd_peertimeout;
2773
2774         if (net_tunables->lct_max_tx_credits == -1)
2775                 net_tunables->lct_max_tx_credits =
2776                         *ksocknal_tunables.ksnd_credits;
2777
2778         if (net_tunables->lct_peer_tx_credits == -1)
2779                 net_tunables->lct_peer_tx_credits =
2780                         *ksocknal_tunables.ksnd_peertxcredits;
2781
2782         if (net_tunables->lct_peer_tx_credits >
2783             net_tunables->lct_max_tx_credits)
2784                 net_tunables->lct_peer_tx_credits =
2785                         net_tunables->lct_max_tx_credits;
2786
2787         if (net_tunables->lct_peer_rtr_credits == -1)
2788                 net_tunables->lct_peer_rtr_credits =
2789                         *ksocknal_tunables.ksnd_peerrtrcredits;
2790
2791         if (!ni->ni_interfaces[0]) {
2792                 rc = ksocknal_enumerate_interfaces(net, NULL);
2793                 if (rc < 0)
2794                         goto fail_1;
2795         } else {
2796                 /* Before Multi-Rail ksocklnd would manage
2797                  * multiple interfaces with its own tcp bonding.
2798                  * If we encounter an old configuration using
2799                  * this tcp bonding approach then we need to
2800                  * handle more than one entry in ni_interfaces.
2801                  *
2802                  * In a Multi-Rail configuration only ONE ni_interfaces
2803                  * entry should exist. Each IP alias should be mapped
2804                  * to its own 'struct lnet_ni'.
2805                  */
2806                 for (i = 0; i < LNET_INTERFACES_NUM; i++) {
2807                         int j;
2808
2809                         if (!ni->ni_interfaces[i])
2810                                 break;
2811
2812                         for (j = 0; j < net->ksnn_ninterfaces;  j++) {
2813                                 struct ksock_interface *ksi;
2814
2815                                 ksi = &net->ksnn_interfaces[j];
2816
2817                                 if (strcmp(ni->ni_interfaces[i],
2818                                            ksi->ksni_name) == 0) {
2819                                         CERROR("found duplicate %s\n",
2820                                                ksi->ksni_name);
2821                                         rc = -EEXIST;
2822                                         goto fail_1;
2823                                 }
2824                         }
2825
2826                         rc = ksocknal_enumerate_interfaces(net, ni->ni_interfaces[i]);
2827                         if (rc < 0)
2828                                 goto fail_1;
2829                 }
2830         }
2831
2832         net_dev = dev_get_by_name(&init_net,
2833                                   net->ksnn_interfaces[0].ksni_name);
2834         if (net_dev != NULL) {
2835                 node_id = dev_to_node(&net_dev->dev);
2836                 ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
2837                 dev_put(net_dev);
2838         } else {
2839                 ni->ni_dev_cpt = CFS_CPT_ANY;
2840         }
2841
2842         /* call this before adding the net to ksocknal_data.ksnd_nets */
2843         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2844         if (rc != 0)
2845                 goto fail_1;
2846
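        /* The NID's address part is taken from the first configured
         * interface's IPv4 address (e.g. 192.0.2.1 gives a NID of the form
         * 192.0.2.1@tcp; address illustrative). */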
2847         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2848                                 net->ksnn_interfaces[0].ksni_ipaddr);
2849         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2850
2851         ksocknal_data.ksnd_nnets++;
2852
2853         return 0;
2854
2855  fail_1:
2856         LIBCFS_FREE(net, sizeof(*net));
2857  fail_0:
2858         if (ksocknal_data.ksnd_nnets == 0)
2859                 ksocknal_base_shutdown();
2860
2861         return -ENETDOWN;
2862 }
2863
2864
2865 static void __exit ksocklnd_exit(void)
2866 {
2867         lnet_unregister_lnd(&the_ksocklnd);
2868 }
2869
2870 static int __init ksocklnd_init(void)
2871 {
2872         int rc;
2873
2874         /* check ksnr_connected/connecting field large enough */
2875         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2876         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2877
2878         /* initialize the_ksocklnd */
2879         the_ksocklnd.lnd_type     = SOCKLND;
2880         the_ksocklnd.lnd_startup  = ksocknal_startup;
2881         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2882         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2883         the_ksocklnd.lnd_send     = ksocknal_send;
2884         the_ksocklnd.lnd_recv     = ksocknal_recv;
2885         the_ksocklnd.lnd_notify   = ksocknal_notify;
2886         the_ksocklnd.lnd_query    = ksocknal_query;
2887         the_ksocklnd.lnd_accept   = ksocknal_accept;
2888
2889         rc = ksocknal_tunables_init();
2890         if (rc != 0)
2891                 return rc;
2892
2893         lnet_register_lnd(&the_ksocklnd);
2894
2895         return 0;
2896 }
2897
2898 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2899 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2900 MODULE_VERSION("2.8.0");
2901 MODULE_LICENSE("GPL");
2902
2903 module_init(ksocklnd_init);
2904 module_exit(ksocklnd_exit);