1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include "socklnd.h"
41
42 static lnd_t                   the_ksocklnd;
43 ksock_nal_data_t        ksocknal_data;
44
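/* Return the interface on this NI whose address is 'ip', or NULL if no
 * local interface is configured with that address. */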
45 static ksock_interface_t *
46 ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
47 {
48         ksock_net_t       *net = ni->ni_data;
49         int                i;
50         ksock_interface_t *iface;
51
52         for (i = 0; i < net->ksnn_ninterfaces; i++) {
53                 LASSERT(i < LNET_MAX_INTERFACES);
54                 iface = &net->ksnn_interfaces[i];
55
56                 if (iface->ksni_ipaddr == ip)
57                         return (iface);
58         }
59
60         return (NULL);
61 }
62
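/* Allocate and initialise a route to ipaddr:port; the caller holds the
 * single initial reference.  Returns NULL on allocation failure. */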
63 static ksock_route_t *
64 ksocknal_create_route (__u32 ipaddr, int port)
65 {
66         ksock_route_t *route;
67
68         LIBCFS_ALLOC (route, sizeof (*route));
69         if (route == NULL)
70                 return (NULL);
71
72         atomic_set (&route->ksnr_refcount, 1);
73         route->ksnr_peer = NULL;
74         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
75         route->ksnr_ipaddr = ipaddr;
76         route->ksnr_port = port;
77         route->ksnr_scheduled = 0;
78         route->ksnr_connecting = 0;
79         route->ksnr_connected = 0;
80         route->ksnr_deleted = 0;
81         route->ksnr_conn_count = 0;
82         route->ksnr_share_count = 0;
83
84         return (route);
85 }
86
87 void
88 ksocknal_destroy_route (ksock_route_t *route)
89 {
90         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
91
92         if (route->ksnr_peer != NULL)
93                 ksocknal_peer_decref(route->ksnr_peer);
94
95         LIBCFS_FREE (route, sizeof (*route));
96 }
97
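/* Allocate a peer for 'id' on the CPT matching its NID, with one reference
 * for the caller, and count it against the net.  Fails with -ESHUTDOWN if
 * the net is being shut down. */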
98 static int
99 ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
100 {
101         int             cpt = lnet_cpt_of_nid(id.nid, ni);
102         ksock_net_t     *net = ni->ni_data;
103         ksock_peer_t    *peer;
104
105         LASSERT(id.nid != LNET_NID_ANY);
106         LASSERT(id.pid != LNET_PID_ANY);
107         LASSERT(!in_interrupt());
108
109         LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
110         if (peer == NULL)
111                 return -ENOMEM;
112
113         peer->ksnp_ni = ni;
114         peer->ksnp_id = id;
115         atomic_set(&peer->ksnp_refcount, 1);    /* 1 ref for caller */
116         peer->ksnp_closing = 0;
117         peer->ksnp_accepting = 0;
118         peer->ksnp_proto = NULL;
119         peer->ksnp_last_alive = 0;
120         peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
121
122         INIT_LIST_HEAD(&peer->ksnp_conns);
123         INIT_LIST_HEAD(&peer->ksnp_routes);
124         INIT_LIST_HEAD(&peer->ksnp_tx_queue);
125         INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
126         spin_lock_init(&peer->ksnp_lock);
127
128         spin_lock_bh(&net->ksnn_lock);
129
130         if (net->ksnn_shutdown) {
131                 spin_unlock_bh(&net->ksnn_lock);
132
133                 LIBCFS_FREE(peer, sizeof(*peer));
134                 CERROR("Can't create peer: network shutdown\n");
135                 return -ESHUTDOWN;
136         }
137
138         net->ksnn_npeers++;
139
140         spin_unlock_bh(&net->ksnn_lock);
141
142         *peerp = peer;
143         return 0;
144 }
145
146 void
147 ksocknal_destroy_peer (ksock_peer_t *peer)
148 {
149         ksock_net_t    *net = peer->ksnp_ni->ni_data;
150
151         CDEBUG (D_NET, "peer %s %p deleted\n",
152                 libcfs_id2str(peer->ksnp_id), peer);
153
154         LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
155         LASSERT(peer->ksnp_accepting == 0);
156         LASSERT(list_empty(&peer->ksnp_conns));
157         LASSERT(list_empty(&peer->ksnp_routes));
158         LASSERT(list_empty(&peer->ksnp_tx_queue));
159         LASSERT(list_empty(&peer->ksnp_zc_req_list));
160
161         LIBCFS_FREE(peer, sizeof(*peer));
162
163         /* NB a peer's connections and routes keep a reference on their peer
164          * until they are destroyed, so we can be assured that _all_ state to
165          * do with this peer has been cleaned up when its refcount drops to
166          * zero. */
167         spin_lock_bh(&net->ksnn_lock);
168         net->ksnn_npeers--;
169         spin_unlock_bh(&net->ksnn_lock);
170 }
171
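/* Look up the peer with matching NID/PID on 'ni' in the peer hash table.
 * Caller must hold ksnd_global_lock; no reference is taken on the
 * returned peer. */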
172 ksock_peer_t *
173 ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
174 {
175         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
176         struct list_head *tmp;
177         ksock_peer_t     *peer;
178
179         list_for_each(tmp, peer_list) {
180
181                 peer = list_entry(tmp, ksock_peer_t, ksnp_list);
182
183                 LASSERT(!peer->ksnp_closing);
184
185                 if (peer->ksnp_ni != ni)
186                         continue;
187
188                 if (peer->ksnp_id.nid != id.nid ||
189                     peer->ksnp_id.pid != id.pid)
190                         continue;
191
192                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
193                        peer, libcfs_id2str(id),
194                        atomic_read(&peer->ksnp_refcount));
195                 return peer;
196         }
197         return NULL;
198 }
199
200 ksock_peer_t *
201 ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
202 {
203         ksock_peer_t     *peer;
204
205         read_lock(&ksocknal_data.ksnd_global_lock);
206         peer = ksocknal_find_peer_locked(ni, id);
207         if (peer != NULL)                       /* +1 ref for caller? */
208                 ksocknal_peer_addref(peer);
209         read_unlock(&ksocknal_data.ksnd_global_lock);
210
211         return (peer);
212 }
213
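/* Remove the peer from the peer hash table: release its claims on the
 * passive-IP interfaces, mark it closing and drop the table's reference. */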
214 static void
215 ksocknal_unlink_peer_locked (ksock_peer_t *peer)
216 {
217         int                i;
218         __u32              ip;
219         ksock_interface_t *iface;
220
221         for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
222                 LASSERT (i < LNET_MAX_INTERFACES);
223                 ip = peer->ksnp_passive_ips[i];
224
225                 iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
226                 /* All IPs in peer->ksnp_passive_ips[] come from the
227                  * interface list, therefore the call must succeed. */
228                 LASSERT (iface != NULL);
229
230                 CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
231                        peer, iface, iface->ksni_nroutes);
232                 iface->ksni_npeers--;
233         }
234
235         LASSERT(list_empty(&peer->ksnp_conns));
236         LASSERT(list_empty(&peer->ksnp_routes));
237         LASSERT(!peer->ksnp_closing);
238         peer->ksnp_closing = 1;
239         list_del(&peer->ksnp_list);
240         /* lose peerlist's ref */
241         ksocknal_peer_decref(peer);
242 }
243
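/* Return details of the index'th entry (bare peer, passive IP or route)
 * for peers on 'ni', or -ENOENT when 'index' runs past the end. */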
244 static int
245 ksocknal_get_peer_info (lnet_ni_t *ni, int index,
246                         lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
247                         int *port, int *conn_count, int *share_count)
248 {
249         ksock_peer_t      *peer;
250         struct list_head  *ptmp;
251         ksock_route_t     *route;
252         struct list_head  *rtmp;
253         int                i;
254         int                j;
255         int                rc = -ENOENT;
256
257         read_lock(&ksocknal_data.ksnd_global_lock);
258
259         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
260                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
261                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
262
263                         if (peer->ksnp_ni != ni)
264                                 continue;
265
266                         if (peer->ksnp_n_passive_ips == 0 &&
267                             list_empty(&peer->ksnp_routes)) {
268                                 if (index-- > 0)
269                                         continue;
270
271                                 *id = peer->ksnp_id;
272                                 *myip = 0;
273                                 *peer_ip = 0;
274                                 *port = 0;
275                                 *conn_count = 0;
276                                 *share_count = 0;
277                                 rc = 0;
278                                 goto out;
279                         }
280
281                         for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
282                                 if (index-- > 0)
283                                         continue;
284
285                                 *id = peer->ksnp_id;
286                                 *myip = peer->ksnp_passive_ips[j];
287                                 *peer_ip = 0;
288                                 *port = 0;
289                                 *conn_count = 0;
290                                 *share_count = 0;
291                                 rc = 0;
292                                 goto out;
293                         }
294
295                         list_for_each(rtmp, &peer->ksnp_routes) {
296                                 if (index-- > 0)
297                                         continue;
298
299                                 route = list_entry(rtmp, ksock_route_t,
300                                                    ksnr_list);
301
302                                 *id = peer->ksnp_id;
303                                 *myip = route->ksnr_myipaddr;
304                                 *peer_ip = route->ksnr_ipaddr;
305                                 *port = route->ksnr_port;
306                                 *conn_count = route->ksnr_conn_count;
307                                 *share_count = route->ksnr_share_count;
308                                 rc = 0;
309                                 goto out;
310                         }
311                 }
312         }
313 out:
314         read_unlock(&ksocknal_data.ksnd_global_lock);
315         return rc;
316 }
317
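/* Bind 'conn' to 'route': record the local address the connection actually
 * used (adjusting per-interface route counts if it changed), mark this
 * connection type established on the route and reset its retry interval. */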
318 static void
319 ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
320 {
321         ksock_peer_t      *peer = route->ksnr_peer;
322         int                type = conn->ksnc_type;
323         ksock_interface_t *iface;
324
325         conn->ksnc_route = route;
326         ksocknal_route_addref(route);
327
328         if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
329                 if (route->ksnr_myipaddr == 0) {
330                         /* route wasn't bound locally yet (the initial route) */
331                         CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
332                                libcfs_id2str(peer->ksnp_id),
333                                &route->ksnr_ipaddr,
334                                &conn->ksnc_myipaddr);
335                 } else {
336                         CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
337                                "to %pI4h\n", libcfs_id2str(peer->ksnp_id),
338                                &route->ksnr_ipaddr,
339                                &route->ksnr_myipaddr,
340                                &conn->ksnc_myipaddr);
341
342                         iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
343                                                   route->ksnr_myipaddr);
344                         if (iface != NULL)
345                                 iface->ksni_nroutes--;
346                 }
347                 route->ksnr_myipaddr = conn->ksnc_myipaddr;
348                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
349                                           route->ksnr_myipaddr);
350                 if (iface != NULL)
351                         iface->ksni_nroutes++;
352         }
353
354         route->ksnr_connected |= (1<<type);
355         route->ksnr_conn_count++;
356
357         /* Successful connection => further attempts can
358          * proceed immediately */
359         route->ksnr_retry_interval = 0;
360 }
361
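/* Add 'route' to 'peer'; the peer's route list takes over the caller's
 * reference, and the route is associated with any existing connections to
 * its IP address.  A duplicate route is a fatal error (LBUG). */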
362 static void
363 ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
364 {
365         struct list_head *tmp;
366         ksock_conn_t     *conn;
367         ksock_route_t    *route2;
368
369         LASSERT(!peer->ksnp_closing);
370         LASSERT(route->ksnr_peer == NULL);
371         LASSERT(!route->ksnr_scheduled);
372         LASSERT(!route->ksnr_connecting);
373         LASSERT(route->ksnr_connected == 0);
374
375         /* LASSERT(unique) */
376         list_for_each(tmp, &peer->ksnp_routes) {
377                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
378
379                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
380                         CERROR("Duplicate route %s %pI4h\n",
381                                libcfs_id2str(peer->ksnp_id),
382                                &route->ksnr_ipaddr);
383                         LBUG();
384                 }
385         }
386
387         route->ksnr_peer = peer;
388         ksocknal_peer_addref(peer);
389         /* peer's routelist takes over my ref on 'route' */
390         list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
391
392         list_for_each(tmp, &peer->ksnp_conns) {
393                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
394
395                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
396                         continue;
397
398                 ksocknal_associate_route_conn_locked(route, conn);
399                 /* keep going (typed routes) */
400         }
401 }
402
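/* Delete a route: close any connections using it, drop the peer's
 * reference on it, and unlink the peer if this leaves it with no routes
 * or connections. */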
403 static void
404 ksocknal_del_route_locked (ksock_route_t *route)
405 {
406         ksock_peer_t      *peer = route->ksnr_peer;
407         ksock_interface_t *iface;
408         ksock_conn_t      *conn;
409         struct list_head  *ctmp;
410         struct list_head  *cnxt;
411
412         LASSERT(!route->ksnr_deleted);
413
414         /* Close associated conns */
415         list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
416                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
417
418                 if (conn->ksnc_route != route)
419                         continue;
420
421                 ksocknal_close_conn_locked(conn, 0);
422         }
423
424         if (route->ksnr_myipaddr != 0) {
425                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
426                                           route->ksnr_myipaddr);
427                 if (iface != NULL)
428                         iface->ksni_nroutes--;
429         }
430
431         route->ksnr_deleted = 1;
432         list_del(&route->ksnr_list);
433         ksocknal_route_decref(route);           /* drop peer's ref */
434
435         if (list_empty(&peer->ksnp_routes) &&
436             list_empty(&peer->ksnp_conns)) {
437                 /* I've just removed the last route to a peer with no active
438                  * connections */
439                 ksocknal_unlink_peer_locked(peer);
440         }
441 }
442
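/* Add an explicitly configured peer: create (or reuse) the peer for 'id'
 * and add a new route to ipaddr:port, or bump the share count of an
 * existing route to that address. */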
443 int
444 ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
445 {
446         struct list_head *tmp;
447         ksock_peer_t     *peer;
448         ksock_peer_t     *peer2;
449         ksock_route_t    *route;
450         ksock_route_t    *route2;
451         int               rc;
452
453         if (id.nid == LNET_NID_ANY ||
454             id.pid == LNET_PID_ANY)
455                 return (-EINVAL);
456
457         /* Have a brand new peer ready... */
458         rc = ksocknal_create_peer(&peer, ni, id);
459         if (rc != 0)
460                 return rc;
461
462         route = ksocknal_create_route (ipaddr, port);
463         if (route == NULL) {
464                 ksocknal_peer_decref(peer);
465                 return (-ENOMEM);
466         }
467
468         write_lock_bh(&ksocknal_data.ksnd_global_lock);
469
470         /* always called with a ref on ni, so shutdown can't have started */
471         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
472
473         peer2 = ksocknal_find_peer_locked(ni, id);
474         if (peer2 != NULL) {
475                 ksocknal_peer_decref(peer);
476                 peer = peer2;
477         } else {
478                 /* peer table takes my ref on peer */
479                 list_add_tail(&peer->ksnp_list,
480                               ksocknal_nid2peerlist(id.nid));
481         }
482
483         route2 = NULL;
484         list_for_each(tmp, &peer->ksnp_routes) {
485                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
486
487                 if (route2->ksnr_ipaddr == ipaddr)
488                         break;
489
490                 route2 = NULL;
491         }
492         if (route2 == NULL) {
493                 ksocknal_add_route_locked(peer, route);
494                 route->ksnr_share_count++;
495         } else {
496                 ksocknal_route_decref(route);
497                 route2->ksnr_share_count++;
498         }
499
500         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
501
502         return 0;
503 }
504
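/* Remove the routes to 'peer' that match 'ip' (all of them if ip is 0),
 * closing their connections; if no explicitly shared routes remain,
 * remove the automatic ones and close every remaining connection too. */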
505 static void
506 ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
507 {
508         ksock_conn_t     *conn;
509         ksock_route_t    *route;
510         struct list_head *tmp;
511         struct list_head *nxt;
512         int               nshared;
513
514         LASSERT(!peer->ksnp_closing);
515
516         /* Extra ref prevents peer disappearing until I'm done with it */
517         ksocknal_peer_addref(peer);
518
519         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
520                 route = list_entry(tmp, ksock_route_t, ksnr_list);
521
522                 /* no match */
523                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
524                         continue;
525
526                 route->ksnr_share_count = 0;
527                 /* This deletes associated conns too */
528                 ksocknal_del_route_locked(route);
529         }
530
531         nshared = 0;
532         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
533                 route = list_entry(tmp, ksock_route_t, ksnr_list);
534                 nshared += route->ksnr_share_count;
535         }
536
537         if (nshared == 0) {
538                 /* remove everything else if there are no explicit entries
539                  * left */
540
541                 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
542                         route = list_entry(tmp, ksock_route_t, ksnr_list);
543
544                         /* we should only be removing auto-entries */
545                         LASSERT(route->ksnr_share_count == 0);
546                         ksocknal_del_route_locked(route);
547                 }
548
549                 list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
550                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
551
552                         ksocknal_close_conn_locked(conn, 0);
553                 }
554         }
555
556         ksocknal_peer_decref(peer);
557         /* NB peer unlinks itself when last conn/route is removed */
558 }
559
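/* Delete the peer(s) on 'ni' matching 'id' (NID/PID wildcards allowed),
 * removing routes to 'ip' and finalising any transmits left queued on
 * peers that disappear.  Returns -ENOENT if nothing matched. */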
560 static int
561 ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
562 {
563         struct list_head  zombies = LIST_HEAD_INIT(zombies);
564         struct list_head *ptmp;
565         struct list_head *pnxt;
566         ksock_peer_t     *peer;
567         int               lo;
568         int               hi;
569         int               i;
570         int               rc = -ENOENT;
571
572         write_lock_bh(&ksocknal_data.ksnd_global_lock);
573
574         if (id.nid != LNET_NID_ANY) {
575                 hi = (int)(ksocknal_nid2peerlist(id.nid) -
576                            ksocknal_data.ksnd_peers);
577                 lo = hi;
578         } else {
579                 lo = 0;
580                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
581         }
582
583         for (i = lo; i <= hi; i++) {
584                 list_for_each_safe(ptmp, pnxt,
585                                    &ksocknal_data.ksnd_peers[i]) {
586                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
587
588                         if (peer->ksnp_ni != ni)
589                                 continue;
590
591                         if (!((id.nid == LNET_NID_ANY ||
592                                peer->ksnp_id.nid == id.nid) &&
593                               (id.pid == LNET_PID_ANY ||
594                                peer->ksnp_id.pid == id.pid)))
595                                 continue;
596
597                         ksocknal_peer_addref(peer);     /* a ref for me... */
598
599                         ksocknal_del_peer_locked(peer, ip);
600
601                         if (peer->ksnp_closing &&
602                             !list_empty(&peer->ksnp_tx_queue)) {
603                                 LASSERT(list_empty(&peer->ksnp_conns));
604                                 LASSERT(list_empty(&peer->ksnp_routes));
605
606                                 list_splice_init(&peer->ksnp_tx_queue,
607                                                  &zombies);
608                         }
609
610                         ksocknal_peer_decref(peer);     /* ...till here */
611
612                         rc = 0;                         /* matched! */
613                 }
614         }
615
616         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
617
618         ksocknal_txlist_done(ni, &zombies, 1);
619
620         return rc;
621 }
622
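/* Return the index'th connection on 'ni' with a reference taken for the
 * caller, or NULL if 'index' runs past the last connection. */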
623 static ksock_conn_t *
624 ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
625 {
626         ksock_peer_t     *peer;
627         struct list_head *ptmp;
628         ksock_conn_t     *conn;
629         struct list_head *ctmp;
630         int               i;
631
632         read_lock(&ksocknal_data.ksnd_global_lock);
633
634         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
635                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
636                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
637
638                         LASSERT(!peer->ksnp_closing);
639
640                         if (peer->ksnp_ni != ni)
641                                 continue;
642
643                         list_for_each(ctmp, &peer->ksnp_conns) {
644                                 if (index-- > 0)
645                                         continue;
646
647                                 conn = list_entry(ctmp, ksock_conn_t,
648                                                   ksnc_list);
649                                 ksocknal_conn_addref(conn);
650                                 read_unlock(&ksocknal_data.
651                                             ksnd_global_lock);
652                                 return conn;
653                         }
654                 }
655         }
656
657         read_unlock(&ksocknal_data.ksnd_global_lock);
658         return NULL;
659 }
660
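/* Return the scheduler with the fewest connections among the scheduler
 * threads of the given CPT. */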
661 static ksock_sched_t *
662 ksocknal_choose_scheduler_locked(unsigned int cpt)
663 {
664         struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
665         ksock_sched_t           *sched;
666         int                     i;
667
668         LASSERT(info->ksi_nthreads > 0);
669
670         sched = &info->ksi_scheds[0];
671         /*
672          * NB: this is safe for now, but info->ksi_nthreads may change at
673          * runtime once we have dynamic LNet configuration, at which point
674          * this will need to be handled.
675          */
676         for (i = 1; i < info->ksi_nthreads; i++) {
677                 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
678                         sched = &info->ksi_scheds[i];
679         }
680
681         return sched;
682 }
683
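/* Copy this net's interface addresses into 'ipaddrs' and return how many
 * were copied; returns 0 (offering nothing) unless more than one interface
 * is configured. */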
684 static int
685 ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
686 {
687         ksock_net_t       *net = ni->ni_data;
688         int                i;
689         int                nip;
690
691         read_lock(&ksocknal_data.ksnd_global_lock);
692
693         nip = net->ksnn_ninterfaces;
694         LASSERT (nip <= LNET_MAX_INTERFACES);
695
696         /* Only offer interfaces for additional connections if I have
697          * more than one. */
698         if (nip < 2) {
699                 read_unlock(&ksocknal_data.ksnd_global_lock);
700                 return 0;
701         }
702
703         for (i = 0; i < nip; i++) {
704                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
705                 LASSERT (ipaddrs[i] != 0);
706         }
707
708         read_unlock(&ksocknal_data.ksnd_global_lock);
709         return (nip);
710 }
711
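/* Pick the entry in 'ips' that best matches 'iface': prefer an address on
 * the same subnet, then the smallest address XOR; returns its index. */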
712 static int
713 ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
714 {
715         int   best_netmatch = 0;
716         int   best_xor      = 0;
717         int   best          = -1;
718         int   this_xor;
719         int   this_netmatch;
720         int   i;
721
722         for (i = 0; i < nips; i++) {
723                 if (ips[i] == 0)
724                         continue;
725
726                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
727                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
728
729                 if (!(best < 0 ||
730                       best_netmatch < this_netmatch ||
731                       (best_netmatch == this_netmatch &&
732                        best_xor > this_xor)))
733                         continue;
734
735                 best = i;
736                 best_netmatch = this_netmatch;
737                 best_xor = this_xor;
738         }
739
740         LASSERT (best >= 0);
741         return (best);
742 }
743
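/* Choose up to 'n_peerips' local interfaces for this peer to connect to
 * (none unless we have more than one interface), record them as the peer's
 * passive IPs, and overwrite 'peerips' with the selection.  Returns the
 * number chosen. */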
744 static int
745 ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
746 {
747         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
748         ksock_net_t        *net = peer->ksnp_ni->ni_data;
749         ksock_interface_t  *iface;
750         ksock_interface_t  *best_iface;
751         int                 n_ips;
752         int                 i;
753         int                 j;
754         int                 k;
755         __u32               ip;
756         __u32               xor;
757         int                 this_netmatch;
758         int                 best_netmatch;
759         int                 best_npeers;
760
761         /* CAVEAT EMPTOR: We do all our interface matching with an
762          * exclusive hold of global lock at IRQ priority.  We're only
763          * expecting to be dealing with small numbers of interfaces, so the
764          * O(n**3)-ness shouldn't matter */
765
766         /* Also note that I'm not going to return more than n_peerips
767          * interfaces, even if I have more myself */
768
769         write_lock_bh(global_lock);
770
771         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
772         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
773
774         /* Only match interfaces for additional connections
775          * if I have > 1 interface */
776         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
777                 MIN(n_peerips, net->ksnn_ninterfaces);
778
779         for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
780                 /*              ^ yes really... */
781
782                 /* If we have any new interfaces, first tick off all the
783                  * peer IPs that match old interfaces, then choose new
784                  * interfaces to match the remaining peer IPs.
785                  * We don't forget interfaces we've stopped using; we might
786                  * start using them again... */
787
788                 if (i < peer->ksnp_n_passive_ips) {
789                         /* Old interface. */
790                         ip = peer->ksnp_passive_ips[i];
791                         best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
792
793                         /* peer passive ips are kept up to date */
794                         LASSERT(best_iface != NULL);
795                 } else {
796                         /* choose a new interface */
797                         LASSERT (i == peer->ksnp_n_passive_ips);
798
799                         best_iface = NULL;
800                         best_netmatch = 0;
801                         best_npeers = 0;
802
803                         for (j = 0; j < net->ksnn_ninterfaces; j++) {
804                                 iface = &net->ksnn_interfaces[j];
805                                 ip = iface->ksni_ipaddr;
806
807                                 for (k = 0; k < peer->ksnp_n_passive_ips; k++)
808                                         if (peer->ksnp_passive_ips[k] == ip)
809                                                 break;
810
811                                 if (k < peer->ksnp_n_passive_ips) /* using it already */
812                                         continue;
813
814                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
815                                 xor = (ip ^ peerips[k]);
816                                 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
817
818                                 if (!(best_iface == NULL ||
819                                       best_netmatch < this_netmatch ||
820                                       (best_netmatch == this_netmatch &&
821                                        best_npeers > iface->ksni_npeers)))
822                                         continue;
823
824                                 best_iface = iface;
825                                 best_netmatch = this_netmatch;
826                                 best_npeers = iface->ksni_npeers;
827                         }
828
829                         LASSERT(best_iface != NULL);
830
831                         best_iface->ksni_npeers++;
832                         ip = best_iface->ksni_ipaddr;
833                         peer->ksnp_passive_ips[i] = ip;
834                         peer->ksnp_n_passive_ips = i+1;
835                 }
836
837                 /* mark the best matching peer IP used */
838                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
839                 peerips[j] = 0;
840         }
841
842         /* Overwrite input peer IP addresses */
843         memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
844
845         write_unlock_bh(global_lock);
846
847         return (n_ips);
848 }
849
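/* Create routes to each of the peer's advertised IP addresses that we
 * don't already have a route to, binding each to the best local interface
 * to connect from; does nothing unless more than one local interface is
 * configured. */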
850 static void
851 ksocknal_create_routes(ksock_peer_t *peer, int port,
852                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
853 {
854         ksock_route_t           *newroute = NULL;
855         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
856         lnet_ni_t               *ni = peer->ksnp_ni;
857         ksock_net_t             *net = ni->ni_data;
858         struct list_head        *rtmp;
859         ksock_route_t           *route;
860         ksock_interface_t       *iface;
861         ksock_interface_t       *best_iface;
862         int                     best_netmatch;
863         int                     this_netmatch;
864         int                     best_nroutes;
865         int                     i;
866         int                     j;
867
868         /* CAVEAT EMPTOR: We do all our interface matching with an
869          * exclusive hold of global lock at IRQ priority.  We're only
870          * expecting to be dealing with small numbers of interfaces, so the
871          * O(n**3)-ness here shouldn't matter */
872
873         write_lock_bh(global_lock);
874
875         if (net->ksnn_ninterfaces < 2) {
876                 /* Only create additional connections
877                  * if I have > 1 interface */
878                 write_unlock_bh(global_lock);
879                 return;
880         }
881
882         LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
883
884         for (i = 0; i < npeer_ipaddrs; i++) {
885                 if (newroute != NULL) {
886                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
887                 } else {
888                         write_unlock_bh(global_lock);
889
890                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
891                         if (newroute == NULL)
892                                 return;
893
894                         write_lock_bh(global_lock);
895                 }
896
897                 if (peer->ksnp_closing) {
898                         /* peer got closed under me */
899                         break;
900                 }
901
902                 /* Already got a route? */
903                 route = NULL;
904                 list_for_each(rtmp, &peer->ksnp_routes) {
905                         route = list_entry(rtmp, ksock_route_t, ksnr_list);
906
907                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
908                                 break;
909
910                         route = NULL;
911                 }
912                 if (route != NULL)
913                         continue;
914
915                 best_iface = NULL;
916                 best_nroutes = 0;
917                 best_netmatch = 0;
918
919                 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
920
921                 /* Select interface to connect from */
922                 for (j = 0; j < net->ksnn_ninterfaces; j++) {
923                         iface = &net->ksnn_interfaces[j];
924
925                         /* Using this interface already? */
926                         list_for_each(rtmp, &peer->ksnp_routes) {
927                                 route = list_entry(rtmp, ksock_route_t,
928                                                    ksnr_list);
929
930                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
931                                         break;
932
933                                 route = NULL;
934                         }
935                         if (route != NULL)
936                                 continue;
937
938                         this_netmatch = (((iface->ksni_ipaddr ^
939                                            newroute->ksnr_ipaddr) &
940                                            iface->ksni_netmask) == 0) ? 1 : 0;
941
942                         if (!(best_iface == NULL ||
943                               best_netmatch < this_netmatch ||
944                               (best_netmatch == this_netmatch &&
945                                best_nroutes > iface->ksni_nroutes)))
946                                 continue;
947
948                         best_iface = iface;
949                         best_netmatch = this_netmatch;
950                         best_nroutes = iface->ksni_nroutes;
951                 }
952
953                 if (best_iface == NULL)
954                         continue;
955
956                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
957                 best_iface->ksni_nroutes++;
958
959                 ksocknal_add_route_locked(peer, newroute);
960                 newroute = NULL;
961         }
962
963         write_unlock_bh(global_lock);
964         if (newroute != NULL)
965                 ksocknal_route_decref(newroute);
966 }
967
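/* Handle a newly accepted socket: queue a connection request on
 * ksnd_connd_connreqs and wake a connection daemon to deal with it. */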
968 int
969 ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
970 {
971         ksock_connreq_t *cr;
972         int              rc;
973         __u32            peer_ip;
974         int              peer_port;
975
976         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
977         LASSERT(rc == 0);               /* we succeeded before */
978
979         LIBCFS_ALLOC(cr, sizeof(*cr));
980         if (cr == NULL) {
981                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
982                                    "%pI4h: memory exhausted\n", &peer_ip);
983                 return -ENOMEM;
984         }
985
986         lnet_ni_addref(ni);
987         cr->ksncr_ni   = ni;
988         cr->ksncr_sock = sock;
989
990         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
991
992         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
993         wake_up(&ksocknal_data.ksnd_connd_waitq);
994
995         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
996         return 0;
997 }
998
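/* Return non-zero if a connection attempt to 'ipaddr' is already in
 * progress on one of this peer's routes. */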
999 static int
1000 ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
1001 {
1002         ksock_route_t *route;
1003
1004         list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
1005                 if (route->ksnr_ipaddr == ipaddr)
1006                         return route->ksnr_connecting;
1007         }
1008         return 0;
1009 }
1010
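/* Bring up a new connection on 'sock'.  If 'route' is non-NULL this is an
 * active (outgoing) connection, otherwise a passive one from the acceptor.
 * Exchanges HELLOs, resolves connection races and duplicates, then attaches
 * the connection to its peer, route and scheduler and enables I/O. */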
1011 int
1012 ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
1013                      struct socket *sock, int type)
1014 {
1015         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
1016         struct list_head        zombies = LIST_HEAD_INIT(zombies);
1017         lnet_process_id_t       peerid;
1018         struct list_head        *tmp;
1019         __u64              incarnation;
1020         ksock_conn_t      *conn;
1021         ksock_conn_t      *conn2;
1022         ksock_peer_t      *peer = NULL;
1023         ksock_peer_t      *peer2;
1024         ksock_sched_t     *sched;
1025         struct ksock_hello_msg *hello;
1026         int                cpt;
1027         ksock_tx_t        *tx;
1028         ksock_tx_t        *txtmp;
1029         int                rc;
1030         int                active;
1031         char              *warn = NULL;
1032
1033         active = (route != NULL);
1034
1035         LASSERT (active == (type != SOCKLND_CONN_NONE));
1036
1037         LIBCFS_ALLOC(conn, sizeof(*conn));
1038         if (conn == NULL) {
1039                 rc = -ENOMEM;
1040                 goto failed_0;
1041         }
1042
1043         conn->ksnc_peer = NULL;
1044         conn->ksnc_route = NULL;
1045         conn->ksnc_sock = sock;
1046         /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
1047          * being closed before the connection is established */
1048         atomic_set (&conn->ksnc_sock_refcount, 2);
1049         conn->ksnc_type = type;
1050         ksocknal_lib_save_callback(sock, conn);
1051         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1052
1053         conn->ksnc_rx_ready = 0;
1054         conn->ksnc_rx_scheduled = 0;
1055
1056         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1057         conn->ksnc_tx_ready = 0;
1058         conn->ksnc_tx_scheduled = 0;
1059         conn->ksnc_tx_carrier = NULL;
1060         atomic_set (&conn->ksnc_tx_nob, 0);
1061
1062         LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1063                                      kshm_ips[LNET_MAX_INTERFACES]));
1064         if (hello == NULL) {
1065                 rc = -ENOMEM;
1066                 goto failed_1;
1067         }
1068
1069         /* stash conn's local and remote addrs */
1070         rc = ksocknal_lib_get_conn_addrs (conn);
1071         if (rc != 0)
1072                 goto failed_1;
1073
1074         /* Find out/confirm peer's NID and connection type and get the
1075          * vector of interfaces she's willing to let me connect to.
1076          * Passive connections use the listener timeout since the peer sends
1077          * eagerly */
1078
1079         if (active) {
1080                 peer = route->ksnr_peer;
1081                 LASSERT(ni == peer->ksnp_ni);
1082
1083                 /* Active connection sends HELLO eagerly */
1084                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1085                 peerid = peer->ksnp_id;
1086
1087                 write_lock_bh(global_lock);
1088                 conn->ksnc_proto = peer->ksnp_proto;
1089                 write_unlock_bh(global_lock);
1090
1091                 if (conn->ksnc_proto == NULL) {
1092                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1093 #if SOCKNAL_VERSION_DEBUG
1094                          if (*ksocknal_tunables.ksnd_protocol == 2)
1095                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1096                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1097                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1098 #endif
1099                 }
1100
1101                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1102                 if (rc != 0)
1103                         goto failed_1;
1104         } else {
1105                 peerid.nid = LNET_NID_ANY;
1106                 peerid.pid = LNET_PID_ANY;
1107
1108                 /* Passive, get protocol from peer */
1109                 conn->ksnc_proto = NULL;
1110         }
1111
1112         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1113         if (rc < 0)
1114                 goto failed_1;
1115
1116         LASSERT (rc == 0 || active);
1117         LASSERT (conn->ksnc_proto != NULL);
1118         LASSERT (peerid.nid != LNET_NID_ANY);
1119
1120         cpt = lnet_cpt_of_nid(peerid.nid, ni);
1121
1122         if (active) {
1123                 ksocknal_peer_addref(peer);
1124                 write_lock_bh(global_lock);
1125         } else {
1126                 rc = ksocknal_create_peer(&peer, ni, peerid);
1127                 if (rc != 0)
1128                         goto failed_1;
1129
1130                 write_lock_bh(global_lock);
1131
1132                 /* called with a ref on ni, so shutdown can't have started */
1133                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1134
1135                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1136                 if (peer2 == NULL) {
1137                         /* NB this puts an "empty" peer in the peer
1138                          * table (which takes my ref) */
1139                         list_add_tail(&peer->ksnp_list,
1140                                       ksocknal_nid2peerlist(peerid.nid));
1141                 } else {
1142                         ksocknal_peer_decref(peer);
1143                         peer = peer2;
1144                 }
1145
1146                 /* +1 ref for me */
1147                 ksocknal_peer_addref(peer);
1148                 peer->ksnp_accepting++;
1149
1150                 /* Am I already connecting to this guy?  Resolve in
1151                  * favour of higher NID... */
1152                 if (peerid.nid < ni->ni_nid &&
1153                     ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
1154                         rc = EALREADY;
1155                         warn = "connection race resolution";
1156                         goto failed_2;
1157                 }
1158         }
1159
1160         if (peer->ksnp_closing ||
1161             (active && route->ksnr_deleted)) {
1162                 /* peer/route got closed under me */
1163                 rc = -ESTALE;
1164                 warn = "peer/route removed";
1165                 goto failed_2;
1166         }
1167
1168         if (peer->ksnp_proto == NULL) {
1169                 /* Never connected before.
1170                  * NB recv_hello may have returned EPROTO to signal my peer
1171                  * wants a different protocol than the one I asked for.
1172                  */
1173                 LASSERT(list_empty(&peer->ksnp_conns));
1174
1175                 peer->ksnp_proto = conn->ksnc_proto;
1176                 peer->ksnp_incarnation = incarnation;
1177         }
1178
1179         if (peer->ksnp_proto != conn->ksnc_proto ||
1180             peer->ksnp_incarnation != incarnation) {
1181                 /* Peer rebooted or I've got the wrong protocol version */
1182                 ksocknal_close_peer_conns_locked(peer, 0, 0);
1183
1184                 peer->ksnp_proto = NULL;
1185                 rc = ESTALE;
1186                 warn = peer->ksnp_incarnation != incarnation ?
1187                        "peer rebooted" :
1188                        "wrong proto version";
1189                 goto failed_2;
1190         }
1191
1192         switch (rc) {
1193         default:
1194                 LBUG();
1195         case 0:
1196                 break;
1197         case EALREADY:
1198                 warn = "lost conn race";
1199                 goto failed_2;
1200         case EPROTO:
1201                 warn = "retry with different protocol version";
1202                 goto failed_2;
1203         }
1204
1205         /* Refuse to duplicate an existing connection, unless this is a
1206          * loopback connection */
1207         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1208                 list_for_each(tmp, &peer->ksnp_conns) {
1209                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1210
1211                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1212                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1213                             conn2->ksnc_type != conn->ksnc_type)
1214                                 continue;
1215
1216                         /* Reply on a passive connection attempt so the peer
1217                          * realises we're connected. */
1218                         LASSERT (rc == 0);
1219                         if (!active)
1220                                 rc = EALREADY;
1221
1222                         warn = "duplicate";
1223                         goto failed_2;
1224                 }
1225         }
1226
1227         /* If the connection created by this route didn't bind to the IP
1228          * address the route connected to, the connection/route matching
1229          * code below probably isn't going to work. */
1230         if (active &&
1231             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1232                 CERROR("Route %s %pI4h connected to %pI4h\n",
1233                        libcfs_id2str(peer->ksnp_id),
1234                        &route->ksnr_ipaddr,
1235                        &conn->ksnc_ipaddr);
1236         }
1237
1238         /* Search for a route corresponding to the new connection and
1239          * create an association.  This allows incoming connections created
1240          * by routes in my peer to match my own route entries so I don't
1241          * continually create duplicate routes. */
1242         list_for_each(tmp, &peer->ksnp_routes) {
1243                 route = list_entry(tmp, ksock_route_t, ksnr_list);
1244
1245                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1246                         continue;
1247
1248                 ksocknal_associate_route_conn_locked(route, conn);
1249                 break;
1250         }
1251
1252         conn->ksnc_peer = peer;                 /* conn takes my ref on peer */
1253         peer->ksnp_last_alive = ktime_get_real_seconds();
1254         peer->ksnp_send_keepalive = 0;
1255         peer->ksnp_error = 0;
1256
1257         sched = ksocknal_choose_scheduler_locked(cpt);
1258         sched->kss_nconns++;
1259         conn->ksnc_scheduler = sched;
1260
1261         conn->ksnc_tx_last_post = ktime_get_real_seconds();
1262         /* Set the deadline for the outgoing HELLO to drain */
1263         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1264         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1265         smp_mb();   /* order with adding to peer's conn list */
1266
1267         list_add(&conn->ksnc_list, &peer->ksnp_conns);
1268         ksocknal_conn_addref(conn);
1269
1270         ksocknal_new_packet(conn, 0);
1271
1272         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1273
1274         /* Take packets blocking for this connection. */
1275         list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1276                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1277                     SOCKNAL_MATCH_NO)
1278                         continue;
1279
1280                 list_del(&tx->tx_list);
1281                 ksocknal_queue_tx_locked(tx, conn);
1282         }
1283
1284         write_unlock_bh(global_lock);
1285
1286         /* We've now got a new connection.  Any errors from here on are just
1287          * like "normal" comms errors and we close the connection normally.
1288          * NB (a) we still have to send the reply HELLO for passive
1289          *        connections,
1290          *    (b) normal I/O on the conn is blocked until I setup and call the
1291          *        socket callbacks.
1292          */
1293
1294         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1295                " incarnation:%lld sched[%d:%d]\n",
1296                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1297                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1298                conn->ksnc_port, incarnation, cpt,
1299                (int)(sched - &sched->kss_info->ksi_scheds[0]));
1300
1301         if (active) {
1302                 /* additional routes after interface exchange? */
1303                 ksocknal_create_routes(peer, conn->ksnc_port,
1304                                        hello->kshm_ips, hello->kshm_nips);
1305         } else {
1306                 hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
1307                                                        hello->kshm_nips);
1308                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1309         }
1310
1311         LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1312                                     kshm_ips[LNET_MAX_INTERFACES]));
1313
1314         /* setup the socket AFTER I've received hello (it disables
1315          * SO_LINGER).  I might call back to the acceptor who may want
1316          * to send a protocol version response and then close the
1317          * socket; this ensures the socket only tears down after the
1318          * response has been sent. */
1319         if (rc == 0)
1320                 rc = ksocknal_lib_setup_sock(sock);
1321
1322         write_lock_bh(global_lock);
1323
1324         /* NB my callbacks block while I hold ksnd_global_lock */
1325         ksocknal_lib_set_callback(sock, conn);
1326
1327         if (!active)
1328                 peer->ksnp_accepting--;
1329
1330         write_unlock_bh(global_lock);
1331
1332         if (rc != 0) {
1333                 write_lock_bh(global_lock);
1334                 if (!conn->ksnc_closing) {
1335                         /* could be closed by another thread */
1336                         ksocknal_close_conn_locked(conn, rc);
1337                 }
1338                 write_unlock_bh(global_lock);
1339         } else if (ksocknal_connsock_addref(conn) == 0) {
1340                 /* Allow I/O to proceed. */
1341                 ksocknal_read_callback(conn);
1342                 ksocknal_write_callback(conn);
1343                 ksocknal_connsock_decref(conn);
1344         }
1345
1346         ksocknal_connsock_decref(conn);
1347         ksocknal_conn_decref(conn);
1348         return rc;
1349
1350 failed_2:
1351         if (!peer->ksnp_closing &&
1352             list_empty(&peer->ksnp_conns) &&
1353             list_empty(&peer->ksnp_routes)) {
1354                 list_add(&zombies, &peer->ksnp_tx_queue);
1355                 list_del_init(&peer->ksnp_tx_queue);
1356                 ksocknal_unlink_peer_locked(peer);
1357         }
1358
1359         write_unlock_bh(global_lock);
1360
1361         if (warn != NULL) {
1362                 if (rc < 0)
1363                         CERROR("Not creating conn %s type %d: %s\n",
1364                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1365                 else
1366                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1367                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1368         }
1369
1370         if (!active) {
1371                 if (rc > 0) {
1372                         /* Request retry by replying with CONN_NONE;
1373                          * ksnc_proto has been set already */
1374                         conn->ksnc_type = SOCKLND_CONN_NONE;
1375                         hello->kshm_nips = 0;
1376                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1377                 }
1378
1379                 write_lock_bh(global_lock);
1380                 peer->ksnp_accepting--;
1381                 write_unlock_bh(global_lock);
1382         }
1383
1384         ksocknal_txlist_done(ni, &zombies, 1);
1385         ksocknal_peer_decref(peer);
1386
1387 failed_1:
1388         if (hello != NULL)
1389                 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1390                                             kshm_ips[LNET_MAX_INTERFACES]));
1391
1392         LIBCFS_FREE(conn, sizeof(*conn));
1393
1394 failed_0:
1395         sock_release(sock);
1396         return rc;
1397 }
1398
1399 void
1400 ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
1401 {
1402         /* This just does the immediate housekeeping, and queues the
1403          * connection for the reaper to terminate.
1404          * Caller holds ksnd_global_lock exclusively in irq context */
1405         ksock_peer_t      *peer = conn->ksnc_peer;
1406         ksock_route_t     *route;
1407         ksock_conn_t      *conn2;
1408         struct list_head  *tmp;
1409
1410         LASSERT(peer->ksnp_error == 0);
1411         LASSERT(!conn->ksnc_closing);
1412         conn->ksnc_closing = 1;
1413
1414         /* ksnd_deathrow_conns takes over peer's ref */
1415         list_del(&conn->ksnc_list);
1416
1417         route = conn->ksnc_route;
1418         if (route != NULL) {
1419                 /* dissociate conn from route... */
1420                 LASSERT(!route->ksnr_deleted);
1421                 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1422
1423                 conn2 = NULL;
1424                 list_for_each(tmp, &peer->ksnp_conns) {
1425                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1426
1427                         if (conn2->ksnc_route == route &&
1428                             conn2->ksnc_type == conn->ksnc_type)
1429                                 break;
1430
1431                         conn2 = NULL;
1432                 }
1433                 if (conn2 == NULL)
1434                         route->ksnr_connected &= ~(1 << conn->ksnc_type);
1435
1436                 conn->ksnc_route = NULL;
1437
1438                 ksocknal_route_decref(route);   /* drop conn's ref on route */
1439         }
1440
1441         if (list_empty(&peer->ksnp_conns)) {
1442                 /* No more connections to this peer */
1443
1444                 if (!list_empty(&peer->ksnp_tx_queue)) {
1445                         ksock_tx_t *tx;
1446
1447                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1448
1449                         /* throw them to the last connection...
1450                          * these TXs will be sent to /dev/null by the scheduler */
1451                         list_for_each_entry(tx, &peer->ksnp_tx_queue,
1452                                             tx_list)
1453                                 ksocknal_tx_prep(conn, tx);
1454
1455                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1456                         list_splice_init(&peer->ksnp_tx_queue,
1457                                          &conn->ksnc_tx_queue);
1458                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1459                 }
1460
1461                 /* renegotiate protocol version */
1462                 peer->ksnp_proto = NULL;
1463                 /* stash last conn close reason */
1464                 peer->ksnp_error = error;
1465
1466                 if (list_empty(&peer->ksnp_routes)) {
1467                         /* I've just closed last conn belonging to a
1468                          * peer with no routes to it */
1469                         ksocknal_unlink_peer_locked(peer);
1470                 }
1471         }
1472
1473         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1474
1475         list_add_tail(&conn->ksnc_list,
1476                       &ksocknal_data.ksnd_deathrow_conns);
1477         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1478
1479         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1480 }
1481
1482 void
1483 ksocknal_peer_failed (ksock_peer_t *peer)
1484 {
1485         int        notify = 0;
1486         cfs_time_t last_alive = 0;
1487
1488         /* There has been a connection failure or comms error; but I'll only
1489          * tell LNET I think the peer is dead if it's to another kernel and
1490          * there are no connections or connection attempts in existence. */
1491
1492         read_lock(&ksocknal_data.ksnd_global_lock);
1493
1494         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1495              list_empty(&peer->ksnp_conns) &&
1496              peer->ksnp_accepting == 0 &&
1497              ksocknal_find_connecting_route_locked(peer) == NULL) {
1498                 notify = 1;
1499                 last_alive = peer->ksnp_last_alive;
1500         }
1501
1502         read_unlock(&ksocknal_data.ksnd_global_lock);
1503
1504         if (notify)
1505                 lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
1506                             last_alive);
1507 }
1508
1509 void
1510 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1511 {
1512         ksock_peer_t     *peer = conn->ksnc_peer;
1513         ksock_tx_t       *tx;
1514         ksock_tx_t       *tmp;
1515         struct list_head  zlist = LIST_HEAD_INIT(zlist);
1516
1517         /* NB safe to finalize TXs because closing of socket will
1518          * abort all buffered data */
1519         LASSERT(conn->ksnc_sock == NULL);
1520
1521         spin_lock(&peer->ksnp_lock);
1522
1523         list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1524                 if (tx->tx_conn != conn)
1525                         continue;
1526
1527                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1528
1529                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1530                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1531                 list_del(&tx->tx_zc_list);
1532                 list_add(&tx->tx_zc_list, &zlist);
1533         }
1534
1535         spin_unlock(&peer->ksnp_lock);
1536
1537         while (!list_empty(&zlist)) {
1538                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1539
1540                 list_del(&tx->tx_zc_list);
1541                 ksocknal_tx_decref(tx);
1542         }
1543 }
1544
1545 void
1546 ksocknal_terminate_conn(ksock_conn_t *conn)
1547 {
1548         /* This gets called by the reaper (guaranteed thread context) to
1549          * disengage the socket from its callbacks and close it.
1550          * ksnc_refcount will eventually hit zero, and then the reaper will
1551          * destroy it. */
1552         ksock_peer_t     *peer = conn->ksnc_peer;
1553         ksock_sched_t    *sched = conn->ksnc_scheduler;
1554         int               failed = 0;
1555
1556         LASSERT(conn->ksnc_closing);
1557
1558         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1559         spin_lock_bh(&sched->kss_lock);
1560
1561         /* a closing conn is always ready to tx */
1562         conn->ksnc_tx_ready = 1;
1563
1564         if (!conn->ksnc_tx_scheduled &&
1565             !list_empty(&conn->ksnc_tx_queue)) {
1566                 list_add_tail(&conn->ksnc_tx_list,
1567                                &sched->kss_tx_conns);
1568                 conn->ksnc_tx_scheduled = 1;
1569                 /* extra ref for scheduler */
1570                 ksocknal_conn_addref(conn);
1571
1572                 wake_up (&sched->kss_waitq);
1573         }
1574
1575         spin_unlock_bh(&sched->kss_lock);
1576
1577         /* serialise with callbacks */
1578         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1579
1580         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1581
1582         /* OK, so this conn may not be completely disengaged from its
1583          * scheduler yet, but it _has_ committed to terminate... */
1584         conn->ksnc_scheduler->kss_nconns--;
1585
1586         if (peer->ksnp_error != 0) {
1587                 /* peer's last conn closed in error */
1588                 LASSERT(list_empty(&peer->ksnp_conns));
1589                 failed = 1;
1590                 peer->ksnp_error = 0;     /* avoid multiple notifications */
1591         }
1592
1593         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1594
1595         if (failed)
1596                 ksocknal_peer_failed(peer);
1597
1598         /* The socket is closed on the final put; either here, or in
1599          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1600          * when the connection was established, this will close the socket
1601          * immediately, aborting anything buffered in it. Any hung
1602          * zero-copy transmits will therefore complete in finite time. */
1603         ksocknal_connsock_decref(conn);
1604 }
1605
1606 void
1607 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1608 {
1609         /* Queue the conn for the reaper to destroy */
1610
1611         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1612         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1613
1614         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1615         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1616
1617         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1618 }
1619
1620 void
1621 ksocknal_destroy_conn (ksock_conn_t *conn)
1622 {
1623         cfs_time_t      last_rcv;
1624
1625         /* Final coup-de-grace of the reaper */
1626         CDEBUG (D_NET, "connection %p\n", conn);
1627
1628         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1629         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1630         LASSERT (conn->ksnc_sock == NULL);
1631         LASSERT (conn->ksnc_route == NULL);
1632         LASSERT (!conn->ksnc_tx_scheduled);
1633         LASSERT (!conn->ksnc_rx_scheduled);
1634         LASSERT(list_empty(&conn->ksnc_tx_queue));
1635
1636         /* complete current receive if any */
1637         switch (conn->ksnc_rx_state) {
1638         case SOCKNAL_RX_LNET_PAYLOAD:
1639                 last_rcv = conn->ksnc_rx_deadline -
1640                            cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1641                 CERROR("Completing partial receive from %s[%d], "
1642                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1643                        "last alive is %ld secs ago\n",
1644                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1645                        &conn->ksnc_ipaddr, conn->ksnc_port,
1646                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1647                        cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
1648                                         last_rcv)));
1649                 lnet_finalize (conn->ksnc_peer->ksnp_ni,
1650                                conn->ksnc_cookie, -EIO);
1651                 break;
1652         case SOCKNAL_RX_LNET_HEADER:
1653                 if (conn->ksnc_rx_started)
1654                         CERROR("Incomplete receive of lnet header from %s, "
1655                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1656                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1657                                &conn->ksnc_ipaddr, conn->ksnc_port,
1658                                conn->ksnc_proto->pro_version);
1659                 break;
1660         case SOCKNAL_RX_KSM_HEADER:
1661                 if (conn->ksnc_rx_started)
1662                         CERROR("Incomplete receive of ksock message from %s, "
1663                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1664                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1665                                &conn->ksnc_ipaddr, conn->ksnc_port,
1666                                conn->ksnc_proto->pro_version);
1667                 break;
1668         case SOCKNAL_RX_SLOP:
1669                 if (conn->ksnc_rx_started)
1670                         CERROR("Incomplete receive of slops from %s, "
1671                                "ip %pI4h:%d, with error\n",
1672                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1673                                &conn->ksnc_ipaddr, conn->ksnc_port);
1674                 break;
1675         default:
1676                 LBUG ();
1677                 break;
1678         }
1679
1680         ksocknal_peer_decref(conn->ksnc_peer);
1681
1682         LIBCFS_FREE (conn, sizeof (*conn));
1683 }
1684
1685 int
1686 ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
1687 {
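        /* Close every conn to 'peer' whose remote IP matches 'ipaddr'
         * (0 matches any address); each one is queued for the reaper.
         * Returns the number closed.  Caller holds ksnd_global_lock
         * exclusively. */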
1688         ksock_conn_t       *conn;
1689         struct list_head         *ctmp;
1690         struct list_head         *cnxt;
1691         int                 count = 0;
1692
1693         list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
1694                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1695
1696                 if (ipaddr == 0 ||
1697                     conn->ksnc_ipaddr == ipaddr) {
1698                         count++;
1699                         ksocknal_close_conn_locked (conn, why);
1700                 }
1701         }
1702
1703         return (count);
1704 }
1705
1706 int
1707 ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1708 {
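        /* Close 'conn' together with every other conn to the same peer
         * that uses the same remote IP; returns how many were closed. */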
1709         ksock_peer_t     *peer = conn->ksnc_peer;
1710         __u32             ipaddr = conn->ksnc_ipaddr;
1711         int               count;
1712
1713         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1714
1715         count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
1716
1717         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1718
1719         return (count);
1720 }
1721
1722 int
1723 ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
1724 {
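        /* Close all conns matching 'id' and 'ipaddr'.  LNET_NID_ANY,
         * LNET_PID_ANY and an ipaddr of 0 act as wildcards; wildcard
         * requests always succeed, otherwise -ENOENT is returned when
         * nothing matched. */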
1725         ksock_peer_t       *peer;
1726         struct list_head         *ptmp;
1727         struct list_head         *pnxt;
1728         int                 lo;
1729         int                 hi;
1730         int                 i;
1731         int                 count = 0;
1732
1733         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1734
1735         if (id.nid != LNET_NID_ANY)
1736                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1737         else {
1738                 lo = 0;
1739                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1740         }
1741
1742         for (i = lo; i <= hi; i++) {
1743                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1744
1745                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
1746
1747                         if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1748                               (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1749                                 continue;
1750
1751                         count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0);
1752                 }
1753         }
1754
1755         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1756
1757         /* wildcards always succeed */
1758         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1759                 return (0);
1760
1761         return (count == 0 ? -ENOENT : 0);
1762 }
1763
1764 void
1765 ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1766 {
1767         /* The router is telling me she's been notified of a change in
1768          * gateway state.... */
1769         lnet_process_id_t  id = {0};
1770
1771         id.nid = gw_nid;
1772         id.pid = LNET_PID_ANY;
1773
1774         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1775                 alive ? "up" : "down");
1776
1777         if (!alive) {
1778                 /* If the gateway crashed, close all open connections... */
1779                 ksocknal_close_matching_conns (id, 0);
1780                 return;
1781         }
1782
1783         /* ...otherwise do nothing.  We can only establish new connections
1784          * if we have autoroutes, and these connect on demand. */
1785 }
1786
1787 void
1788 ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1789 {
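        /* LND query handler: report in *when the last time 'nid' was
         * known alive (inferred from tx buffer drain), and launch
         * connection attempts (adding the peer if needed) unless none
         * of its routes is currently connectable. */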
1790         int connect = 1;
1791         time64_t last_alive = 0;
1792         time64_t now = ktime_get_real_seconds();
1793         ksock_peer_t *peer = NULL;
1794         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1795         lnet_process_id_t id = {
1796                 .nid = nid,
1797                 .pid = LNET_PID_LUSTRE,
1798         };
1799
1800         read_lock(glock);
1801
1802         peer = ksocknal_find_peer_locked(ni, id);
1803         if (peer != NULL) {
1804                 struct list_head       *tmp;
1805                 ksock_conn_t     *conn;
1806                 int               bufnob;
1807
1808                 list_for_each(tmp, &peer->ksnp_conns) {
1809                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1810                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1811
1812                         if (bufnob < conn->ksnc_tx_bufnob) {
1813                                 /* something got ACKed */
1814                                 conn->ksnc_tx_deadline =
1815                                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1816                                 peer->ksnp_last_alive = now;
1817                                 conn->ksnc_tx_bufnob = bufnob;
1818                         }
1819                 }
1820
1821                 last_alive = peer->ksnp_last_alive;
1822                 if (ksocknal_find_connectable_route_locked(peer) == NULL)
1823                         connect = 0;
1824         }
1825
1826         read_unlock(glock);
1827
1828         if (last_alive != 0)
1829                 *when = last_alive;
1830
1831         CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1832                libcfs_nid2str(nid), peer,
1833                last_alive ? cfs_duration_sec(now - last_alive) : -1,
1834                connect);
1835
1836         if (!connect)
1837                 return;
1838
1839         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1840
1841         write_lock_bh(glock);
1842
1843         peer = ksocknal_find_peer_locked(ni, id);
1844         if (peer != NULL)
1845                 ksocknal_launch_all_connections_locked(peer);
1846
1847         write_unlock_bh(glock);
1848         return;
1849 }
1850
1851 static void
1852 ksocknal_push_peer (ksock_peer_t *peer)
1853 {
1854         int               index;
1855         int               i;
1856         struct list_head       *tmp;
1857         ksock_conn_t     *conn;
1858
1859         for (index = 0; ; index++) {
1860                 read_lock(&ksocknal_data.ksnd_global_lock);
1861
1862                 i = 0;
1863                 conn = NULL;
1864
1865                 list_for_each(tmp, &peer->ksnp_conns) {
1866                         if (i++ == index) {
1867                                 conn = list_entry(tmp, ksock_conn_t,
1868                                                        ksnc_list);
1869                                 ksocknal_conn_addref(conn);
1870                                 break;
1871                         }
1872                 }
1873
1874                 read_unlock(&ksocknal_data.ksnd_global_lock);
1875
1876                 if (conn == NULL)
1877                         break;
1878
1879                 ksocknal_lib_push_conn (conn);
1880                 ksocknal_conn_decref(conn);
1881         }
1882 }
1883
1884 static int
1885 ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
1886 {
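        /* Handle IOC_LIBCFS_PUSH_CONNECTION: for every peer matching
         * 'id' (LNET_NID_ANY/LNET_PID_ANY act as wildcards), push each
         * of its conns via ksocknal_lib_push_conn().  Returns -ENOENT
         * if no peer matched. */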
1887         struct list_head *start;
1888         struct list_head *end;
1889         struct list_head *tmp;
1890         int               rc = -ENOENT;
1891         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1892
1893         if (id.nid == LNET_NID_ANY) {
1894                 start = &ksocknal_data.ksnd_peers[0];
1895                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1896         } else {
1897                 start = end = ksocknal_nid2peerlist(id.nid);
1898         }
1899
1900         for (tmp = start; tmp <= end; tmp++) {
1901                 int     peer_off; /* searching offset in peer hash table */
1902
1903                 for (peer_off = 0; ; peer_off++) {
1904                         ksock_peer_t *peer;
1905                         int           i = 0;
1906
1907                         read_lock(&ksocknal_data.ksnd_global_lock);
1908                         list_for_each_entry(peer, tmp, ksnp_list) {
1909                                 if (!((id.nid == LNET_NID_ANY ||
1910                                        id.nid == peer->ksnp_id.nid) &&
1911                                       (id.pid == LNET_PID_ANY ||
1912                                        id.pid == peer->ksnp_id.pid)))
1913                                         continue;
1914
1915                                 if (i++ == peer_off) {
1916                                         ksocknal_peer_addref(peer);
1917                                         break;
1918                                 }
1919                         }
1920                         read_unlock(&ksocknal_data.ksnd_global_lock);
1921
1922                         if (i <= peer_off) /* no match at this offset */
1923                                 break;
1924
1925                         rc = 0;
1926                         ksocknal_push_peer(peer);
1927                         ksocknal_peer_decref(peer);
1928                 }
1929         }
1930         return rc;
1931 }
1932
1933 static int
1934 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1935 {
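        /* Add a local interface (IOC_LIBCFS_ADD_INTERFACE).  Duplicates
         * are silently ignored and at most LNET_MAX_INTERFACES may be
         * configured; peers and routes already using the address are
         * counted, but only new connections will use the interface. */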
1936         ksock_net_t       *net = ni->ni_data;
1937         ksock_interface_t *iface;
1938         int                rc;
1939         int                i;
1940         int                j;
1941         struct list_head        *ptmp;
1942         ksock_peer_t      *peer;
1943         struct list_head        *rtmp;
1944         ksock_route_t     *route;
1945
1946         if (ipaddress == 0 ||
1947             netmask == 0)
1948                 return (-EINVAL);
1949
1950         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1951
1952         iface = ksocknal_ip2iface(ni, ipaddress);
1953         if (iface != NULL) {
1954                 /* silently ignore dups */
1955                 rc = 0;
1956         } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1957                 rc = -ENOSPC;
1958         } else {
1959                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1960
1961                 iface->ksni_ipaddr = ipaddress;
1962                 iface->ksni_netmask = netmask;
1963                 iface->ksni_nroutes = 0;
1964                 iface->ksni_npeers = 0;
1965
1966                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1967                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1968                                 peer = list_entry(ptmp, ksock_peer_t,
1969                                                       ksnp_list);
1970
1971                                 for (j = 0; j < peer->ksnp_n_passive_ips; j++)
1972                                         if (peer->ksnp_passive_ips[j] == ipaddress)
1973                                                 iface->ksni_npeers++;
1974
1975                                 list_for_each(rtmp, &peer->ksnp_routes) {
1976                                         route = list_entry(rtmp,
1977                                                                ksock_route_t,
1978                                                                ksnr_list);
1979
1980                                         if (route->ksnr_myipaddr == ipaddress)
1981                                                 iface->ksni_nroutes++;
1982                                 }
1983                         }
1984                 }
1985
1986                 rc = 0;
1987                 /* NB only new connections will pay attention to the new interface! */
1988         }
1989
1990         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1991
1992         return (rc);
1993 }
1994
1995 static void
1996 ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
1997 {
1998         struct list_head         *tmp;
1999         struct list_head         *nxt;
2000         ksock_route_t      *route;
2001         ksock_conn_t       *conn;
2002         int                 i;
2003         int                 j;
2004
2005         for (i = 0; i < peer->ksnp_n_passive_ips; i++)
2006                 if (peer->ksnp_passive_ips[i] == ipaddr) {
2007                         for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
2008                                 peer->ksnp_passive_ips[j-1] =
2009                                         peer->ksnp_passive_ips[j];
2010                         peer->ksnp_n_passive_ips--;
2011                         break;
2012                 }
2013
2014         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
2015                 route = list_entry(tmp, ksock_route_t, ksnr_list);
2016
2017                 if (route->ksnr_myipaddr != ipaddr)
2018                         continue;
2019
2020                 if (route->ksnr_share_count != 0) {
2021                         /* Manually created; keep, but unbind */
2022                         route->ksnr_myipaddr = 0;
2023                 } else {
2024                         ksocknal_del_route_locked(route);
2025                 }
2026         }
2027
2028         list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
2029                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2030
2031                 if (conn->ksnc_myipaddr == ipaddr)
2032                         ksocknal_close_conn_locked (conn, 0);
2033         }
2034 }
2035
2036 static int
2037 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2038 {
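        /* Remove a local interface (IOC_LIBCFS_DEL_INTERFACE); an
         * ipaddress of 0 removes them all.  Routes bound to the address
         * are unbound or deleted and conns using it are closed. */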
2039         ksock_net_t       *net = ni->ni_data;
2040         int                rc = -ENOENT;
2041         struct list_head        *tmp;
2042         struct list_head        *nxt;
2043         ksock_peer_t      *peer;
2044         __u32              this_ip;
2045         int                i;
2046         int                j;
2047
2048         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2049
2050         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2051                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2052
2053                 if (!(ipaddress == 0 ||
2054                       ipaddress == this_ip))
2055                         continue;
2056
2057                 rc = 0;
2058
2059                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2060                         net->ksnn_interfaces[j-1] =
2061                                 net->ksnn_interfaces[j];
2062
2063                 net->ksnn_ninterfaces--;
2064
2065                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2066                         list_for_each_safe(tmp, nxt,
2067                                                &ksocknal_data.ksnd_peers[j]) {
2068                                 peer = list_entry(tmp, ksock_peer_t,
2069                                                       ksnp_list);
2070
2071                                 if (peer->ksnp_ni != ni)
2072                                         continue;
2073
2074                                 ksocknal_peer_del_interface_locked(peer, this_ip);
2075                         }
2076                 }
2077         }
2078
2079         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2080
2081         return (rc);
2082 }
2083
2084 int
2085 ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2086 {
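        /* LND ioctl handler: dispatch IOC_LIBCFS_* requests (e.g. from
         * lctl) to the interface, peer and connection operations. */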
2087         lnet_process_id_t id = {0};
2088         struct libcfs_ioctl_data *data = arg;
2089         int rc;
2090
2091         switch (cmd) {
2092         case IOC_LIBCFS_GET_INTERFACE: {
2093                 ksock_net_t       *net = ni->ni_data;
2094                 ksock_interface_t *iface;
2095
2096                 read_lock(&ksocknal_data.ksnd_global_lock);
2097
2098                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2099                         rc = -ENOENT;
2100                 } else {
2101                         rc = 0;
2102                         iface = &net->ksnn_interfaces[data->ioc_count];
2103
2104                         data->ioc_u32[0] = iface->ksni_ipaddr;
2105                         data->ioc_u32[1] = iface->ksni_netmask;
2106                         data->ioc_u32[2] = iface->ksni_npeers;
2107                         data->ioc_u32[3] = iface->ksni_nroutes;
2108                 }
2109
2110                 read_unlock(&ksocknal_data.ksnd_global_lock);
2111                 return rc;
2112         }
2113
2114         case IOC_LIBCFS_ADD_INTERFACE:
2115                 return ksocknal_add_interface(ni,
2116                                               data->ioc_u32[0], /* IP address */
2117                                               data->ioc_u32[1]); /* net mask */
2118
2119         case IOC_LIBCFS_DEL_INTERFACE:
2120                 return ksocknal_del_interface(ni,
2121                                               data->ioc_u32[0]); /* IP address */
2122
2123         case IOC_LIBCFS_GET_PEER: {
2124                 __u32            myip = 0;
2125                 __u32            ip = 0;
2126                 int              port = 0;
2127                 int              conn_count = 0;
2128                 int              share_count = 0;
2129
2130                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2131                                             &id, &myip, &ip, &port,
2132                                             &conn_count,  &share_count);
2133                 if (rc != 0)
2134                         return rc;
2135
2136                 data->ioc_nid    = id.nid;
2137                 data->ioc_count  = share_count;
2138                 data->ioc_u32[0] = ip;
2139                 data->ioc_u32[1] = port;
2140                 data->ioc_u32[2] = myip;
2141                 data->ioc_u32[3] = conn_count;
2142                 data->ioc_u32[4] = id.pid;
2143                 return 0;
2144         }
2145
2146         case IOC_LIBCFS_ADD_PEER:
2147                 id.nid = data->ioc_nid;
2148                 id.pid = LNET_PID_LUSTRE;
2149                 return ksocknal_add_peer (ni, id,
2150                                           data->ioc_u32[0], /* IP */
2151                                           data->ioc_u32[1]); /* port */
2152
2153         case IOC_LIBCFS_DEL_PEER:
2154                 id.nid = data->ioc_nid;
2155                 id.pid = LNET_PID_ANY;
2156                 return ksocknal_del_peer (ni, id,
2157                                           data->ioc_u32[0]); /* IP */
2158
2159         case IOC_LIBCFS_GET_CONN: {
2160                 int           txmem;
2161                 int           rxmem;
2162                 int           nagle;
2163                 ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2164
2165                 if (conn == NULL)
2166                         return -ENOENT;
2167
2168                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2169
2170                 data->ioc_count  = txmem;
2171                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2172                 data->ioc_flags  = nagle;
2173                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2174                 data->ioc_u32[1] = conn->ksnc_port;
2175                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2176                 data->ioc_u32[3] = conn->ksnc_type;
2177                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2178                 data->ioc_u32[5] = rxmem;
2179                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2180                 ksocknal_conn_decref(conn);
2181                 return 0;
2182         }
2183
2184         case IOC_LIBCFS_CLOSE_CONNECTION:
2185                 id.nid = data->ioc_nid;
2186                 id.pid = LNET_PID_ANY;
2187                 return ksocknal_close_matching_conns (id,
2188                                                       data->ioc_u32[0]);
2189
2190         case IOC_LIBCFS_REGISTER_MYNID:
2191                 /* Ignore if this is a noop */
2192                 if (data->ioc_nid == ni->ni_nid)
2193                         return 0;
2194
2195                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2196                        libcfs_nid2str(data->ioc_nid),
2197                        libcfs_nid2str(ni->ni_nid));
2198                 return -EINVAL;
2199
2200         case IOC_LIBCFS_PUSH_CONNECTION:
2201                 id.nid = data->ioc_nid;
2202                 id.pid = LNET_PID_ANY;
2203                 return ksocknal_push(ni, id);
2204
2205         default:
2206                 return -EINVAL;
2207         }
2208         /* not reached */
2209 }
2210
2211 static void
2212 ksocknal_free_buffers (void)
2213 {
2214         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2215
2216         if (ksocknal_data.ksnd_sched_info != NULL) {
2217                 struct ksock_sched_info *info;
2218                 int                     i;
2219
2220                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2221                         if (info->ksi_scheds != NULL) {
2222                                 LIBCFS_FREE(info->ksi_scheds,
2223                                             info->ksi_nthreads_max *
2224                                             sizeof(info->ksi_scheds[0]));
2225                         }
2226                 }
2227                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2228         }
2229
2230         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2231                      sizeof(struct list_head) *
2232                      ksocknal_data.ksnd_peer_hash_size);
2233
2234         spin_lock(&ksocknal_data.ksnd_tx_lock);
2235
2236         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2237                 struct list_head        zlist;
2238                 ksock_tx_t      *tx;
2239
2240                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2241                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2242                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2243
2244                 while (!list_empty(&zlist)) {
2245                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2246                         list_del(&tx->tx_list);
2247                         LIBCFS_FREE(tx, tx->tx_desc_size);
2248                 }
2249         } else {
2250                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2251         }
2252 }
2253
2254 static void
2255 ksocknal_base_shutdown(void)
2256 {
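        /* Tear down global socklnd state once the last net is gone:
         * verify everything is idle, flag all threads to exit, wait for
         * them to terminate, then free the shared buffers. */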
2257         struct ksock_sched_info *info;
2258         ksock_sched_t           *sched;
2259         int                     i;
2260         int                     j;
2261
2262         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2263                atomic_read (&libcfs_kmemory));
2264         LASSERT (ksocknal_data.ksnd_nnets == 0);
2265
2266         switch (ksocknal_data.ksnd_init) {
2267         default:
2268                 LASSERT (0);
2269
2270         case SOCKNAL_INIT_ALL:
2271         case SOCKNAL_INIT_DATA:
2272                 LASSERT (ksocknal_data.ksnd_peers != NULL);
2273                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2274                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2275                 }
2276
2277                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2278                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2279                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2280                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2281                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2282
2283                 if (ksocknal_data.ksnd_sched_info != NULL) {
2284                         cfs_percpt_for_each(info, i,
2285                                             ksocknal_data.ksnd_sched_info) {
2286                                 if (info->ksi_scheds == NULL)
2287                                         continue;
2288
2289                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2290
2291                                         sched = &info->ksi_scheds[j];
2292                                         LASSERT(list_empty(
2293                                                 &sched->kss_tx_conns));
2294                                         LASSERT(list_empty(
2295                                                 &sched->kss_rx_conns));
2296                                         LASSERT(list_empty(
2297                                                 &sched->kss_zombie_noop_txs));
2298                                         LASSERT(sched->kss_nconns == 0);
2299                                 }
2300                         }
2301                 }
2302
2303                 /* flag threads to terminate; wake and wait for them to die */
2304                 ksocknal_data.ksnd_shuttingdown = 1;
2305                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2306                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2307
2308                 if (ksocknal_data.ksnd_sched_info != NULL) {
2309                         cfs_percpt_for_each(info, i,
2310                                             ksocknal_data.ksnd_sched_info) {
2311                                 if (info->ksi_scheds == NULL)
2312                                         continue;
2313
2314                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2315                                         sched = &info->ksi_scheds[j];
2316                                         wake_up_all(&sched->kss_waitq);
2317                                 }
2318                         }
2319                 }
2320
2321                 i = 4;
2322                 read_lock(&ksocknal_data.ksnd_global_lock);
2323                 while (ksocknal_data.ksnd_nthreads != 0) {
2324                         i++;
2325                         /* power of 2? */
2326                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2327                                 "waiting for %d threads to terminate\n",
2328                                 ksocknal_data.ksnd_nthreads);
2329                         read_unlock(&ksocknal_data.ksnd_global_lock);
2330                         set_current_state(TASK_UNINTERRUPTIBLE);
2331                         schedule_timeout(cfs_time_seconds(1));
2332                         read_lock(&ksocknal_data.ksnd_global_lock);
2333                 }
2334                 read_unlock(&ksocknal_data.ksnd_global_lock);
2335
2336                 ksocknal_free_buffers();
2337
2338                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2339                 break;
2340         }
2341
2342         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2343                atomic_read (&libcfs_kmemory));
2344
2345         module_put(THIS_MODULE);
2346 }
2347
2348 static __u64 ksocknal_new_incarnation(void)
2349 {
2350         struct timeval tv;
2351
2352         /* The incarnation number is the time this module loaded and it
2353          * identifies this particular instance of the socknal.  Hopefully
2354          * we won't be able to reboot more frequently than 1MHz for the
2355          * forseeable future :) */
2356          * foreseeable future :) */
2357         do_gettimeofday(&tv);
2358
2359         return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2360 }
2361
2362 static int
2363 ksocknal_base_startup(void)
2364 {
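        /* One-time global initialisation: allocate the peer hash table
         * and per-CPT scheduler info, then start the connection daemons
         * and the reaper thread. */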
2365         struct ksock_sched_info *info;
2366         int                     rc;
2367         int                     i;
2368
2369         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2370         LASSERT (ksocknal_data.ksnd_nnets == 0);
2371
2372         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2373
2374         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2375         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2376                      sizeof(struct list_head) *
2377                      ksocknal_data.ksnd_peer_hash_size);
2378         if (ksocknal_data.ksnd_peers == NULL)
2379                 return -ENOMEM;
2380
2381         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2382                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2383
2384         rwlock_init(&ksocknal_data.ksnd_global_lock);
2385         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2386
2387         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2388         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2389         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2390         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2391         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2392
2393         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2394         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2395         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2396         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2397
2398         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2399         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2400
2401         /* NB memset above zeros whole of ksocknal_data */
2402
2403         /* flag lists/ptrs/locks initialised */
2404         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2405         try_module_get(THIS_MODULE);
2406
2407         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2408                                                          sizeof(*info));
2409         if (ksocknal_data.ksnd_sched_info == NULL)
2410                 goto failed;
2411
2412         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2413                 ksock_sched_t   *sched;
2414                 int             nthrs;
2415
2416                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2417                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2418                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2419                 } else {
2420                         /* cap at half of the CPUs; assume the other half is
2421                          * reserved for upper-layer modules */
2422                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2423                 }
2424
2425                 info->ksi_nthreads_max = nthrs;
2426                 info->ksi_cpt = i;
2427
2428                 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2429                                  info->ksi_nthreads_max * sizeof(*sched));
2430                 if (info->ksi_scheds == NULL)
2431                         goto failed;
2432
2433                 for (; nthrs > 0; nthrs--) {
2434                         sched = &info->ksi_scheds[nthrs - 1];
2435
2436                         sched->kss_info = info;
2437                         spin_lock_init(&sched->kss_lock);
2438                         INIT_LIST_HEAD(&sched->kss_rx_conns);
2439                         INIT_LIST_HEAD(&sched->kss_tx_conns);
2440                         INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2441                         init_waitqueue_head(&sched->kss_waitq);
2442                 }
2443         }
2444
2445         ksocknal_data.ksnd_connd_starting         = 0;
2446         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2447         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2448         /* must have at least 2 connds to remain responsive to accepts while
2449          * connecting */
2450         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2451                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2452
2453         if (*ksocknal_tunables.ksnd_nconnds_max <
2454             *ksocknal_tunables.ksnd_nconnds) {
2455                 *ksocknal_tunables.ksnd_nconnds_max =
2456                         *ksocknal_tunables.ksnd_nconnds;
2457         }
2458
2459         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2460                 char name[16];
2461                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2462                 ksocknal_data.ksnd_connd_starting++;
2463                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2464
2466                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2467                 rc = ksocknal_thread_start(ksocknal_connd,
2468                                            (void *)((uintptr_t)i), name);
2469                 if (rc != 0) {
2470                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2471                         ksocknal_data.ksnd_connd_starting--;
2472                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2473                         CERROR("Can't spawn socknal connd: %d\n", rc);
2474                         goto failed;
2475                 }
2476         }
2477
2478         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2479         if (rc != 0) {
2480                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2481                 goto failed;
2482         }
2483
2484         /* flag everything initialised */
2485         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2486
2487         return 0;
2488
2489  failed:
2490         ksocknal_base_shutdown();
2491         return -ENETDOWN;
2492 }
2493
2494 static void
2495 ksocknal_debug_peerhash (lnet_ni_t *ni)
2496 {
2497         ksock_peer_t    *peer = NULL;
2498         struct list_head        *tmp;
2499         int             i;
2500
2501         read_lock(&ksocknal_data.ksnd_global_lock);
2502
2503         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2504                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2505                         peer = list_entry(tmp, ksock_peer_t, ksnp_list);
2506
2507                         if (peer->ksnp_ni == ni) break;
2508
2509                         peer = NULL;
2510                 }
2511         }
2512
2513         if (peer != NULL) {
2514                 ksock_route_t *route;
2515                 ksock_conn_t  *conn;
2516
2517                 CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
2518                        "closing %d, accepting %d, err %d, zcookie %llu, "
2519                        "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
2520                        atomic_read(&peer->ksnp_refcount),
2521                        peer->ksnp_sharecount, peer->ksnp_closing,
2522                        peer->ksnp_accepting, peer->ksnp_error,
2523                        peer->ksnp_zc_next_cookie,
2524                        !list_empty(&peer->ksnp_tx_queue),
2525                        !list_empty(&peer->ksnp_zc_req_list));
2526
2527                 list_for_each(tmp, &peer->ksnp_routes) {
2528                         route = list_entry(tmp, ksock_route_t, ksnr_list);
2529                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2530                                "del %d\n", atomic_read(&route->ksnr_refcount),
2531                                route->ksnr_scheduled, route->ksnr_connecting,
2532                                route->ksnr_connected, route->ksnr_deleted);
2533                 }
2534
2535                 list_for_each(tmp, &peer->ksnp_conns) {
2536                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2537                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2538                                atomic_read(&conn->ksnc_conn_refcount),
2539                                atomic_read(&conn->ksnc_sock_refcount),
2540                                conn->ksnc_type, conn->ksnc_closing);
2541                 }
2542         }
2543
2544         read_unlock(&ksocknal_data.ksnd_global_lock);
2545         return;
2546 }
2547
2548 void
2549 ksocknal_shutdown (lnet_ni_t *ni)
2550 {
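        /* LND shutdown handler for one net: delete all its peers, wait
         * for their state to drain, free the net and, if it was the
         * last net, shut down the global state too. */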
2551         ksock_net_t      *net = ni->ni_data;
2552         int               i;
2553         lnet_process_id_t anyid = {0};
2554
2555         anyid.nid = LNET_NID_ANY;
2556         anyid.pid = LNET_PID_ANY;
2557
2558         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2559         LASSERT(ksocknal_data.ksnd_nnets > 0);
2560
2561         spin_lock_bh(&net->ksnn_lock);
2562         net->ksnn_shutdown = 1;                 /* prevent new peers */
2563         spin_unlock_bh(&net->ksnn_lock);
2564
2565         /* Delete all peers */
2566         ksocknal_del_peer(ni, anyid, 0);
2567
2568         /* Wait for all peer state to clean up */
2569         i = 2;
2570         spin_lock_bh(&net->ksnn_lock);
2571         while (net->ksnn_npeers != 0) {
2572                 spin_unlock_bh(&net->ksnn_lock);
2573
2574                 i++;
2575                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2576                        "waiting for %d peers to disconnect\n",
2577                        net->ksnn_npeers);
2578                 set_current_state(TASK_UNINTERRUPTIBLE);
2579                 schedule_timeout(cfs_time_seconds(1));
2580
2581                 ksocknal_debug_peerhash(ni);
2582
2583                 spin_lock_bh(&net->ksnn_lock);
2584         }
2585         spin_unlock_bh(&net->ksnn_lock);
2586
2587         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2588                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2589                 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2590         }
2591
2592         list_del(&net->ksnn_list);
2593         LIBCFS_FREE(net, sizeof(*net));
2594
2595         ksocknal_data.ksnd_nnets--;
2596         if (ksocknal_data.ksnd_nnets == 0)
2597                 ksocknal_base_shutdown();
2598 }
2599
2600 static int
2601 ksocknal_enumerate_interfaces(ksock_net_t *net)
2602 {
2603         char      **names;
2604         int         i;
2605         int         j;
2606         int         rc;
2607         int         n;
2608
2609         n = lnet_ipif_enumerate(&names);
2610         if (n <= 0) {
2611                 CERROR("Can't enumerate interfaces: %d\n", n);
2612                 return n;
2613         }
2614
2615         for (i = j = 0; i < n; i++) {
2616                 int        up;
2617                 __u32      ip;
2618                 __u32      mask;
2619
2620                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2621                         continue;
2622
2623                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2624                 if (rc != 0) {
2625                         CWARN("Can't get interface %s info: %d\n",
2626                               names[i], rc);
2627                         continue;
2628                 }
2629
2630                 if (!up) {
2631                         CWARN("Ignoring interface %s (down)\n",
2632                               names[i]);
2633                         continue;
2634                 }
2635
2636                 if (j == LNET_MAX_INTERFACES) {
2637                         CWARN("Ignoring interface %s (too many interfaces)\n",
2638                               names[i]);
2639                         continue;
2640                 }
2641
2642                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2643                 net->ksnn_interfaces[j].ksni_netmask = mask;
2644                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2645                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2646                 j++;
2647         }
2648
2649         lnet_ipif_free_enumeration(names, n);
2650
2651         if (j == 0)
2652                 CERROR("Can't find any usable interfaces\n");
2653
2654         return j;
2655 }
2656
2657 static int
2658 ksocknal_search_new_ipif(ksock_net_t *net)
2659 {
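        /* Count how many of this net's interfaces are not already used
         * by any other configured net; alias suffixes after ':' are
         * ignored when comparing names. */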
2660         int     new_ipif = 0;
2661         int     i;
2662
2663         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2664                 char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2665                 char            *colon = strchr(ifnam, ':');
2666                 int             found  = 0;
2667                 ksock_net_t     *tmp;
2668                 int             j;
2669
2670                 if (colon != NULL) /* ignore alias device */
2671                         *colon = 0;
2672
2673                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2674                                         ksnn_list) {
2675                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2676                                 char *ifnam2 =
2677                                         &tmp->ksnn_interfaces[j].ksni_name[0];
2678                                 char *colon2 = strchr(ifnam2, ':');
2679
2680                                 if (colon2 != NULL)
2681                                         *colon2 = 0;
2682
2683                                 found = strcmp(ifnam, ifnam2) == 0;
2684                                 if (colon2 != NULL)
2685                                         *colon2 = ':';
2686                         }
2687                         if (found)
2688                                 break;
2689                 }
2690
2691                 new_ipif += !found;
2692                 if (colon != NULL)
2693                         *colon = ':';
2694         }
2695
2696         return new_ipif;
2697 }
2698
2699 static int
2700 ksocknal_start_schedulers(struct ksock_sched_info *info)
2701 {
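        /* Start scheduler threads for one CPT, capped at
         * ksi_nthreads_max; if the pool is already running, start at
         * most two more threads (for a new interface). */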
2702         int     nthrs;
2703         int     rc = 0;
2704         int     i;
2705
2706         if (info->ksi_nthreads == 0) {
2707                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2708                         nthrs = info->ksi_nthreads_max;
2709                 } else {
2710                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2711                                                info->ksi_cpt);
2712                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2713                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2714                 }
2715                 nthrs = min(nthrs, info->ksi_nthreads_max);
2716         } else {
2717                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2718                 /* start at most two more threads for a new interface */
2719                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2720         }
2721
2722         for (i = 0; i < nthrs; i++) {
2723                 long            id;
2724                 char            name[20];
2725                 ksock_sched_t   *sched;
2726                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2727                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2728                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2729                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2730
2731                 rc = ksocknal_thread_start(ksocknal_scheduler,
2732                                            (void *)id, name);
2733                 if (rc == 0)
2734                         continue;
2735
2736                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2737                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2738                 break;
2739         }
2740
2741         info->ksi_nthreads += i;
2742         return rc;
2743 }
2744
2745 static int
2746 ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2747 {
2748         int     newif = ksocknal_search_new_ipif(net);
2749         int     rc;
2750         int     i;
2751
2752         LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2753
2754         for (i = 0; i < ncpts; i++) {
2755                 struct ksock_sched_info *info;
2756                 int cpt = (cpts == NULL) ? i : cpts[i];
2757
2758                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2759                 info = ksocknal_data.ksnd_sched_info[cpt];
2760
2761                 if (!newif && info->ksi_nthreads > 0)
2762                         continue;
2763
2764                 rc = ksocknal_start_schedulers(info);
2765                 if (rc != 0)
2766                         return rc;
2767         }
2768         return 0;
2769 }
2770
2771 int
2772 ksocknal_startup (lnet_ni_t *ni)
2773 {
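        /* LND startup handler: bring up global state on first use, set
         * the net's tunables and interfaces (enumerated, or taken from
         * ni_interfaces[]), start scheduler threads and derive ni_nid
         * from the first interface's IP address. */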
2774         ksock_net_t  *net;
2775         int           rc;
2776         int           i;
2777
2778         LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
2779
2780         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2781                 rc = ksocknal_base_startup();
2782                 if (rc != 0)
2783                         return rc;
2784         }
2785
2786         LIBCFS_ALLOC(net, sizeof(*net));
2787         if (net == NULL)
2788                 goto fail_0;
2789
2790         spin_lock_init(&net->ksnn_lock);
2791         net->ksnn_incarnation = ksocknal_new_incarnation();
2792         ni->ni_data = net;
2793         if (!ni->ni_net->net_tunables_set) {
2794                 ni->ni_net->net_tunables.lct_peer_timeout =
2795                         *ksocknal_tunables.ksnd_peertimeout;
2796                 ni->ni_net->net_tunables.lct_max_tx_credits =
2797                         *ksocknal_tunables.ksnd_credits;
2798                 ni->ni_net->net_tunables.lct_peer_tx_credits =
2799                         *ksocknal_tunables.ksnd_peertxcredits;
2800                 ni->ni_net->net_tunables.lct_peer_rtr_credits =
2801                         *ksocknal_tunables.ksnd_peerrtrcredits;
2802                 ni->ni_net->net_tunables_set = true;
2803         }
2804
2805         if (ni->ni_interfaces[0] == NULL) {
2806                 rc = ksocknal_enumerate_interfaces(net);
2807                 if (rc <= 0)
2808                         goto fail_1;
2809
2810                 net->ksnn_ninterfaces = 1;
2811         } else {
2812                 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2813                         int    up;
2814
2815                         if (ni->ni_interfaces[i] == NULL)
2816                                 break;
2817
2818                         rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
2819                                 &net->ksnn_interfaces[i].ksni_ipaddr,
2820                                 &net->ksnn_interfaces[i].ksni_netmask);
2821
2822                         if (rc != 0) {
2823                                 CERROR("Can't get interface %s info: %d\n",
2824                                        ni->ni_interfaces[i], rc);
2825                                 goto fail_1;
2826                         }
2827
2828                         if (!up) {
2829                                 CERROR("Interface %s is down\n",
2830                                        ni->ni_interfaces[i]);
2831                                 goto fail_1;
2832                         }
2833
2834                         strlcpy(net->ksnn_interfaces[i].ksni_name,
2835                                 ni->ni_interfaces[i],
2836                                 sizeof(net->ksnn_interfaces[i].ksni_name));
2837                 }
2838                 net->ksnn_ninterfaces = i;
2839         }
2840
2841         /* call it before adding the net to ksocknal_data.ksnd_nets */
2842         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2843         if (rc != 0)
2844                 goto fail_1;
2845
2846         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2847                                 net->ksnn_interfaces[0].ksni_ipaddr);
2848         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2849
2850         ksocknal_data.ksnd_nnets++;
2851
2852         return 0;
2853
2854  fail_1:
2855         LIBCFS_FREE(net, sizeof(*net));
2856  fail_0:
2857         if (ksocknal_data.ksnd_nnets == 0)
2858                 ksocknal_base_shutdown();
2859
2860         return -ENETDOWN;
2861 }
2862
2863
2864 static void __exit ksocklnd_exit(void)
2865 {
2866         lnet_unregister_lnd(&the_ksocklnd);
2867 }
2868
2869 static int __init ksocklnd_init(void)
2870 {
2871         int rc;
2872
2873         /* check ksnr_connected/connecting field large enough */
2874         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2875         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2876
2877         /* initialize the_ksocklnd */
2878         the_ksocklnd.lnd_type     = SOCKLND;
2879         the_ksocklnd.lnd_startup  = ksocknal_startup;
2880         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2881         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2882         the_ksocklnd.lnd_send     = ksocknal_send;
2883         the_ksocklnd.lnd_recv     = ksocknal_recv;
2884         the_ksocklnd.lnd_notify   = ksocknal_notify;
2885         the_ksocklnd.lnd_query    = ksocknal_query;
2886         the_ksocklnd.lnd_accept   = ksocknal_accept;
2887
2888         rc = ksocknal_tunables_init();
2889         if (rc != 0)
2890                 return rc;
2891
2892         lnet_register_lnd(&the_ksocklnd);
2893
2894         return 0;
2895 }
2896
2897 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2898 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2899 MODULE_VERSION("2.8.0");
2900 MODULE_LICENSE("GPL");
2901
2902 module_init(ksocklnd_init);
2903 module_exit(ksocklnd_exit);