1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lnet/klnds/socklnd/socklnd.c
37  *
38  * Author: Zach Brown <zab@zabbo.net>
39  * Author: Peter J. Braam <braam@clusterfs.com>
40  * Author: Phil Schwan <phil@clusterfs.com>
41  * Author: Eric Barton <eric@bartonsoftware.com>
42  */
43
44 #include "socklnd.h"
45
46 static lnd_t                   the_ksocklnd;
47 ksock_nal_data_t        ksocknal_data;
48
49 static ksock_interface_t *
50 ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
51 {
52         ksock_net_t       *net = ni->ni_data;
53         int                i;
54         ksock_interface_t *iface;
55
56         for (i = 0; i < net->ksnn_ninterfaces; i++) {
57                 LASSERT(i < LNET_MAX_INTERFACES);
58                 iface = &net->ksnn_interfaces[i];
59
60                 if (iface->ksni_ipaddr == ip)
61                         return (iface);
62         }
63
64         return (NULL);
65 }
66
67 static ksock_route_t *
68 ksocknal_create_route (__u32 ipaddr, int port)
69 {
70         ksock_route_t *route;
71
72         LIBCFS_ALLOC (route, sizeof (*route));
73         if (route == NULL)
74                 return (NULL);
75
76         atomic_set (&route->ksnr_refcount, 1);
77         route->ksnr_peer = NULL;
78         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
79         route->ksnr_ipaddr = ipaddr;
80         route->ksnr_port = port;
81         route->ksnr_scheduled = 0;
82         route->ksnr_connecting = 0;
83         route->ksnr_connected = 0;
84         route->ksnr_deleted = 0;
85         route->ksnr_conn_count = 0;
86         route->ksnr_share_count = 0;
87
88         return (route);
89 }
90
91 void
92 ksocknal_destroy_route (ksock_route_t *route)
93 {
94         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
95
96         if (route->ksnr_peer != NULL)
97                 ksocknal_peer_decref(route->ksnr_peer);
98
99         LIBCFS_FREE (route, sizeof (*route));
100 }
101
102 static int
103 ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
104 {
105         int             cpt = lnet_cpt_of_nid(id.nid);
106         ksock_net_t     *net = ni->ni_data;
107         ksock_peer_t    *peer;
108
109         LASSERT(id.nid != LNET_NID_ANY);
110         LASSERT(id.pid != LNET_PID_ANY);
111         LASSERT(!in_interrupt());
112
113         LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
114         if (peer == NULL)
115                 return -ENOMEM;
116
117         peer->ksnp_ni = ni;
118         peer->ksnp_id = id;
119         atomic_set(&peer->ksnp_refcount, 1);    /* 1 ref for caller */
120         peer->ksnp_closing = 0;
121         peer->ksnp_accepting = 0;
122         peer->ksnp_proto = NULL;
123         peer->ksnp_last_alive = 0;
124         peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
125
126         INIT_LIST_HEAD(&peer->ksnp_conns);
127         INIT_LIST_HEAD(&peer->ksnp_routes);
128         INIT_LIST_HEAD(&peer->ksnp_tx_queue);
129         INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
130         spin_lock_init(&peer->ksnp_lock);
131
132         spin_lock_bh(&net->ksnn_lock);
133
134         if (net->ksnn_shutdown) {
135                 spin_unlock_bh(&net->ksnn_lock);
136
137                 LIBCFS_FREE(peer, sizeof(*peer));
138                 CERROR("Can't create peer: network shutdown\n");
139                 return -ESHUTDOWN;
140         }
141
142         net->ksnn_npeers++;
143
144         spin_unlock_bh(&net->ksnn_lock);
145
146         *peerp = peer;
147         return 0;
148 }
149
150 void
151 ksocknal_destroy_peer (ksock_peer_t *peer)
152 {
153         ksock_net_t    *net = peer->ksnp_ni->ni_data;
154
155         CDEBUG (D_NET, "peer %s %p deleted\n",
156                 libcfs_id2str(peer->ksnp_id), peer);
157
158         LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
159         LASSERT(peer->ksnp_accepting == 0);
160         LASSERT(list_empty(&peer->ksnp_conns));
161         LASSERT(list_empty(&peer->ksnp_routes));
162         LASSERT(list_empty(&peer->ksnp_tx_queue));
163         LASSERT(list_empty(&peer->ksnp_zc_req_list));
164
165         LIBCFS_FREE(peer, sizeof(*peer));
166
167         /* NB a peer's connections and routes keep a reference on their peer
168          * until they are destroyed, so we can be assured that _all_ state to
169          * do with this peer has been cleaned up when its refcount drops to
170          * zero. */
171         spin_lock_bh(&net->ksnn_lock);
172         net->ksnn_npeers--;
173         spin_unlock_bh(&net->ksnn_lock);
174 }
175
176 ksock_peer_t *
177 ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
178 {
179         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
180         struct list_head *tmp;
181         ksock_peer_t     *peer;
182
183         list_for_each(tmp, peer_list) {
184
185                 peer = list_entry(tmp, ksock_peer_t, ksnp_list);
186
187                 LASSERT(!peer->ksnp_closing);
188
189                 if (peer->ksnp_ni != ni)
190                         continue;
191
192                 if (peer->ksnp_id.nid != id.nid ||
193                     peer->ksnp_id.pid != id.pid)
194                         continue;
195
196                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
197                        peer, libcfs_id2str(id),
198                        atomic_read(&peer->ksnp_refcount));
199                 return peer;
200         }
201         return NULL;
202 }
203
204 ksock_peer_t *
205 ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
206 {
207         ksock_peer_t     *peer;
208
209         read_lock(&ksocknal_data.ksnd_global_lock);
210         peer = ksocknal_find_peer_locked(ni, id);
211         if (peer != NULL)                       /* +1 ref for caller? */
212                 ksocknal_peer_addref(peer);
213         read_unlock(&ksocknal_data.ksnd_global_lock);
214
215         return (peer);
216 }
217
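/*
 * Editorial sketch (not part of the original source): the lookup/decref
 * discipline implied by the "+1 ref for caller" comment above.  It assumes
 * only the declarations already visible from socklnd.h; the function name
 * is hypothetical.
 */
#if 0
static void
example_with_peer(lnet_ni_t *ni, lnet_process_id_t id)
{
        ksock_peer_t *peer;

        peer = ksocknal_find_peer(ni, id);      /* takes a ref for us */
        if (peer == NULL)
                return;

        /* ... use 'peer'; the ref pins it and all of its state ... */

        ksocknal_peer_decref(peer);             /* last ref frees the peer */
}
#endif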
218 static void
219 ksocknal_unlink_peer_locked (ksock_peer_t *peer)
220 {
221         int                i;
222         __u32              ip;
223         ksock_interface_t *iface;
224
225         for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
226                 LASSERT (i < LNET_MAX_INTERFACES);
227                 ip = peer->ksnp_passive_ips[i];
228
229                 iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
230                 /* All IPs in peer->ksnp_passive_ips[] come from the
231                  * interface list, therefore the call must succeed. */
232                 LASSERT (iface != NULL);
233
234                 CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
235                        peer, iface, iface->ksni_nroutes);
236                 iface->ksni_npeers--;
237         }
238
239         LASSERT(list_empty(&peer->ksnp_conns));
240         LASSERT(list_empty(&peer->ksnp_routes));
241         LASSERT(!peer->ksnp_closing);
242         peer->ksnp_closing = 1;
243         list_del(&peer->ksnp_list);
244         /* lose peerlist's ref */
245         ksocknal_peer_decref(peer);
246 }
247
248 static int
249 ksocknal_get_peer_info (lnet_ni_t *ni, int index,
250                         lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
251                         int *port, int *conn_count, int *share_count)
252 {
253         ksock_peer_t      *peer;
254         struct list_head  *ptmp;
255         ksock_route_t     *route;
256         struct list_head  *rtmp;
257         int                i;
258         int                j;
259         int                rc = -ENOENT;
260
261         read_lock(&ksocknal_data.ksnd_global_lock);
262
263         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
264                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
265                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
266
267                         if (peer->ksnp_ni != ni)
268                                 continue;
269
270                         if (peer->ksnp_n_passive_ips == 0 &&
271                             list_empty(&peer->ksnp_routes)) {
272                                 if (index-- > 0)
273                                         continue;
274
275                                 *id = peer->ksnp_id;
276                                 *myip = 0;
277                                 *peer_ip = 0;
278                                 *port = 0;
279                                 *conn_count = 0;
280                                 *share_count = 0;
281                                 rc = 0;
282                                 goto out;
283                         }
284
285                         for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
286                                 if (index-- > 0)
287                                         continue;
288
289                                 *id = peer->ksnp_id;
290                                 *myip = peer->ksnp_passive_ips[j];
291                                 *peer_ip = 0;
292                                 *port = 0;
293                                 *conn_count = 0;
294                                 *share_count = 0;
295                                 rc = 0;
296                                 goto out;
297                         }
298
299                         list_for_each(rtmp, &peer->ksnp_routes) {
300                                 if (index-- > 0)
301                                         continue;
302
303                                 route = list_entry(rtmp, ksock_route_t,
304                                                    ksnr_list);
305
306                                 *id = peer->ksnp_id;
307                                 *myip = route->ksnr_myipaddr;
308                                 *peer_ip = route->ksnr_ipaddr;
309                                 *port = route->ksnr_port;
310                                 *conn_count = route->ksnr_conn_count;
311                                 *share_count = route->ksnr_share_count;
312                                 rc = 0;
313                                 goto out;
314                         }
315                 }
316         }
317 out:
318         read_unlock(&ksocknal_data.ksnd_global_lock);
319         return rc;
320 }
321
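/*
 * Editorial sketch (not part of the original source): ksocknal_get_peer_info()
 * exposes the peer table as index-addressable entries, so a caller probes
 * successive indices until it gets -ENOENT.  The function name below is
 * hypothetical.
 */
#if 0
static void
example_dump_peers(lnet_ni_t *ni)
{
        lnet_process_id_t id;
        __u32             myip;
        __u32             peer_ip;
        int               port;
        int               conn_count;
        int               share_count;
        int               index;

        for (index = 0; ; index++) {
                if (ksocknal_get_peer_info(ni, index, &id, &myip, &peer_ip,
                                           &port, &conn_count,
                                           &share_count) != 0)
                        break;          /* -ENOENT: ran off the end */

                CDEBUG(D_NET, "peer %s conns %d shares %d\n",
                       libcfs_id2str(id), conn_count, share_count);
        }
}
#endif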
322 static void
323 ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
324 {
325         ksock_peer_t      *peer = route->ksnr_peer;
326         int                type = conn->ksnc_type;
327         ksock_interface_t *iface;
328
329         conn->ksnc_route = route;
330         ksocknal_route_addref(route);
331
332         if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
333                 if (route->ksnr_myipaddr == 0) {
334                         /* route wasn't bound locally yet (the initial route) */
335                         CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
336                                libcfs_id2str(peer->ksnp_id),
337                                &route->ksnr_ipaddr,
338                                &conn->ksnc_myipaddr);
339                 } else {
340                         CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
341                                "to %pI4h\n", libcfs_id2str(peer->ksnp_id),
342                                &route->ksnr_ipaddr,
343                                &route->ksnr_myipaddr,
344                                &conn->ksnc_myipaddr);
345
346                         iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
347                                                   route->ksnr_myipaddr);
348                         if (iface != NULL)
349                                 iface->ksni_nroutes--;
350                 }
351                 route->ksnr_myipaddr = conn->ksnc_myipaddr;
352                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
353                                           route->ksnr_myipaddr);
354                 if (iface != NULL)
355                         iface->ksni_nroutes++;
356         }
357
358         route->ksnr_connected |= (1<<type);
359         route->ksnr_conn_count++;
360
361         /* Successful connection => further attempts can
362          * proceed immediately */
363         route->ksnr_retry_interval = 0;
364 }
365
366 static void
367 ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
368 {
369         struct list_head *tmp;
370         ksock_conn_t     *conn;
371         ksock_route_t    *route2;
372
373         LASSERT(!peer->ksnp_closing);
374         LASSERT(route->ksnr_peer == NULL);
375         LASSERT(!route->ksnr_scheduled);
376         LASSERT(!route->ksnr_connecting);
377         LASSERT(route->ksnr_connected == 0);
378
379         /* LASSERT(unique) */
380         list_for_each(tmp, &peer->ksnp_routes) {
381                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
382
383                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
384                         CERROR("Duplicate route %s %pI4h\n",
385                                libcfs_id2str(peer->ksnp_id),
386                                &route->ksnr_ipaddr);
387                         LBUG();
388                 }
389         }
390
391         route->ksnr_peer = peer;
392         ksocknal_peer_addref(peer);
393         /* peer's routelist takes over my ref on 'route' */
394         list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
395
396         list_for_each(tmp, &peer->ksnp_conns) {
397                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
398
399                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
400                         continue;
401
402                 ksocknal_associate_route_conn_locked(route, conn);
403                 /* keep going (typed routes) */
404         }
405 }
406
407 static void
408 ksocknal_del_route_locked (ksock_route_t *route)
409 {
410         ksock_peer_t      *peer = route->ksnr_peer;
411         ksock_interface_t *iface;
412         ksock_conn_t      *conn;
413         struct list_head  *ctmp;
414         struct list_head  *cnxt;
415
416         LASSERT(!route->ksnr_deleted);
417
418         /* Close associated conns */
419         list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
420                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
421
422                 if (conn->ksnc_route != route)
423                         continue;
424
425                 ksocknal_close_conn_locked(conn, 0);
426         }
427
428         if (route->ksnr_myipaddr != 0) {
429                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
430                                           route->ksnr_myipaddr);
431                 if (iface != NULL)
432                         iface->ksni_nroutes--;
433         }
434
435         route->ksnr_deleted = 1;
436         list_del(&route->ksnr_list);
437         ksocknal_route_decref(route);           /* drop peer's ref */
438
439         if (list_empty(&peer->ksnp_routes) &&
440             list_empty(&peer->ksnp_conns)) {
441                 /* I've just removed the last route to a peer with no active
442                  * connections */
443                 ksocknal_unlink_peer_locked(peer);
444         }
445 }
446
447 int
448 ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
449 {
450         struct list_head *tmp;
451         ksock_peer_t     *peer;
452         ksock_peer_t     *peer2;
453         ksock_route_t    *route;
454         ksock_route_t    *route2;
455         int               rc;
456
457         if (id.nid == LNET_NID_ANY ||
458             id.pid == LNET_PID_ANY)
459                 return (-EINVAL);
460
461         /* Have a brand new peer ready... */
462         rc = ksocknal_create_peer(&peer, ni, id);
463         if (rc != 0)
464                 return rc;
465
466         route = ksocknal_create_route (ipaddr, port);
467         if (route == NULL) {
468                 ksocknal_peer_decref(peer);
469                 return (-ENOMEM);
470         }
471
472         write_lock_bh(&ksocknal_data.ksnd_global_lock);
473
474         /* always called with a ref on ni, so shutdown can't have started */
475         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
476
477         peer2 = ksocknal_find_peer_locked(ni, id);
478         if (peer2 != NULL) {
479                 ksocknal_peer_decref(peer);
480                 peer = peer2;
481         } else {
482                 /* peer table takes my ref on peer */
483                 list_add_tail(&peer->ksnp_list,
484                               ksocknal_nid2peerlist(id.nid));
485         }
486
487         route2 = NULL;
488         list_for_each(tmp, &peer->ksnp_routes) {
489                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
490
491                 if (route2->ksnr_ipaddr == ipaddr)
492                         break;
493
494                 route2 = NULL;
495         }
496         if (route2 == NULL) {
497                 ksocknal_add_route_locked(peer, route);
498                 route->ksnr_share_count++;
499         } else {
500                 ksocknal_route_decref(route);
501                 route2->ksnr_share_count++;
502         }
503
504         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
505
506         return 0;
507 }
508
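/*
 * Editorial sketch (not part of the original source): a thin wrapper showing
 * how ksocknal_add_peer() is typically reached, e.g. from an "add peer"
 * ioctl.  The wrapper name and its arguments are placeholders.
 */
#if 0
static int
example_add_peer(lnet_ni_t *ni, lnet_nid_t nid, lnet_pid_t pid,
                 __u32 ipaddr, int port)
{
        lnet_process_id_t id;

        id.nid = nid;
        id.pid = pid;

        /* creates the peer and an explicit (shared) route to ipaddr:port */
        return ksocknal_add_peer(ni, id, ipaddr, port);
}
#endif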
509 static void
510 ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
511 {
512         ksock_conn_t     *conn;
513         ksock_route_t    *route;
514         struct list_head *tmp;
515         struct list_head *nxt;
516         int               nshared;
517
518         LASSERT(!peer->ksnp_closing);
519
520         /* Extra ref prevents peer disappearing until I'm done with it */
521         ksocknal_peer_addref(peer);
522
523         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
524                 route = list_entry(tmp, ksock_route_t, ksnr_list);
525
526                 /* no match */
527                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
528                         continue;
529
530                 route->ksnr_share_count = 0;
531                 /* This deletes associated conns too */
532                 ksocknal_del_route_locked(route);
533         }
534
535         nshared = 0;
536         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
537                 route = list_entry(tmp, ksock_route_t, ksnr_list);
538                 nshared += route->ksnr_share_count;
539         }
540
541         if (nshared == 0) {
542                 /* remove everything else if there are no explicit entries
543                  * left */
544
545                 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
546                         route = list_entry(tmp, ksock_route_t, ksnr_list);
547
548                         /* we should only be removing auto-entries */
549                         LASSERT(route->ksnr_share_count == 0);
550                         ksocknal_del_route_locked(route);
551                 }
552
553                 list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
554                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
555
556                         ksocknal_close_conn_locked(conn, 0);
557                 }
558         }
559
560         ksocknal_peer_decref(peer);
561                 /* NB peer unlinks itself when last conn/route is removed */
562 }
563
564 static int
565 ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
566 {
567         struct list_head  zombies = LIST_HEAD_INIT(zombies);
568         struct list_head *ptmp;
569         struct list_head *pnxt;
570         ksock_peer_t     *peer;
571         int               lo;
572         int               hi;
573         int               i;
574         int               rc = -ENOENT;
575
576         write_lock_bh(&ksocknal_data.ksnd_global_lock);
577
578         if (id.nid != LNET_NID_ANY) {
579                 hi = (int)(ksocknal_nid2peerlist(id.nid) -
580                            ksocknal_data.ksnd_peers);
581                 lo = hi;
582         } else {
583                 lo = 0;
584                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
585         }
586
587         for (i = lo; i <= hi; i++) {
588                 list_for_each_safe(ptmp, pnxt,
589                                    &ksocknal_data.ksnd_peers[i]) {
590                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
591
592                         if (peer->ksnp_ni != ni)
593                                 continue;
594
595                         if (!((id.nid == LNET_NID_ANY ||
596                                peer->ksnp_id.nid == id.nid) &&
597                               (id.pid == LNET_PID_ANY ||
598                                peer->ksnp_id.pid == id.pid)))
599                                 continue;
600
601                         ksocknal_peer_addref(peer);     /* a ref for me... */
602
603                         ksocknal_del_peer_locked(peer, ip);
604
605                         if (peer->ksnp_closing &&
606                             !list_empty(&peer->ksnp_tx_queue)) {
607                                 LASSERT(list_empty(&peer->ksnp_conns));
608                                 LASSERT(list_empty(&peer->ksnp_routes));
609
610                                 list_splice_init(&peer->ksnp_tx_queue,
611                                                  &zombies);
612                         }
613
614                         ksocknal_peer_decref(peer);     /* ...till here */
615
616                         rc = 0;                         /* matched! */
617                 }
618         }
619
620         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
621
622         ksocknal_txlist_done(ni, &zombies, 1);
623
624         return rc;
625 }
626
627 static ksock_conn_t *
628 ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
629 {
630         ksock_peer_t     *peer;
631         struct list_head *ptmp;
632         ksock_conn_t     *conn;
633         struct list_head *ctmp;
634         int               i;
635
636         read_lock(&ksocknal_data.ksnd_global_lock);
637
638         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
639                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
640                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
641
642                         LASSERT(!peer->ksnp_closing);
643
644                         if (peer->ksnp_ni != ni)
645                                 continue;
646
647                         list_for_each(ctmp, &peer->ksnp_conns) {
648                                 if (index-- > 0)
649                                         continue;
650
651                                 conn = list_entry(ctmp, ksock_conn_t,
652                                                   ksnc_list);
653                                 ksocknal_conn_addref(conn);
654                                 read_unlock(
655                                             &ksocknal_data.ksnd_global_lock);
656                                 return conn;
657                         }
658                 }
659         }
660
661         read_unlock(&ksocknal_data.ksnd_global_lock);
662         return NULL;
663 }
664
665 static ksock_sched_t *
666 ksocknal_choose_scheduler_locked(unsigned int cpt)
667 {
668         struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
669         ksock_sched_t           *sched;
670         int                     i;
671
672         LASSERT(info->ksi_nthreads > 0);
673
674         sched = &info->ksi_scheds[0];
675         /*
676          * NB: it's safe so far, but info->ksi_nthreads could be changed
677          * at runtime when we have dynamic LNet configuration, then we
678          * need to take care of this.
679          */
680         for (i = 1; i < info->ksi_nthreads; i++) {
681                 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
682                         sched = &info->ksi_scheds[i];
683         }
684
685         return sched;
686 }
687
688 static int
689 ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
690 {
691         ksock_net_t       *net = ni->ni_data;
692         int                i;
693         int                nip;
694
695         read_lock(&ksocknal_data.ksnd_global_lock);
696
697         nip = net->ksnn_ninterfaces;
698         LASSERT (nip <= LNET_MAX_INTERFACES);
699
700         /* Only offer interfaces for additional connections if I have
701          * more than one. */
702         if (nip < 2) {
703                 read_unlock(&ksocknal_data.ksnd_global_lock);
704                 return 0;
705         }
706
707         for (i = 0; i < nip; i++) {
708                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
709                 LASSERT (ipaddrs[i] != 0);
710         }
711
712         read_unlock(&ksocknal_data.ksnd_global_lock);
713         return (nip);
714 }
715
716 static int
717 ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
718 {
719         int   best_netmatch = 0;
720         int   best_xor      = 0;
721         int   best          = -1;
722         int   this_xor;
723         int   this_netmatch;
724         int   i;
725
726         for (i = 0; i < nips; i++) {
727                 if (ips[i] == 0)
728                         continue;
729
730                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
731                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
732
733                 if (!(best < 0 ||
734                       best_netmatch < this_netmatch ||
735                       (best_netmatch == this_netmatch &&
736                        best_xor > this_xor)))
737                         continue;
738
739                 best = i;
740                 best_netmatch = this_netmatch;
741                 best_xor = this_xor;
742         }
743
744         LASSERT (best >= 0);
745         return (best);
746 }
747
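/*
 * Editorial worked example (not part of the original source): with a local
 * interface of 192.168.1.10/255.255.255.0 and candidate peer IPs
 * { 10.0.0.5, 192.168.1.20 }, only the second address matches the netmask,
 * so netmatch 1 beats netmatch 0 and index 1 is returned; XOR distance only
 * breaks ties between candidates with the same netmatch.
 */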
748 static int
749 ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
750 {
751         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
752         ksock_net_t        *net = peer->ksnp_ni->ni_data;
753         ksock_interface_t  *iface;
754         ksock_interface_t  *best_iface;
755         int                 n_ips;
756         int                 i;
757         int                 j;
758         int                 k;
759         __u32               ip;
760         __u32               xor;
761         int                 this_netmatch;
762         int                 best_netmatch;
763         int                 best_npeers;
764
765         /* CAVEAT EMPTOR: We do all our interface matching with an
766          * exclusive hold of global lock at IRQ priority.  We're only
767          * expecting to be dealing with small numbers of interfaces, so the
768          * O(n**3)-ness shouldn't matter */
769
770         /* Also note that I'm not going to return more than n_peerips
771          * interfaces, even if I have more myself */
772
773         write_lock_bh(global_lock);
774
775         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
776         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
777
778         /* Only match interfaces for additional connections
779          * if I have > 1 interface */
780         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
781                 MIN(n_peerips, net->ksnn_ninterfaces);
782
783         for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
784                 /*              ^ yes really... */
785
786                 /* If we have any new interfaces, first tick off all the
787                  * peer IPs that match old interfaces, then choose new
788                  * interfaces to match the remaining peer IPs.
789                  * We don't forget interfaces we've stopped using; we might
790                  * start using them again... */
791
792                 if (i < peer->ksnp_n_passive_ips) {
793                         /* Old interface. */
794                         ip = peer->ksnp_passive_ips[i];
795                         best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
796
797                         /* peer passive ips are kept up to date */
798                         LASSERT(best_iface != NULL);
799                 } else {
800                         /* choose a new interface */
801                         LASSERT (i == peer->ksnp_n_passive_ips);
802
803                         best_iface = NULL;
804                         best_netmatch = 0;
805                         best_npeers = 0;
806
807                         for (j = 0; j < net->ksnn_ninterfaces; j++) {
808                                 iface = &net->ksnn_interfaces[j];
809                                 ip = iface->ksni_ipaddr;
810
811                                 for (k = 0; k < peer->ksnp_n_passive_ips; k++)
812                                         if (peer->ksnp_passive_ips[k] == ip)
813                                                 break;
814
815                                 if (k < peer->ksnp_n_passive_ips) /* using it already */
816                                         continue;
817
818                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
819                                 xor = (ip ^ peerips[k]);
820                                 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
821
822                                 if (!(best_iface == NULL ||
823                                       best_netmatch < this_netmatch ||
824                                       (best_netmatch == this_netmatch &&
825                                        best_npeers > iface->ksni_npeers)))
826                                         continue;
827
828                                 best_iface = iface;
829                                 best_netmatch = this_netmatch;
830                                 best_npeers = iface->ksni_npeers;
831                         }
832
833                         LASSERT(best_iface != NULL);
834
835                         best_iface->ksni_npeers++;
836                         ip = best_iface->ksni_ipaddr;
837                         peer->ksnp_passive_ips[i] = ip;
838                         peer->ksnp_n_passive_ips = i+1;
839                 }
840
841                 /* mark the best matching peer IP used */
842                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
843                 peerips[j] = 0;
844         }
845
846         /* Overwrite input peer IP addresses */
847         memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
848
849         write_unlock_bh(global_lock);
850
851         return (n_ips);
852 }
853
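/*
 * Editorial worked example (not part of the original source): with two local
 * interfaces and three peer-advertised IPs, n_ips = MIN(3, 2) = 2, so at most
 * two passive IPs are recorded for this peer; each pass zeroes the best
 * matching entry in peerips[], and the array is finally overwritten with the
 * two addresses actually selected before n_ips is returned.
 */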
854 static void
855 ksocknal_create_routes(ksock_peer_t *peer, int port,
856                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
857 {
858         ksock_route_t           *newroute = NULL;
859         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
860         lnet_ni_t               *ni = peer->ksnp_ni;
861         ksock_net_t             *net = ni->ni_data;
862         struct list_head        *rtmp;
863         ksock_route_t           *route;
864         ksock_interface_t       *iface;
865         ksock_interface_t       *best_iface;
866         int                     best_netmatch;
867         int                     this_netmatch;
868         int                     best_nroutes;
869         int                     i;
870         int                     j;
871
872         /* CAVEAT EMPTOR: We do all our interface matching with an
873          * exclusive hold of global lock at IRQ priority.  We're only
874          * expecting to be dealing with small numbers of interfaces, so the
875          * O(n**3)-ness here shouldn't matter */
876
877         write_lock_bh(global_lock);
878
879         if (net->ksnn_ninterfaces < 2) {
880                 /* Only create additional connections
881                  * if I have > 1 interface */
882                 write_unlock_bh(global_lock);
883                 return;
884         }
885
886         LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
887
888         for (i = 0; i < npeer_ipaddrs; i++) {
889                 if (newroute != NULL) {
890                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
891                 } else {
892                         write_unlock_bh(global_lock);
893
894                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
895                         if (newroute == NULL)
896                                 return;
897
898                         write_lock_bh(global_lock);
899                 }
900
901                 if (peer->ksnp_closing) {
902                         /* peer got closed under me */
903                         break;
904                 }
905
906                 /* Already got a route? */
907                 route = NULL;
908                 list_for_each(rtmp, &peer->ksnp_routes) {
909                         route = list_entry(rtmp, ksock_route_t, ksnr_list);
910
911                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
912                                 break;
913
914                         route = NULL;
915                 }
916                 if (route != NULL)
917                         continue;
918
919                 best_iface = NULL;
920                 best_nroutes = 0;
921                 best_netmatch = 0;
922
923                 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
924
925                 /* Select interface to connect from */
926                 for (j = 0; j < net->ksnn_ninterfaces; j++) {
927                         iface = &net->ksnn_interfaces[j];
928
929                         /* Using this interface already? */
930                         list_for_each(rtmp, &peer->ksnp_routes) {
931                                 route = list_entry(rtmp, ksock_route_t,
932                                                    ksnr_list);
933
934                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
935                                         break;
936
937                                 route = NULL;
938                         }
939                         if (route != NULL)
940                                 continue;
941
942                         this_netmatch = (((iface->ksni_ipaddr ^
943                                            newroute->ksnr_ipaddr) &
944                                            iface->ksni_netmask) == 0) ? 1 : 0;
945
946                         if (!(best_iface == NULL ||
947                               best_netmatch < this_netmatch ||
948                               (best_netmatch == this_netmatch &&
949                                best_nroutes > iface->ksni_nroutes)))
950                                 continue;
951
952                         best_iface = iface;
953                         best_netmatch = this_netmatch;
954                         best_nroutes = iface->ksni_nroutes;
955                 }
956
957                 if (best_iface == NULL)
958                         continue;
959
960                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
961                 best_iface->ksni_nroutes++;
962
963                 ksocknal_add_route_locked(peer, newroute);
964                 newroute = NULL;
965         }
966
967         write_unlock_bh(global_lock);
968         if (newroute != NULL)
969                 ksocknal_route_decref(newroute);
970 }
971
972 int
973 ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
974 {
975         ksock_connreq_t *cr;
976         int              rc;
977         __u32            peer_ip;
978         int              peer_port;
979
980         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
981         LASSERT(rc == 0);               /* we succeeded before */
982
983         LIBCFS_ALLOC(cr, sizeof(*cr));
984         if (cr == NULL) {
985                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
986                                    "%pI4h: memory exhausted\n", &peer_ip);
987                 return -ENOMEM;
988         }
989
990         lnet_ni_addref(ni);
991         cr->ksncr_ni   = ni;
992         cr->ksncr_sock = sock;
993
994         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
995
996         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
997         wake_up(&ksocknal_data.ksnd_connd_waitq);
998
999         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1000         return 0;
1001 }
1002
1003 static int
1004 ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
1005 {
1006         ksock_route_t *route;
1007
1008         list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
1009                 if (route->ksnr_ipaddr == ipaddr)
1010                         return route->ksnr_connecting;
1011         }
1012         return 0;
1013 }
1014
1015 int
1016 ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
1017                      struct socket *sock, int type)
1018 {
1019         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
1020         struct list_head        zombies = LIST_HEAD_INIT(zombies);
1021         lnet_process_id_t       peerid;
1022         struct list_head        *tmp;
1023         __u64              incarnation;
1024         ksock_conn_t      *conn;
1025         ksock_conn_t      *conn2;
1026         ksock_peer_t      *peer = NULL;
1027         ksock_peer_t      *peer2;
1028         ksock_sched_t     *sched;
1029         ksock_hello_msg_t *hello;
1030         int                cpt;
1031         ksock_tx_t        *tx;
1032         ksock_tx_t        *txtmp;
1033         int                rc;
1034         int                active;
1035         char              *warn = NULL;
1036
1037         active = (route != NULL);
1038
1039         LASSERT (active == (type != SOCKLND_CONN_NONE));
1040
1041         LIBCFS_ALLOC(conn, sizeof(*conn));
1042         if (conn == NULL) {
1043                 rc = -ENOMEM;
1044                 goto failed_0;
1045         }
1046
1047         memset (conn, 0, sizeof (*conn));
1048
1049         conn->ksnc_peer = NULL;
1050         conn->ksnc_route = NULL;
1051         conn->ksnc_sock = sock;
1052         /* 2 refs: 1 for conn, another extra ref prevents socket
1053          * being closed before establishment of connection */
1054         atomic_set (&conn->ksnc_sock_refcount, 2);
1055         conn->ksnc_type = type;
1056         ksocknal_lib_save_callback(sock, conn);
1057         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1058
1059         conn->ksnc_rx_ready = 0;
1060         conn->ksnc_rx_scheduled = 0;
1061
1062         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1063         conn->ksnc_tx_ready = 0;
1064         conn->ksnc_tx_scheduled = 0;
1065         conn->ksnc_tx_carrier = NULL;
1066         atomic_set (&conn->ksnc_tx_nob, 0);
1067
1068         LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
1069                                      kshm_ips[LNET_MAX_INTERFACES]));
1070         if (hello == NULL) {
1071                 rc = -ENOMEM;
1072                 goto failed_1;
1073         }
1074
1075         /* stash conn's local and remote addrs */
1076         rc = ksocknal_lib_get_conn_addrs (conn);
1077         if (rc != 0)
1078                 goto failed_1;
1079
1080         /* Find out/confirm peer's NID and connection type and get the
1081          * vector of interfaces she's willing to let me connect to.
1082          * Passive connections use the listener timeout since the peer sends
1083          * eagerly */
1084
1085         if (active) {
1086                 peer = route->ksnr_peer;
1087                 LASSERT(ni == peer->ksnp_ni);
1088
1089                 /* Active connection sends HELLO eagerly */
1090                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1091                 peerid = peer->ksnp_id;
1092
1093                 write_lock_bh(global_lock);
1094                 conn->ksnc_proto = peer->ksnp_proto;
1095                 write_unlock_bh(global_lock);
1096
1097                 if (conn->ksnc_proto == NULL) {
1098                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1099 #if SOCKNAL_VERSION_DEBUG
1100                          if (*ksocknal_tunables.ksnd_protocol == 2)
1101                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1102                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1103                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1104 #endif
1105                 }
1106
1107                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1108                 if (rc != 0)
1109                         goto failed_1;
1110         } else {
1111                 peerid.nid = LNET_NID_ANY;
1112                 peerid.pid = LNET_PID_ANY;
1113
1114                 /* Passive, get protocol from peer */
1115                 conn->ksnc_proto = NULL;
1116         }
1117
1118         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1119         if (rc < 0)
1120                 goto failed_1;
1121
1122         LASSERT (rc == 0 || active);
1123         LASSERT (conn->ksnc_proto != NULL);
1124         LASSERT (peerid.nid != LNET_NID_ANY);
1125
1126         cpt = lnet_cpt_of_nid(peerid.nid);
1127
1128         if (active) {
1129                 ksocknal_peer_addref(peer);
1130                 write_lock_bh(global_lock);
1131         } else {
1132                 rc = ksocknal_create_peer(&peer, ni, peerid);
1133                 if (rc != 0)
1134                         goto failed_1;
1135
1136                 write_lock_bh(global_lock);
1137
1138                 /* called with a ref on ni, so shutdown can't have started */
1139                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1140
1141                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1142                 if (peer2 == NULL) {
1143                         /* NB this puts an "empty" peer in the peer
1144                          * table (which takes my ref) */
1145                         list_add_tail(&peer->ksnp_list,
1146                                       ksocknal_nid2peerlist(peerid.nid));
1147                 } else {
1148                         ksocknal_peer_decref(peer);
1149                         peer = peer2;
1150                 }
1151
1152                 /* +1 ref for me */
1153                 ksocknal_peer_addref(peer);
1154                 peer->ksnp_accepting++;
1155
1156                 /* Am I already connecting to this guy?  Resolve in
1157                  * favour of higher NID... */
1158                 if (peerid.nid < ni->ni_nid &&
1159                     ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
1160                         rc = EALREADY;
1161                         warn = "connection race resolution";
1162                         goto failed_2;
1163                 }
1164         }
1165
1166         if (peer->ksnp_closing ||
1167             (active && route->ksnr_deleted)) {
1168                 /* peer/route got closed under me */
1169                 rc = -ESTALE;
1170                 warn = "peer/route removed";
1171                 goto failed_2;
1172         }
1173
1174         if (peer->ksnp_proto == NULL) {
1175                 /* Never connected before.
1176                  * NB recv_hello may have returned EPROTO to signal my peer
1177                  * wants a different protocol than the one I asked for.
1178                  */
1179                 LASSERT(list_empty(&peer->ksnp_conns));
1180
1181                 peer->ksnp_proto = conn->ksnc_proto;
1182                 peer->ksnp_incarnation = incarnation;
1183         }
1184
1185         if (peer->ksnp_proto != conn->ksnc_proto ||
1186             peer->ksnp_incarnation != incarnation) {
1187                 /* Peer rebooted or I've got the wrong protocol version */
1188                 ksocknal_close_peer_conns_locked(peer, 0, 0);
1189
1190                 peer->ksnp_proto = NULL;
1191                 rc = ESTALE;
1192                 warn = peer->ksnp_incarnation != incarnation ?
1193                        "peer rebooted" :
1194                        "wrong proto version";
1195                 goto failed_2;
1196         }
1197
1198         switch (rc) {
1199         default:
1200                 LBUG();
1201         case 0:
1202                 break;
1203         case EALREADY:
1204                 warn = "lost conn race";
1205                 goto failed_2;
1206         case EPROTO:
1207                 warn = "retry with different protocol version";
1208                 goto failed_2;
1209         }
1210
1211         /* Refuse to duplicate an existing connection, unless this is a
1212          * loopback connection */
1213         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1214                 list_for_each(tmp, &peer->ksnp_conns) {
1215                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1216
1217                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1218                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1219                             conn2->ksnc_type != conn->ksnc_type)
1220                                 continue;
1221
1222                         /* Reply on a passive connection attempt so the peer
1223                          * realises we're connected. */
1224                         LASSERT (rc == 0);
1225                         if (!active)
1226                                 rc = EALREADY;
1227
1228                         warn = "duplicate";
1229                         goto failed_2;
1230                 }
1231         }
1232
1233         /* If the connection created by this route didn't bind to the IP
1234          * address the route connected to, the connection/route matching
1235          * code below probably isn't going to work. */
1236         if (active &&
1237             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1238                 CERROR("Route %s %pI4h connected to %pI4h\n",
1239                        libcfs_id2str(peer->ksnp_id),
1240                        &route->ksnr_ipaddr,
1241                        &conn->ksnc_ipaddr);
1242         }
1243
1244         /* Search for a route corresponding to the new connection and
1245          * create an association.  This allows incoming connections created
1246          * by routes in my peer to match my own route entries so I don't
1247          * continually create duplicate routes. */
1248         list_for_each(tmp, &peer->ksnp_routes) {
1249                 route = list_entry(tmp, ksock_route_t, ksnr_list);
1250
1251                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1252                         continue;
1253
1254                 ksocknal_associate_route_conn_locked(route, conn);
1255                 break;
1256         }
1257
1258         conn->ksnc_peer = peer;                 /* conn takes my ref on peer */
1259         peer->ksnp_last_alive = cfs_time_current();
1260         peer->ksnp_send_keepalive = 0;
1261         peer->ksnp_error = 0;
1262
1263         sched = ksocknal_choose_scheduler_locked(cpt);
1264         sched->kss_nconns++;
1265         conn->ksnc_scheduler = sched;
1266
1267         conn->ksnc_tx_last_post = cfs_time_current();
1268         /* Set the deadline for the outgoing HELLO to drain */
1269         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1270         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1271         smp_mb();   /* order with adding to peer's conn list */
1272
1273         list_add(&conn->ksnc_list, &peer->ksnp_conns);
1274         ksocknal_conn_addref(conn);
1275
1276         ksocknal_new_packet(conn, 0);
1277
1278         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1279
1280         /* Take packets blocking for this connection. */
1281         list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1282                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1283                     SOCKNAL_MATCH_NO)
1284                         continue;
1285
1286                 list_del(&tx->tx_list);
1287                 ksocknal_queue_tx_locked(tx, conn);
1288         }
1289
1290         write_unlock_bh(global_lock);
1291
1292         /* We've now got a new connection.  Any errors from here on are just
1293          * like "normal" comms errors and we close the connection normally.
1294          * NB (a) we still have to send the reply HELLO for passive
1295          *        connections,
1296          *    (b) normal I/O on the conn is blocked until I set up and call the
1297          *        socket callbacks.
1298          */
1299
1300         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1301                " incarnation:%lld sched[%d:%d]\n",
1302                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1303                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1304                conn->ksnc_port, incarnation, cpt,
1305                (int)(sched - &sched->kss_info->ksi_scheds[0]));
1306
1307         if (active) {
1308                 /* additional routes after interface exchange? */
1309                 ksocknal_create_routes(peer, conn->ksnc_port,
1310                                        hello->kshm_ips, hello->kshm_nips);
1311         } else {
1312                 hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
1313                                                        hello->kshm_nips);
1314                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1315         }
1316
1317         LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1318                                     kshm_ips[LNET_MAX_INTERFACES]));
1319
1320         /* set up the socket AFTER I've received hello (it disables
1321          * SO_LINGER).  I might call back to the acceptor who may want
1322          * to send a protocol version response and then close the
1323          * socket; this ensures the socket only tears down after the
1324          * response has been sent. */
1325         if (rc == 0)
1326                 rc = ksocknal_lib_setup_sock(sock);
1327
1328         write_lock_bh(global_lock);
1329
1330         /* NB my callbacks block while I hold ksnd_global_lock */
1331         ksocknal_lib_set_callback(sock, conn);
1332
1333         if (!active)
1334                 peer->ksnp_accepting--;
1335
1336         write_unlock_bh(global_lock);
1337
1338         if (rc != 0) {
1339                 write_lock_bh(global_lock);
1340                 if (!conn->ksnc_closing) {
1341                         /* could be closed by another thread */
1342                         ksocknal_close_conn_locked(conn, rc);
1343                 }
1344                 write_unlock_bh(global_lock);
1345         } else if (ksocknal_connsock_addref(conn) == 0) {
1346                 /* Allow I/O to proceed. */
1347                 ksocknal_read_callback(conn);
1348                 ksocknal_write_callback(conn);
1349                 ksocknal_connsock_decref(conn);
1350         }
1351
1352         ksocknal_connsock_decref(conn);
1353         ksocknal_conn_decref(conn);
1354         return rc;
1355
1356 failed_2:
1357         if (!peer->ksnp_closing &&
1358             list_empty(&peer->ksnp_conns) &&
1359             list_empty(&peer->ksnp_routes)) {
1360                 list_add(&zombies, &peer->ksnp_tx_queue);
1361                 list_del_init(&peer->ksnp_tx_queue);
1362                 ksocknal_unlink_peer_locked(peer);
1363         }
1364
1365         write_unlock_bh(global_lock);
1366
1367         if (warn != NULL) {
1368                 if (rc < 0)
1369                         CERROR("Not creating conn %s type %d: %s\n",
1370                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1371                 else
1372                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1373                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1374         }
1375
1376         if (!active) {
1377                 if (rc > 0) {
1378                         /* Request retry by replying with CONN_NONE
1379                          * ksnc_proto has been set already */
1380                         conn->ksnc_type = SOCKLND_CONN_NONE;
1381                         hello->kshm_nips = 0;
1382                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1383                 }
1384
1385                 write_lock_bh(global_lock);
1386                 peer->ksnp_accepting--;
1387                 write_unlock_bh(global_lock);
1388         }
1389
1390         ksocknal_txlist_done(ni, &zombies, 1);
1391         ksocknal_peer_decref(peer);
1392
1393  failed_1:
1394         if (hello != NULL)
1395                 LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1396                                             kshm_ips[LNET_MAX_INTERFACES]));
1397
1398         LIBCFS_FREE(conn, sizeof(*conn));
1399
1400 failed_0:
1401         sock_release(sock);
1402         return rc;
1403 }
1404
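/*
 * Editorial sketch (not part of the original source): the two ways
 * ksocknal_create_conn() is entered.  Per the LASSERT above, route == NULL
 * (a passively accepted socket) pairs with SOCKLND_CONN_NONE, while an
 * active connect passes the route and the connection type being established.
 * The variables below ('cr', 'route', 'sock', 'type') are placeholders.
 */
#if 0
        /* passive: socket handed over by ksocknal_accept() via a connd thread */
        rc = ksocknal_create_conn(cr->ksncr_ni, NULL, cr->ksncr_sock,
                                  SOCKLND_CONN_NONE);

        /* active: socket just connected on behalf of 'route' */
        rc = ksocknal_create_conn(route->ksnr_peer->ksnp_ni, route, sock, type);
#endif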
1405 void
1406 ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
1407 {
1408         /* This just does the immediate housekeeping, and queues the
1409          * connection for the reaper to terminate.
1410          * Caller holds ksnd_global_lock exclusively in irq context */
1411         ksock_peer_t      *peer = conn->ksnc_peer;
1412         ksock_route_t     *route;
1413         ksock_conn_t      *conn2;
1414         struct list_head  *tmp;
1415
1416         LASSERT(peer->ksnp_error == 0);
1417         LASSERT(!conn->ksnc_closing);
1418         conn->ksnc_closing = 1;
1419
1420         /* ksnd_deathrow_conns takes over peer's ref */
1421         list_del(&conn->ksnc_list);
1422
1423         route = conn->ksnc_route;
1424         if (route != NULL) {
1425                 /* dissociate conn from route... */
1426                 LASSERT(!route->ksnr_deleted);
1427                 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1428
1429                 conn2 = NULL;
1430                 list_for_each(tmp, &peer->ksnp_conns) {
1431                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1432
1433                         if (conn2->ksnc_route == route &&
1434                             conn2->ksnc_type == conn->ksnc_type)
1435                                 break;
1436
1437                         conn2 = NULL;
1438                 }
1439                 if (conn2 == NULL)
1440                         route->ksnr_connected &= ~(1 << conn->ksnc_type);
1441
1442                 conn->ksnc_route = NULL;
1443
1444 #if 0           /* irrelevant with only eager routes */
1445                 /* make route least favourite */
1446                 list_del(&route->ksnr_list);
1447                 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
1448 #endif
1449                 ksocknal_route_decref(route);   /* drop conn's ref on route */
1450         }
1451
1452         if (list_empty(&peer->ksnp_conns)) {
1453                 /* No more connections to this peer */
1454
1455                 if (!list_empty(&peer->ksnp_tx_queue)) {
1456                         ksock_tx_t *tx;
1457
1458                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1459
1460                         /* throw them to the last connection;
1461                          * these TXs will be sent to /dev/null by the scheduler */
1462                         list_for_each_entry(tx, &peer->ksnp_tx_queue,
1463                                             tx_list)
1464                                 ksocknal_tx_prep(conn, tx);
1465
1466                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1467                         list_splice_init(&peer->ksnp_tx_queue,
1468                                          &conn->ksnc_tx_queue);
1469                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1470                 }
1471
1472                 /* renegotiate protocol version */
1473                 peer->ksnp_proto = NULL;
1474                 /* stash last conn close reason */
1475                 peer->ksnp_error = error;
1476
1477                 if (list_empty(&peer->ksnp_routes)) {
1478                         /* I've just closed last conn belonging to a
1479                          * peer with no routes to it */
1480                         ksocknal_unlink_peer_locked(peer);
1481                 }
1482         }
1483
1484         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1485
1486         list_add_tail(&conn->ksnc_list,
1487                       &ksocknal_data.ksnd_deathrow_conns);
1488         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1489
1490         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1491 }
1492
1493 void
1494 ksocknal_peer_failed (ksock_peer_t *peer)
1495 {
1496         int        notify = 0;
1497         cfs_time_t last_alive = 0;
1498
1499         /* There has been a connection failure or comms error; but I'll only
1500          * tell LNET I think the peer is dead if it's to another kernel and
1501          * there are no connections or connection attempts in existence. */
1502
1503         read_lock(&ksocknal_data.ksnd_global_lock);
1504
1505         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1506              list_empty(&peer->ksnp_conns) &&
1507              peer->ksnp_accepting == 0 &&
1508              ksocknal_find_connecting_route_locked(peer) == NULL) {
1509                 notify = 1;
1510                 last_alive = peer->ksnp_last_alive;
1511         }
1512
1513         read_unlock(&ksocknal_data.ksnd_global_lock);
1514
1515         if (notify)
1516                 lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
1517                             last_alive);
1518 }
1519
1520 void
1521 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1522 {
1523         ksock_peer_t     *peer = conn->ksnc_peer;
1524         ksock_tx_t       *tx;
1525         ksock_tx_t       *tmp;
1526         struct list_head  zlist = LIST_HEAD_INIT(zlist);
1527
1528         /* NB safe to finalize TXs because closing of socket will
1529          * abort all buffered data */
1530         LASSERT(conn->ksnc_sock == NULL);
1531
1532         spin_lock(&peer->ksnp_lock);
1533
1534         list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1535                 if (tx->tx_conn != conn)
1536                         continue;
1537
1538                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1539
1540                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1541                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1542                 list_del(&tx->tx_zc_list);
1543                 list_add(&tx->tx_zc_list, &zlist);
1544         }
1545
1546         spin_unlock(&peer->ksnp_lock);
1547
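             /* now drop the aborted ZC requests' references with
              * ksnp_lock released */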
1548         while (!list_empty(&zlist)) {
1549                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1550
1551                 list_del(&tx->tx_zc_list);
1552                 ksocknal_tx_decref(tx);
1553         }
1554 }
1555
1556 void
1557 ksocknal_terminate_conn(ksock_conn_t *conn)
1558 {
1559         /* This gets called by the reaper (guaranteed thread context) to
1560          * disengage the socket from its callbacks and close it.
1561          * ksnc_refcount will eventually hit zero, and then the reaper will
1562          * destroy it. */
1563         ksock_peer_t     *peer = conn->ksnc_peer;
1564         ksock_sched_t    *sched = conn->ksnc_scheduler;
1565         int               failed = 0;
1566
1567         LASSERT(conn->ksnc_closing);
1568
1569         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1570         spin_lock_bh(&sched->kss_lock);
1571
1572         /* a closing conn is always ready to tx */
1573         conn->ksnc_tx_ready = 1;
1574
1575         if (!conn->ksnc_tx_scheduled &&
1576             !list_empty(&conn->ksnc_tx_queue)) {
1577                 list_add_tail(&conn->ksnc_tx_list,
1578                                &sched->kss_tx_conns);
1579                 conn->ksnc_tx_scheduled = 1;
1580                 /* extra ref for scheduler */
1581                 ksocknal_conn_addref(conn);
1582
1583                 wake_up (&sched->kss_waitq);
1584         }
1585
1586         spin_unlock_bh(&sched->kss_lock);
1587
1588         /* serialise with callbacks */
1589         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1590
1591         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1592
1593         /* OK, so this conn may not be completely disengaged from its
1594          * scheduler yet, but it _has_ committed to terminate... */
1595         conn->ksnc_scheduler->kss_nconns--;
1596
1597         if (peer->ksnp_error != 0) {
1598                 /* peer's last conn closed in error */
1599                 LASSERT(list_empty(&peer->ksnp_conns));
1600                 failed = 1;
1601                 peer->ksnp_error = 0;     /* avoid multiple notifications */
1602         }
1603
1604         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1605
1606         if (failed)
1607                 ksocknal_peer_failed(peer);
1608
1609         /* The socket is closed on the final put; either here, or in
1610          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1611          * when the connection was established, this will close the socket
1612          * immediately, aborting anything buffered in it. Any hung
1613          * zero-copy transmits will therefore complete in finite time. */
1614         ksocknal_connsock_decref(conn);
1615 }
1616
1617 void
1618 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1619 {
1620         /* Queue the conn for the reaper to destroy */
1621
1622         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1623         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1624
1625         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1626         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1627
1628         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1629 }
1630
1631 void
1632 ksocknal_destroy_conn (ksock_conn_t *conn)
1633 {
1634         cfs_time_t      last_rcv;
1635
1636         /* Final coup-de-grace of the reaper */
1637         CDEBUG (D_NET, "connection %p\n", conn);
1638
1639         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1640         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1641         LASSERT (conn->ksnc_sock == NULL);
1642         LASSERT (conn->ksnc_route == NULL);
1643         LASSERT (!conn->ksnc_tx_scheduled);
1644         LASSERT (!conn->ksnc_rx_scheduled);
1645         LASSERT(list_empty(&conn->ksnc_tx_queue));
1646
1647         /* complete current receive if any */
1648         switch (conn->ksnc_rx_state) {
1649         case SOCKNAL_RX_LNET_PAYLOAD:
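                     /* the rx deadline is pushed out by the timeout each time
                      * data arrives, so deadline - timeout approximates when
                      * the last data was actually received */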
1650                 last_rcv = conn->ksnc_rx_deadline -
1651                            cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1652                 CERROR("Completing partial receive from %s[%d], "
1653                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1654                        "last alive is %ld secs ago\n",
1655                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1656                        &conn->ksnc_ipaddr, conn->ksnc_port,
1657                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1658                        cfs_duration_sec(cfs_time_sub(cfs_time_current(),
1659                                         last_rcv)));
1660                 lnet_finalize (conn->ksnc_peer->ksnp_ni,
1661                                conn->ksnc_cookie, -EIO);
1662                 break;
1663         case SOCKNAL_RX_LNET_HEADER:
1664                 if (conn->ksnc_rx_started)
1665                         CERROR("Incomplete receive of lnet header from %s, "
1666                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1667                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1668                                &conn->ksnc_ipaddr, conn->ksnc_port,
1669                                conn->ksnc_proto->pro_version);
1670                 break;
1671         case SOCKNAL_RX_KSM_HEADER:
1672                 if (conn->ksnc_rx_started)
1673                         CERROR("Incomplete receive of ksock message from %s, "
1674                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1675                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1676                                &conn->ksnc_ipaddr, conn->ksnc_port,
1677                                conn->ksnc_proto->pro_version);
1678                 break;
1679         case SOCKNAL_RX_SLOP:
1680                 if (conn->ksnc_rx_started)
1681                         CERROR("Incomplete receive of slops from %s, "
1682                                "ip %pI4h:%d, with error\n",
1683                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1684                                &conn->ksnc_ipaddr, conn->ksnc_port);
1685                 break;
1686         default:
1687                 LBUG ();
1688                 break;
1689         }
1690
1691         ksocknal_peer_decref(conn->ksnc_peer);
1692
1693         LIBCFS_FREE (conn, sizeof (*conn));
1694 }
1695
1696 int
1697 ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
1698 {
1699         ksock_conn_t       *conn;
1700         struct list_head         *ctmp;
1701         struct list_head         *cnxt;
1702         int                 count = 0;
1703
1704         list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
1705                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1706
1707                 if (ipaddr == 0 ||
1708                     conn->ksnc_ipaddr == ipaddr) {
1709                         count++;
1710                         ksocknal_close_conn_locked (conn, why);
1711                 }
1712         }
1713
1714         return (count);
1715 }
1716
1717 int
1718 ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1719 {
1720         ksock_peer_t     *peer = conn->ksnc_peer;
1721         __u32             ipaddr = conn->ksnc_ipaddr;
1722         int               count;
1723
1724         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1725
1726         count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
1727
1728         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1729
1730         return (count);
1731 }
1732
1733 int
1734 ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
1735 {
1736         ksock_peer_t       *peer;
1737         struct list_head         *ptmp;
1738         struct list_head         *pnxt;
1739         int                 lo;
1740         int                 hi;
1741         int                 i;
1742         int                 count = 0;
1743
1744         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1745
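             /* a specific NID maps to exactly one hash bucket;
              * LNET_NID_ANY means every bucket must be scanned */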
1746         if (id.nid != LNET_NID_ANY)
1747                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1748         else {
1749                 lo = 0;
1750                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1751         }
1752
1753         for (i = lo; i <= hi; i++) {
1754                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1755
1756                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
1757
1758                         if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1759                               (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1760                                 continue;
1761
1762                         count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0);
1763                 }
1764         }
1765
1766         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1767
1768         /* wildcards always succeed */
1769         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1770                 return (0);
1771
1772         return (count == 0 ? -ENOENT : 0);
1773 }
1774
1775 void
1776 ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1777 {
1778         /* The router is telling me she's been notified of a change in
1779          * gateway state.... */
1780         lnet_process_id_t  id = {0};
1781
1782         id.nid = gw_nid;
1783         id.pid = LNET_PID_ANY;
1784
1785         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1786                 alive ? "up" : "down");
1787
1788         if (!alive) {
1789                 /* If the gateway crashed, close all open connections... */
1790                 ksocknal_close_matching_conns (id, 0);
1791                 return;
1792         }
1793
1794         /* ...otherwise do nothing.  We can only establish new connections
1795          * if we have autoroutes, and these connect on demand. */
1796 }
1797
1798 void
1799 ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1800 {
1801         int                connect = 1;
1802         cfs_time_t         last_alive = 0;
1803         cfs_time_t         now = cfs_time_current();
1804         ksock_peer_t      *peer = NULL;
1805         rwlock_t                *glock = &ksocknal_data.ksnd_global_lock;
1806         lnet_process_id_t  id = {
1807                 .nid = nid,
1808                 .pid = LNET_PID_LUSTRE,
1809         };
1810
1811         read_lock(glock);
1812
1813         peer = ksocknal_find_peer_locked(ni, id);
1814         if (peer != NULL) {
1815                 struct list_head       *tmp;
1816                 ksock_conn_t     *conn;
1817                 int               bufnob;
1818
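                     /* if the socket's send queue has shrunk since the last
                      * check, the peer must have ACKed some data recently */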
1819                 list_for_each(tmp, &peer->ksnp_conns) {
1820                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1821                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1822
1823                         if (bufnob < conn->ksnc_tx_bufnob) {
1824                                 /* something got ACKed */
1825                                 conn->ksnc_tx_deadline =
1826                                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1827                                 peer->ksnp_last_alive = now;
1828                                 conn->ksnc_tx_bufnob = bufnob;
1829                         }
1830                 }
1831
1832                 last_alive = peer->ksnp_last_alive;
1833                 if (ksocknal_find_connectable_route_locked(peer) == NULL)
1834                         connect = 0;
1835         }
1836
1837         read_unlock(glock);
1838
1839         if (last_alive != 0)
1840                 *when = last_alive;
1841
1842         CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1843                libcfs_nid2str(nid), peer,
1844                last_alive ? cfs_duration_sec(now - last_alive) : -1,
1845                connect);
1846
1847         if (!connect)
1848                 return;
1849
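             /* ensure the peer exists with a route to the acceptor port,
              * then kick off connection attempts on all its routes */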
1850         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1851
1852         write_lock_bh(glock);
1853
1854         peer = ksocknal_find_peer_locked(ni, id);
1855         if (peer != NULL)
1856                 ksocknal_launch_all_connections_locked(peer);
1857
1858         write_unlock_bh(glock);
1859         return;
1860 }
1861
1862 static void
1863 ksocknal_push_peer (ksock_peer_t *peer)
1864 {
1865         int               index;
1866         int               i;
1867         struct list_head       *tmp;
1868         ksock_conn_t     *conn;
1869
1870         for (index = 0; ; index++) {
1871                 read_lock(&ksocknal_data.ksnd_global_lock);
1872
1873                 i = 0;
1874                 conn = NULL;
1875
1876                 list_for_each(tmp, &peer->ksnp_conns) {
1877                         if (i++ == index) {
1878                                 conn = list_entry(tmp, ksock_conn_t,
1879                                                        ksnc_list);
1880                                 ksocknal_conn_addref(conn);
1881                                 break;
1882                         }
1883                 }
1884
1885                 read_unlock(&ksocknal_data.ksnd_global_lock);
1886
1887                 if (conn == NULL)
1888                         break;
1889
1890                 ksocknal_lib_push_conn (conn);
1891                 ksocknal_conn_decref(conn);
1892         }
1893 }
1894
1895 static int
1896 ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
1897 {
1898         struct list_head *start;
1899         struct list_head *end;
1900         struct list_head *tmp;
1901         int               rc = -ENOENT;
1902         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1903
1904         if (id.nid == LNET_NID_ANY) {
1905                 start = &ksocknal_data.ksnd_peers[0];
1906                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1907         } else {
1908                 start = end = ksocknal_nid2peerlist(id.nid);
1909         }
1910
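             /* walk each selected hash chain one peer at a time: hold the
              * global lock only long enough to find and reference the
              * peer_off'th matching peer, then push it with the lock dropped */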
1911         for (tmp = start; tmp <= end; tmp++) {
1912                 int     peer_off; /* searching offset in peer hash table */
1913
1914                 for (peer_off = 0; ; peer_off++) {
1915                         ksock_peer_t *peer;
1916                         int           i = 0;
1917
1918                         read_lock(&ksocknal_data.ksnd_global_lock);
1919                         list_for_each_entry(peer, tmp, ksnp_list) {
1920                                 if (!((id.nid == LNET_NID_ANY ||
1921                                        id.nid == peer->ksnp_id.nid) &&
1922                                       (id.pid == LNET_PID_ANY ||
1923                                        id.pid == peer->ksnp_id.pid)))
1924                                         continue;
1925
1926                                 if (i++ == peer_off) {
1927                                         ksocknal_peer_addref(peer);
1928                                         break;
1929                                 }
1930                         }
1931                         read_unlock(&ksocknal_data.ksnd_global_lock);
1932
1933                         if (i <= peer_off) /* no match */
1934                                 break;
1935
1936                         rc = 0;
1937                         ksocknal_push_peer(peer);
1938                         ksocknal_peer_decref(peer);
1939                 }
1940         }
1941         return rc;
1942 }
1943
1944 static int
1945 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1946 {
1947         ksock_net_t       *net = ni->ni_data;
1948         ksock_interface_t *iface;
1949         int                rc;
1950         int                i;
1951         int                j;
1952         struct list_head        *ptmp;
1953         ksock_peer_t      *peer;
1954         struct list_head        *rtmp;
1955         ksock_route_t     *route;
1956
1957         if (ipaddress == 0 ||
1958             netmask == 0)
1959                 return (-EINVAL);
1960
1961         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1962
1963         iface = ksocknal_ip2iface(ni, ipaddress);
1964         if (iface != NULL) {
1965                 /* silently ignore dups */
1966                 rc = 0;
1967         } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1968                 rc = -ENOSPC;
1969         } else {
1970                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1971
1972                 iface->ksni_ipaddr = ipaddress;
1973                 iface->ksni_netmask = netmask;
1974                 iface->ksni_nroutes = 0;
1975                 iface->ksni_npeers = 0;
1976
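                     /* seed the new interface's counters from existing state:
                      * peers that negotiated this IP passively and routes
                      * already bound to it */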
1977                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1978                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1979                                 peer = list_entry(ptmp, ksock_peer_t,
1980                                                       ksnp_list);
1981
1982                                 for (j = 0; j < peer->ksnp_n_passive_ips; j++)
1983                                         if (peer->ksnp_passive_ips[j] == ipaddress)
1984                                                 iface->ksni_npeers++;
1985
1986                                 list_for_each(rtmp, &peer->ksnp_routes) {
1987                                         route = list_entry(rtmp,
1988                                                                ksock_route_t,
1989                                                                ksnr_list);
1990
1991                                         if (route->ksnr_myipaddr == ipaddress)
1992                                                 iface->ksni_nroutes++;
1993                                 }
1994                         }
1995                 }
1996
1997                 rc = 0;
1998                 /* NB only new connections will pay attention to the new interface! */
1999         }
2000
2001         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2002
2003         return (rc);
2004 }
2005
2006 static void
2007 ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
2008 {
2009         struct list_head         *tmp;
2010         struct list_head         *nxt;
2011         ksock_route_t      *route;
2012         ksock_conn_t       *conn;
2013         int                 i;
2014         int                 j;
2015
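             /* drop ipaddr from the passive IP list, shifting the
              * remaining entries down to keep the array dense */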
2016         for (i = 0; i < peer->ksnp_n_passive_ips; i++)
2017                 if (peer->ksnp_passive_ips[i] == ipaddr) {
2018                         for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
2019                                 peer->ksnp_passive_ips[j-1] =
2020                                         peer->ksnp_passive_ips[j];
2021                         peer->ksnp_n_passive_ips--;
2022                         break;
2023                 }
2024
2025         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
2026                 route = list_entry(tmp, ksock_route_t, ksnr_list);
2027
2028                 if (route->ksnr_myipaddr != ipaddr)
2029                         continue;
2030
2031                 if (route->ksnr_share_count != 0) {
2032                         /* Manually created; keep, but unbind */
2033                         route->ksnr_myipaddr = 0;
2034                 } else {
2035                         ksocknal_del_route_locked(route);
2036                 }
2037         }
2038
2039         list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
2040                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2041
2042                 if (conn->ksnc_myipaddr == ipaddr)
2043                         ksocknal_close_conn_locked (conn, 0);
2044         }
2045 }
2046
2047 static int
2048 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2049 {
2050         ksock_net_t       *net = ni->ni_data;
2051         int                rc = -ENOENT;
2052         struct list_head        *tmp;
2053         struct list_head        *nxt;
2054         ksock_peer_t      *peer;
2055         __u32              this_ip;
2056         int                i;
2057         int                j;
2058
2059         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2060
2061         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2062                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2063
2064                 if (!(ipaddress == 0 ||
2065                       ipaddress == this_ip))
2066                         continue;
2067
2068                 rc = 0;
2069
2070                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2071                         net->ksnn_interfaces[j-1] =
2072                                 net->ksnn_interfaces[j];
2073
2074                 net->ksnn_ninterfaces--;
2075
2076                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2077                         list_for_each_safe(tmp, nxt,
2078                                                &ksocknal_data.ksnd_peers[j]) {
2079                                 peer = list_entry(tmp, ksock_peer_t,
2080                                                       ksnp_list);
2081
2082                                 if (peer->ksnp_ni != ni)
2083                                         continue;
2084
2085                                 ksocknal_peer_del_interface_locked(peer, this_ip);
2086                         }
2087                 }
2088         }
2089
2090         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2091
2092         return (rc);
2093 }
2094
2095 int
2096 ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2097 {
2098         lnet_process_id_t id = {0};
2099         struct libcfs_ioctl_data *data = arg;
2100         int rc;
2101
2102         switch(cmd) {
2103         case IOC_LIBCFS_GET_INTERFACE: {
2104                 ksock_net_t       *net = ni->ni_data;
2105                 ksock_interface_t *iface;
2106
2107                 read_lock(&ksocknal_data.ksnd_global_lock);
2108
2109                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2110                         rc = -ENOENT;
2111                 } else {
2112                         rc = 0;
2113                         iface = &net->ksnn_interfaces[data->ioc_count];
2114
2115                         data->ioc_u32[0] = iface->ksni_ipaddr;
2116                         data->ioc_u32[1] = iface->ksni_netmask;
2117                         data->ioc_u32[2] = iface->ksni_npeers;
2118                         data->ioc_u32[3] = iface->ksni_nroutes;
2119                 }
2120
2121                 read_unlock(&ksocknal_data.ksnd_global_lock);
2122                 return rc;
2123         }
2124
2125         case IOC_LIBCFS_ADD_INTERFACE:
2126                 return ksocknal_add_interface(ni,
2127                                               data->ioc_u32[0], /* IP address */
2128                                               data->ioc_u32[1]); /* net mask */
2129
2130         case IOC_LIBCFS_DEL_INTERFACE:
2131                 return ksocknal_del_interface(ni,
2132                                               data->ioc_u32[0]); /* IP address */
2133
2134         case IOC_LIBCFS_GET_PEER: {
2135                 __u32            myip = 0;
2136                 __u32            ip = 0;
2137                 int              port = 0;
2138                 int              conn_count = 0;
2139                 int              share_count = 0;
2140
2141                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2142                                             &id, &myip, &ip, &port,
2143                                             &conn_count,  &share_count);
2144                 if (rc != 0)
2145                         return rc;
2146
2147                 data->ioc_nid    = id.nid;
2148                 data->ioc_count  = share_count;
2149                 data->ioc_u32[0] = ip;
2150                 data->ioc_u32[1] = port;
2151                 data->ioc_u32[2] = myip;
2152                 data->ioc_u32[3] = conn_count;
2153                 data->ioc_u32[4] = id.pid;
2154                 return 0;
2155         }
2156
2157         case IOC_LIBCFS_ADD_PEER:
2158                 id.nid = data->ioc_nid;
2159                 id.pid = LNET_PID_LUSTRE;
2160                 return ksocknal_add_peer (ni, id,
2161                                           data->ioc_u32[0], /* IP */
2162                                           data->ioc_u32[1]); /* port */
2163
2164         case IOC_LIBCFS_DEL_PEER:
2165                 id.nid = data->ioc_nid;
2166                 id.pid = LNET_PID_ANY;
2167                 return ksocknal_del_peer (ni, id,
2168                                           data->ioc_u32[0]); /* IP */
2169
2170         case IOC_LIBCFS_GET_CONN: {
2171                 int           txmem;
2172                 int           rxmem;
2173                 int           nagle;
2174                 ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2175
2176                 if (conn == NULL)
2177                         return -ENOENT;
2178
2179                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2180
2181                 data->ioc_count  = txmem;
2182                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2183                 data->ioc_flags  = nagle;
2184                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2185                 data->ioc_u32[1] = conn->ksnc_port;
2186                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2187                 data->ioc_u32[3] = conn->ksnc_type;
2188                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2189                 data->ioc_u32[5] = rxmem;
2190                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2191                 ksocknal_conn_decref(conn);
2192                 return 0;
2193         }
2194
2195         case IOC_LIBCFS_CLOSE_CONNECTION:
2196                 id.nid = data->ioc_nid;
2197                 id.pid = LNET_PID_ANY;
2198                 return ksocknal_close_matching_conns (id,
2199                                                       data->ioc_u32[0]);
2200
2201         case IOC_LIBCFS_REGISTER_MYNID:
2202                 /* Ignore if this is a noop */
2203                 if (data->ioc_nid == ni->ni_nid)
2204                         return 0;
2205
2206                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2207                        libcfs_nid2str(data->ioc_nid),
2208                        libcfs_nid2str(ni->ni_nid));
2209                 return -EINVAL;
2210
2211         case IOC_LIBCFS_PUSH_CONNECTION:
2212                 id.nid = data->ioc_nid;
2213                 id.pid = LNET_PID_ANY;
2214                 return ksocknal_push(ni, id);
2215
2216         default:
2217                 return -EINVAL;
2218         }
2219         /* not reached */
2220 }
2221
2222 static void
2223 ksocknal_free_buffers (void)
2224 {
2225         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2226
2227         if (ksocknal_data.ksnd_sched_info != NULL) {
2228                 struct ksock_sched_info *info;
2229                 int                     i;
2230
2231                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2232                         if (info->ksi_scheds != NULL) {
2233                                 LIBCFS_FREE(info->ksi_scheds,
2234                                             info->ksi_nthreads_max *
2235                                             sizeof(info->ksi_scheds[0]));
2236                         }
2237                 }
2238                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2239         }
2240
2241         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2242                      sizeof(struct list_head) *
2243                      ksocknal_data.ksnd_peer_hash_size);
2244
2245         spin_lock(&ksocknal_data.ksnd_tx_lock);
2246
2247         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2248                 struct list_head        zlist;
2249                 ksock_tx_t      *tx;
2250
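                     /* splice the idle noop txs onto a private list: link
                      * zlist in right after the head, then detach the head so
                      * the txs are only reachable through zlist */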
2251                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2252                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2253                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2254
2255                 while (!list_empty(&zlist)) {
2256                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2257                         list_del(&tx->tx_list);
2258                         LIBCFS_FREE(tx, tx->tx_desc_size);
2259                 }
2260         } else {
2261                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2262         }
2263 }
2264
2265 static void
2266 ksocknal_base_shutdown(void)
2267 {
2268         struct ksock_sched_info *info;
2269         ksock_sched_t           *sched;
2270         int                     i;
2271         int                     j;
2272
2273         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2274                atomic_read (&libcfs_kmemory));
2275         LASSERT (ksocknal_data.ksnd_nnets == 0);
2276
2277         switch (ksocknal_data.ksnd_init) {
2278         default:
2279                 LASSERT (0);
2280
2281         case SOCKNAL_INIT_ALL:
2282         case SOCKNAL_INIT_DATA:
2283                 LASSERT (ksocknal_data.ksnd_peers != NULL);
2284                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2285                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2286                 }
2287
2288                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2289                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2290                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2291                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2292                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2293
2294                 if (ksocknal_data.ksnd_sched_info != NULL) {
2295                         cfs_percpt_for_each(info, i,
2296                                             ksocknal_data.ksnd_sched_info) {
2297                                 if (info->ksi_scheds == NULL)
2298                                         continue;
2299
2300                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2301
2302                                         sched = &info->ksi_scheds[j];
2303                                         LASSERT(list_empty(
2304                                                 &sched->kss_tx_conns));
2305                                         LASSERT(list_empty(
2306                                                 &sched->kss_rx_conns));
2307                                         LASSERT(list_empty(
2308                                                 &sched->kss_zombie_noop_txs));
2309                                         LASSERT(sched->kss_nconns == 0);
2310                                 }
2311                         }
2312                 }
2313
2314                 /* flag threads to terminate; wake and wait for them to die */
2315                 ksocknal_data.ksnd_shuttingdown = 1;
2316                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2317                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2318
2319                 if (ksocknal_data.ksnd_sched_info != NULL) {
2320                         cfs_percpt_for_each(info, i,
2321                                             ksocknal_data.ksnd_sched_info) {
2322                                 if (info->ksi_scheds == NULL)
2323                                         continue;
2324
2325                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2326                                         sched = &info->ksi_scheds[j];
2327                                         wake_up_all(&sched->kss_waitq);
2328                                 }
2329                         }
2330                 }
2331
2332                 i = 4;
2333                 read_lock(&ksocknal_data.ksnd_global_lock);
2334                 while (ksocknal_data.ksnd_nthreads != 0) {
2335                         i++;
2336                         /* power of 2? */
2337                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2338                                 "waiting for %d threads to terminate\n",
2339                                 ksocknal_data.ksnd_nthreads);
2340                         read_unlock(&ksocknal_data.ksnd_global_lock);
2341                         set_current_state(TASK_UNINTERRUPTIBLE);
2342                         schedule_timeout(cfs_time_seconds(1));
2343                         read_lock(&ksocknal_data.ksnd_global_lock);
2344                 }
2345                 read_unlock(&ksocknal_data.ksnd_global_lock);
2346
2347                 ksocknal_free_buffers();
2348
2349                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2350                 break;
2351         }
2352
2353         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2354                atomic_read (&libcfs_kmemory));
2355
2356         module_put(THIS_MODULE);
2357 }
2358
2359 static __u64 ksocknal_new_incarnation(void)
2360 {
2361         struct timeval tv;
2362
2363         /* The incarnation number is the time this module was loaded and it
2364          * identifies this particular instance of the socknal.  Hopefully
2365          * we won't be able to reboot more frequently than 1MHz for the
2366          * foreseeable future :) */
2367
2368         do_gettimeofday(&tv);
2369
2370         return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2371 }
2372
2373 static int
2374 ksocknal_base_startup(void)
2375 {
2376         struct ksock_sched_info *info;
2377         int                     rc;
2378         int                     i;
2379
2380         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2381         LASSERT (ksocknal_data.ksnd_nnets == 0);
2382
2383         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2384
2385         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2386         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2387                      sizeof(struct list_head) *
2388                      ksocknal_data.ksnd_peer_hash_size);
2389         if (ksocknal_data.ksnd_peers == NULL)
2390                 return -ENOMEM;
2391
2392         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2393                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2394
2395         rwlock_init(&ksocknal_data.ksnd_global_lock);
2396         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2397
2398         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2399         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2400         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2401         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2402         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2403
2404         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2405         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2406         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2407         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2408
2409         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2410         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2411
2412         /* NB memset above zeroes the whole of ksocknal_data */
2413
2414         /* flag lists/ptrs/locks initialised */
2415         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2416         try_module_get(THIS_MODULE);
2417
2418         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2419                                                          sizeof(*info));
2420         if (ksocknal_data.ksnd_sched_info == NULL)
2421                 goto failed;
2422
2423         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2424                 ksock_sched_t   *sched;
2425                 int             nthrs;
2426
2427                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2428                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2429                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2430                 } else {
2431                         /* cap at half of the CPUs; assume the other half
2432                          * should be reserved for upper-layer modules */
2433                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2434                 }
2435
2436                 info->ksi_nthreads_max = nthrs;
2437                 info->ksi_cpt = i;
2438
2439                 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2440                                  info->ksi_nthreads_max * sizeof(*sched));
2441                 if (info->ksi_scheds == NULL)
2442                         goto failed;
2443
2444                 for (; nthrs > 0; nthrs--) {
2445                         sched = &info->ksi_scheds[nthrs - 1];
2446
2447                         sched->kss_info = info;
2448                         spin_lock_init(&sched->kss_lock);
2449                         INIT_LIST_HEAD(&sched->kss_rx_conns);
2450                         INIT_LIST_HEAD(&sched->kss_tx_conns);
2451                         INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2452                         init_waitqueue_head(&sched->kss_waitq);
2453                 }
2454         }
2455
2456         ksocknal_data.ksnd_connd_starting         = 0;
2457         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2458         ksocknal_data.ksnd_connd_starting_stamp   = cfs_time_current_sec();
2459         /* must have at least 2 connds to remain responsive to accepts while
2460          * connecting */
2461         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2462                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2463
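             /* if the configured maximum is below the value just enforced,
              * alias the max tunable to the nconnds tunable so both report
              * the same count */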
2464         if (*ksocknal_tunables.ksnd_nconnds_max <
2465             *ksocknal_tunables.ksnd_nconnds) {
2466                 ksocknal_tunables.ksnd_nconnds_max =
2467                         ksocknal_tunables.ksnd_nconnds;
2468         }
2469
2470         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2471                 char name[16];
2472                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2473                 ksocknal_data.ksnd_connd_starting++;
2474                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2475
2476
2477                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2478                 rc = ksocknal_thread_start(ksocknal_connd,
2479                                            (void *)((ulong_ptr_t)i), name);
2480                 if (rc != 0) {
2481                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2482                         ksocknal_data.ksnd_connd_starting--;
2483                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2484                         CERROR("Can't spawn socknal connd: %d\n", rc);
2485                         goto failed;
2486                 }
2487         }
2488
2489         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2490         if (rc != 0) {
2491                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2492                 goto failed;
2493         }
2494
2495         /* flag everything initialised */
2496         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2497
2498         return 0;
2499
2500  failed:
2501         ksocknal_base_shutdown();
2502         return -ENETDOWN;
2503 }
2504
2505 static void
2506 ksocknal_debug_peerhash (lnet_ni_t *ni)
2507 {
2508         ksock_peer_t    *peer = NULL;
2509         struct list_head        *tmp;
2510         int             i;
2511
2512         read_lock(&ksocknal_data.ksnd_global_lock);
2513
2514         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2515                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2516                         peer = list_entry(tmp, ksock_peer_t, ksnp_list);
2517
2518                         if (peer->ksnp_ni == ni) break;
2519
2520                         peer = NULL;
2521                 }
2522         }
2523
2524         if (peer != NULL) {
2525                 ksock_route_t *route;
2526                 ksock_conn_t  *conn;
2527
2528                 CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
2529                        "closing %d, accepting %d, err %d, zcookie "LPU64", "
2530                        "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
2531                        atomic_read(&peer->ksnp_refcount),
2532                        peer->ksnp_sharecount, peer->ksnp_closing,
2533                        peer->ksnp_accepting, peer->ksnp_error,
2534                        peer->ksnp_zc_next_cookie,
2535                        !list_empty(&peer->ksnp_tx_queue),
2536                        !list_empty(&peer->ksnp_zc_req_list));
2537
2538                 list_for_each(tmp, &peer->ksnp_routes) {
2539                         route = list_entry(tmp, ksock_route_t, ksnr_list);
2540                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2541                                "del %d\n", atomic_read(&route->ksnr_refcount),
2542                                route->ksnr_scheduled, route->ksnr_connecting,
2543                                route->ksnr_connected, route->ksnr_deleted);
2544                 }
2545
2546                 list_for_each(tmp, &peer->ksnp_conns) {
2547                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2548                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2549                                atomic_read(&conn->ksnc_conn_refcount),
2550                                atomic_read(&conn->ksnc_sock_refcount),
2551                                conn->ksnc_type, conn->ksnc_closing);
2552                 }
2553         }
2554
2555         read_unlock(&ksocknal_data.ksnd_global_lock);
2556         return;
2557 }
2558
2559 void
2560 ksocknal_shutdown (lnet_ni_t *ni)
2561 {
2562         ksock_net_t      *net = ni->ni_data;
2563         int               i;
2564         lnet_process_id_t anyid = {0};
2565
2566         anyid.nid =  LNET_NID_ANY;
2567         anyid.pid =  LNET_PID_ANY;
2568
2569         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2570         LASSERT(ksocknal_data.ksnd_nnets > 0);
2571
2572         spin_lock_bh(&net->ksnn_lock);
2573         net->ksnn_shutdown = 1;                 /* prevent new peers */
2574         spin_unlock_bh(&net->ksnn_lock);
2575
2576         /* Delete all peers */
2577         ksocknal_del_peer(ni, anyid, 0);
2578
2579         /* Wait for all peer state to clean up */
2580         i = 2;
2581         spin_lock_bh(&net->ksnn_lock);
2582         while (net->ksnn_npeers != 0) {
2583                 spin_unlock_bh(&net->ksnn_lock);
2584
2585                 i++;
2586                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2587                        "waiting for %d peers to disconnect\n",
2588                        net->ksnn_npeers);
2589                 set_current_state(TASK_UNINTERRUPTIBLE);
2590                 schedule_timeout(cfs_time_seconds(1));
2591
2592                 ksocknal_debug_peerhash(ni);
2593
2594                 spin_lock_bh(&net->ksnn_lock);
2595         }
2596         spin_unlock_bh(&net->ksnn_lock);
2597
2598         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2599                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2600                 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2601         }
2602
2603         list_del(&net->ksnn_list);
2604         LIBCFS_FREE(net, sizeof(*net));
2605
2606         ksocknal_data.ksnd_nnets--;
2607         if (ksocknal_data.ksnd_nnets == 0)
2608                 ksocknal_base_shutdown();
2609 }
2610
2611 static int
2612 ksocknal_enumerate_interfaces(ksock_net_t *net)
2613 {
2614         char      **names;
2615         int         i;
2616         int         j;
2617         int         rc;
2618         int         n;
2619
2620         n = lnet_ipif_enumerate(&names);
2621         if (n <= 0) {
2622                 CERROR("Can't enumerate interfaces: %d\n", n);
2623                 return n;
2624         }
2625
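             /* record the address and netmask of every usable (up,
              * non-loopback) interface, up to LNET_MAX_INTERFACES */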
2626         for (i = j = 0; i < n; i++) {
2627                 int        up;
2628                 __u32      ip;
2629                 __u32      mask;
2630
2631                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2632                         continue;
2633
2634                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2635                 if (rc != 0) {
2636                         CWARN("Can't get interface %s info: %d\n",
2637                               names[i], rc);
2638                         continue;
2639                 }
2640
2641                 if (!up) {
2642                         CWARN("Ignoring interface %s (down)\n",
2643                               names[i]);
2644                         continue;
2645                 }
2646
2647                 if (j == LNET_MAX_INTERFACES) {
2648                         CWARN("Ignoring interface %s (too many interfaces)\n",
2649                               names[i]);
2650                         continue;
2651                 }
2652
2653                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2654                 net->ksnn_interfaces[j].ksni_netmask = mask;
2655                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2656                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2657                 j++;
2658         }
2659
2660         lnet_ipif_free_enumeration(names, n);
2661
2662         if (j == 0)
2663                 CERROR("Can't find any usable interfaces\n");
2664
2665         return j;
2666 }
2667
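     /*
      * Return the number of this net's interfaces whose base device name
      * (with any ":n" alias suffix stripped) is not already used by a net
      * on ksocknal_data.ksnd_nets.
      */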
2668 static int
2669 ksocknal_search_new_ipif(ksock_net_t *net)
2670 {
2671         int     new_ipif = 0;
2672         int     i;
2673
2674         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2675                 char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2676                 char            *colon = strchr(ifnam, ':');
2677                 int             found  = 0;
2678                 ksock_net_t     *tmp;
2679                 int             j;
2680
2681                 if (colon != NULL) /* ignore alias device */
2682                         *colon = 0;
2683
2684                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2685                                         ksnn_list) {
2686                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2687                                 char *ifnam2 =
2688                                         &tmp->ksnn_interfaces[j].ksni_name[0];
2689                                 char *colon2 = strchr(ifnam2, ':');
2690
2691                                 if (colon2 != NULL)
2692                                         *colon2 = 0;
2693
2694                                 found = strcmp(ifnam, ifnam2) == 0;
2695                                 if (colon2 != NULL)
2696                                         *colon2 = ':';
2697                         }
2698                         if (found)
2699                                 break;
2700                 }
2701
2702                 new_ipif += !found;
2703                 if (colon != NULL)
2704                         *colon = ':';
2705         }
2706
2707         return new_ipif;
2708 }
2709
2710 static int
2711 ksocknal_start_schedulers(struct ksock_sched_info *info)
2712 {
2713         int     nthrs;
2714         int     rc = 0;
2715         int     i;
2716
2717         if (info->ksi_nthreads == 0) {
2718                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2719                         nthrs = info->ksi_nthreads_max;
2720                 } else {
2721                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2722                                                info->ksi_cpt);
2723                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2724                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2725                 }
2726                 nthrs = min(nthrs, info->ksi_nthreads_max);
2727         } else {
2728                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2729                 /* start up to two more threads if there is a new interface */
2730                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2731         }
2732
2733         for (i = 0; i < nthrs; i++) {
2734                 long            id;
2735                 char            name[20];
2736                 ksock_sched_t   *sched;
2737                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2738                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2739                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2740                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2741
2742                 rc = ksocknal_thread_start(ksocknal_scheduler,
2743                                            (void *)id, name);
2744                 if (rc == 0)
2745                         continue;
2746
2747                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2748                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2749                 break;
2750         }
2751
2752         info->ksi_nthreads += i;
2753         return rc;
2754 }
2755
2756 static int
2757 ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2758 {
2759         int     newif = ksocknal_search_new_ipif(net);
2760         int     rc;
2761         int     i;
2762
2763         LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2764
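             /* with a new interface, even CPTs that already have scheduler
              * threads may need more; otherwise only CPTs with no threads
              * yet are started */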
2765         for (i = 0; i < ncpts; i++) {
2766                 struct ksock_sched_info *info;
2767                 int cpt = (cpts == NULL) ? i : cpts[i];
2768
2769                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2770                 info = ksocknal_data.ksnd_sched_info[cpt];
2771
2772                 if (!newif && info->ksi_nthreads > 0)
2773                         continue;
2774
2775                 rc = ksocknal_start_schedulers(info);
2776                 if (rc != 0)
2777                         return rc;
2778         }
2779         return 0;
2780 }
2781
2782 int
2783 ksocknal_startup (lnet_ni_t *ni)
2784 {
2785         ksock_net_t  *net;
2786         int           rc;
2787         int           i;
2788
2789         LASSERT (ni->ni_lnd == &the_ksocklnd);
2790
2791         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2792                 rc = ksocknal_base_startup();
2793                 if (rc != 0)
2794                         return rc;
2795         }
2796
2797         LIBCFS_ALLOC(net, sizeof(*net));
2798         if (net == NULL)
2799                 goto fail_0;
2800
2801         spin_lock_init(&net->ksnn_lock);
2802         net->ksnn_incarnation = ksocknal_new_incarnation();
2803         ni->ni_data = net;
2804         ni->ni_peertimeout    = *ksocknal_tunables.ksnd_peertimeout;
2805         ni->ni_maxtxcredits   = *ksocknal_tunables.ksnd_credits;
2806         ni->ni_peertxcredits  = *ksocknal_tunables.ksnd_peertxcredits;
2807         ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
2808
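             /* ni_interfaces[] comes from the LNet configuration (for
              * example a modparam such as networks="tcp0(eth0,eth1)");
              * with no explicit list, use every usable interface */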
2809         if (ni->ni_interfaces[0] == NULL) {
2810                 rc = ksocknal_enumerate_interfaces(net);
2811                 if (rc <= 0)
2812                         goto fail_1;
2813
2814                 net->ksnn_ninterfaces = 1;
2815         } else {
2816                 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2817                         int    up;
2818
2819                         if (ni->ni_interfaces[i] == NULL)
2820                                 break;
2821
2822                         rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
2823                                 &net->ksnn_interfaces[i].ksni_ipaddr,
2824                                 &net->ksnn_interfaces[i].ksni_netmask);
2825
2826                         if (rc != 0) {
2827                                 CERROR("Can't get interface %s info: %d\n",
2828                                        ni->ni_interfaces[i], rc);
2829                                 goto fail_1;
2830                         }
2831
2832                         if (!up) {
2833                                 CERROR("Interface %s is down\n",
2834                                        ni->ni_interfaces[i]);
2835                                 goto fail_1;
2836                         }
2837
2838                         strlcpy(net->ksnn_interfaces[i].ksni_name,
2839                                 ni->ni_interfaces[i],
2840                                 sizeof(net->ksnn_interfaces[i].ksni_name));
2841                 }
2842                 net->ksnn_ninterfaces = i;
2843         }
2844
2845         /* start the threads before the net is added to ksocknal_data.ksnd_nets */
2846         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2847         if (rc != 0)
2848                 goto fail_1;
2849
2850         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2851                                 net->ksnn_interfaces[0].ksni_ipaddr);
2852         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2853
2854         ksocknal_data.ksnd_nnets++;
2855
2856         return 0;
2857
2858  fail_1:
2859         LIBCFS_FREE(net, sizeof(*net));
2860  fail_0:
2861         if (ksocknal_data.ksnd_nnets == 0)
2862                 ksocknal_base_shutdown();
2863
2864         return -ENETDOWN;
2865 }
2866
2867
2868 static void __exit
2869 ksocknal_module_fini (void)
2870 {
2871         lnet_unregister_lnd(&the_ksocklnd);
2872         ksocknal_tunables_fini();
2873 }
2874
2875 static int __init
2876 ksocknal_module_init (void)
2877 {
2878         int    rc;
2879
2880         /* check ksnr_connected/connecting field large enough */
2881         CLASSERT (SOCKLND_CONN_NTYPES <= 4);
2882         CLASSERT (SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2883
2884         /* initialize the_ksocklnd */
2885         the_ksocklnd.lnd_type     = SOCKLND;
2886         the_ksocklnd.lnd_startup  = ksocknal_startup;
2887         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2888         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2889         the_ksocklnd.lnd_send     = ksocknal_send;
2890         the_ksocklnd.lnd_recv     = ksocknal_recv;
2891         the_ksocklnd.lnd_notify   = ksocknal_notify;
2892         the_ksocklnd.lnd_query    = ksocknal_query;
2893         the_ksocklnd.lnd_accept   = ksocknal_accept;
2894
2895         rc = ksocknal_tunables_init();
2896         if (rc != 0)
2897                 return rc;
2898
2899         lnet_register_lnd(&the_ksocklnd);
2900
2901         return 0;
2902 }
2903
2904 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2905 MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
2906 MODULE_VERSION("3.0.0");
2907 MODULE_LICENSE("GPL");
2908
2909 module_init(ksocknal_module_init);
2910 module_exit(ksocknal_module_fini);