Whamcloud - gitweb
0fdedbaad5858386e0da7e77048dd86a8a70df90
[fs/lustre-release.git] / lnet / klnds / socklnd / socklnd.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include <linux/pci.h>
41 #include "socklnd.h"
42
43 static struct lnet_lnd the_ksocklnd;
44 ksock_nal_data_t        ksocknal_data;
45
46 static ksock_interface_t *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
48 {
49         ksock_net_t *net = ni->ni_data;
50         int i;
51         ksock_interface_t *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_NUM_INTERFACES);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return iface;
59         }
60
61         return NULL;
62 }
63
64 static ksock_route_t *
65 ksocknal_create_route (__u32 ipaddr, int port)
66 {
67         ksock_route_t *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route (ksock_route_t *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
/* Allocate and initialise a new peer_ni for process 'id' on network 'ni'.
 * On success *peerp holds the new peer_ni carrying one reference for the
 * caller and net->ksnn_npeers has been incremented; returns 0.
 * Returns -ENOMEM on allocation failure or -ESHUTDOWN if the net is
 * shutting down. */
static int
ksocknal_create_peer(ksock_peer_ni_t **peerp, struct lnet_ni *ni,
                     struct lnet_process_id id)
{
        int             cpt = lnet_cpt_of_nid(id.nid, ni);
        ksock_net_t     *net = ni->ni_data;
        ksock_peer_ni_t *peer_ni;

        LASSERT(id.nid != LNET_NID_ANY);
        LASSERT(id.pid != LNET_PID_ANY);
        LASSERT(!in_interrupt());

        /* Allocate on the CPT that will handle this NID. */
        LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
        if (peer_ni == NULL)
                return -ENOMEM;

        peer_ni->ksnp_ni = ni;
        peer_ni->ksnp_id = id;
        atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
        peer_ni->ksnp_closing = 0;
        peer_ni->ksnp_accepting = 0;
        peer_ni->ksnp_proto = NULL;
        peer_ni->ksnp_last_alive = 0;
        /* start zero-copy cookies above the keepalive-ping sentinel */
        peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        INIT_LIST_HEAD(&peer_ni->ksnp_conns);
        INIT_LIST_HEAD(&peer_ni->ksnp_routes);
        INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
        INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
        spin_lock_init(&peer_ni->ksnp_lock);

        spin_lock_bh(&net->ksnn_lock);

        /* Check shutdown under ksnn_lock so ksnn_npeers stays consistent
         * with the shutdown flag. */
        if (net->ksnn_shutdown) {
                spin_unlock_bh(&net->ksnn_lock);

                LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
                CERROR("Can't create peer_ni: network shutdown\n");
                return -ESHUTDOWN;
        }

        net->ksnn_npeers++;

        spin_unlock_bh(&net->ksnn_lock);

        *peerp = peer_ni;
        return 0;
}
147
/* Final teardown of a peer_ni whose refcount has reached zero: free it
 * and drop its count from the owning net. */
void
ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
{
        ksock_net_t    *net = peer_ni->ksnp_ni->ni_data;

        CDEBUG (D_NET, "peer_ni %s %p deleted\n",
                libcfs_id2str(peer_ni->ksnp_id), peer_ni);

        LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
        LASSERT(peer_ni->ksnp_accepting == 0);
        LASSERT(list_empty(&peer_ni->ksnp_conns));
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
        LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));

        LIBCFS_FREE(peer_ni, sizeof(*peer_ni));

        /* NB a peer_ni's connections and routes keep a reference on their peer_ni
         * until they are destroyed, so we can be assured that _all_ state to
         * do with this peer_ni has been cleaned up when its refcount drops to
         * zero. */
        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_npeers--;
        spin_unlock_bh(&net->ksnn_lock);
}
173
174 ksock_peer_ni_t *
175 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
176 {
177         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178         struct list_head *tmp;
179         ksock_peer_ni_t  *peer_ni;
180
181         list_for_each(tmp, peer_list) {
182
183                 peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
184
185                 LASSERT(!peer_ni->ksnp_closing);
186
187                 if (peer_ni->ksnp_ni != ni)
188                         continue;
189
190                 if (peer_ni->ksnp_id.nid != id.nid ||
191                     peer_ni->ksnp_id.pid != id.pid)
192                         continue;
193
194                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
195                        peer_ni, libcfs_id2str(id),
196                        atomic_read(&peer_ni->ksnp_refcount));
197                 return peer_ni;
198         }
199         return NULL;
200 }
201
202 ksock_peer_ni_t *
203 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
204 {
205         ksock_peer_ni_t     *peer_ni;
206
207         read_lock(&ksocknal_data.ksnd_global_lock);
208         peer_ni = ksocknal_find_peer_locked(ni, id);
209         if (peer_ni != NULL)                    /* +1 ref for caller? */
210                 ksocknal_peer_addref(peer_ni);
211         read_unlock(&ksocknal_data.ksnd_global_lock);
212
213         return (peer_ni);
214 }
215
/* Remove peer_ni from the global peer table (caller holds the global write
 * lock), releasing the per-interface peer counts it held for its passive
 * IPs and dropping the peer table's reference. */
static void
ksocknal_unlink_peer_locked(ksock_peer_ni_t *peer_ni)
{
        int i;
        __u32 ip;
        ksock_interface_t *iface;

        for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
                LASSERT(i < LNET_NUM_INTERFACES);
                ip = peer_ni->ksnp_passive_ips[i];

                iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
                /*
                 * All IPs in peer_ni->ksnp_passive_ips[] come from the
                 * interface list, therefore the call must succeed.
                 */
                LASSERT(iface != NULL);

                CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
                       peer_ni, iface, iface->ksni_nroutes);
                iface->ksni_npeers--;
        }

        /* A peer_ni may only be unlinked once all conns and routes are gone. */
        LASSERT(list_empty(&peer_ni->ksnp_conns));
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(!peer_ni->ksnp_closing);
        peer_ni->ksnp_closing = 1;
        list_del(&peer_ni->ksnp_list);
        /* lose peerlist's ref */
        ksocknal_peer_decref(peer_ni);
}
247
/* Return information about the index'th enumerable peer record on 'ni'.
 * Each peer_ni contributes one record if it has no passive IPs and no
 * routes, else one record per passive IP followed by one per route.
 * 'index' is consumed by decrement as records are skipped.  Returns 0 with
 * the out-parameters filled on a match, -ENOENT if index is out of range. */
static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
                       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
                       int *port, int *conn_count, int *share_count)
{
        ksock_peer_ni_t   *peer_ni;
        struct list_head  *ptmp;
        ksock_route_t     *route;
        struct list_head  *rtmp;
        int                i;
        int                j;
        int                rc = -ENOENT;

        read_lock(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
                        peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

                        if (peer_ni->ksnp_ni != ni)
                                continue;

                        /* bare peer_ni: no passive IPs, no routes */
                        if (peer_ni->ksnp_n_passive_ips == 0 &&
                            list_empty(&peer_ni->ksnp_routes)) {
                                if (index-- > 0)
                                        continue;

                                *id = peer_ni->ksnp_id;
                                *myip = 0;
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        /* one record per passive IP */
                        for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
                                if (index-- > 0)
                                        continue;

                                *id = peer_ni->ksnp_id;
                                *myip = peer_ni->ksnp_passive_ips[j];
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        /* one record per route */
                        list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                if (index-- > 0)
                                        continue;

                                route = list_entry(rtmp, ksock_route_t,
                                                   ksnr_list);

                                *id = peer_ni->ksnp_id;
                                *myip = route->ksnr_myipaddr;
                                *peer_ip = route->ksnr_ipaddr;
                                *port = route->ksnr_port;
                                *conn_count = route->ksnr_conn_count;
                                *share_count = route->ksnr_share_count;
                                rc = 0;
                                goto out;
                        }
                }
        }
out:
        read_unlock(&ksocknal_data.ksnd_global_lock);
        return rc;
}
321
/* Bind 'conn' to 'route': the conn takes a ref on the route, the route's
 * local IP binding is updated to the conn's (adjusting per-interface route
 * counts), and the route is marked connected for the conn's type. */
static void
ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
{
        ksock_peer_ni_t   *peer_ni = route->ksnr_peer;
        int                type = conn->ksnc_type;
        ksock_interface_t *iface;

        conn->ksnc_route = route;
        ksocknal_route_addref(route);

        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
                if (route->ksnr_myipaddr == 0) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
                               libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &conn->ksnc_myipaddr);
                } else {
                        /* rebinding: drop the route count on the old local
                         * interface before switching */
                        CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
                               "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &route->ksnr_myipaddr,
                               &conn->ksnc_myipaddr);

                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                                  route->ksnr_myipaddr);
                        if (iface != NULL)
                                iface->ksni_nroutes--;
                }
                route->ksnr_myipaddr = conn->ksnc_myipaddr;
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes++;
        }

        route->ksnr_connected |= (1<<type);
        route->ksnr_conn_count++;

        /* Successful connection => further attempts can
         * proceed immediately */
        route->ksnr_retry_interval = 0;
}
365
/* Attach a freshly-created 'route' to 'peer_ni' (caller holds the global
 * write lock).  The peer_ni's route list takes over the caller's reference
 * on the route, and any existing conns to the same IP are associated with
 * it.  A duplicate route IP is a fatal bug (LBUG). */
static void
ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
{
        struct list_head *tmp;
        ksock_conn_t     *conn;
        ksock_route_t    *route2;

        LASSERT(!peer_ni->ksnp_closing);
        LASSERT(route->ksnr_peer == NULL);
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT(route->ksnr_connected == 0);

        /* LASSERT(unique) */
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route2 = list_entry(tmp, ksock_route_t, ksnr_list);

                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                        CERROR("Duplicate route %s %pI4h\n",
                               libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr);
                        LBUG();
                }
        }

        route->ksnr_peer = peer_ni;
        ksocknal_peer_addref(peer_ni);
        /* peer_ni's routelist takes over my ref on 'route' */
        list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);

        list_for_each(tmp, &peer_ni->ksnp_conns) {
                conn = list_entry(tmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                /* keep going (typed routes) */
        }
}
406
/* Delete 'route' from its peer_ni (caller holds the global write lock):
 * close any conns using it, drop its interface route count, and unlink the
 * peer_ni itself if this leaves it with no routes and no conns. */
static void
ksocknal_del_route_locked (ksock_route_t *route)
{
        ksock_peer_ni_t   *peer_ni = route->ksnr_peer;
        ksock_interface_t *iface;
        ksock_conn_t      *conn;
        struct list_head  *ctmp;
        struct list_head  *cnxt;

        LASSERT(!route->ksnr_deleted);

        /* Close associated conns */
        list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_route != route)
                        continue;

                ksocknal_close_conn_locked(conn, 0);
        }

        /* release the route count on the bound local interface, if any */
        if (route->ksnr_myipaddr != 0) {
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes--;
        }

        route->ksnr_deleted = 1;
        list_del(&route->ksnr_list);
        ksocknal_route_decref(route);           /* drop peer_ni's ref */

        if (list_empty(&peer_ni->ksnp_routes) &&
            list_empty(&peer_ni->ksnp_conns)) {
                /* I've just removed the last route to a peer_ni with no active
                 * connections */
                ksocknal_unlink_peer_locked(peer_ni);
        }
}
446
/* Add (or share) a route to ipaddr:port for peer 'id' on 'ni'.  Creates
 * the peer_ni if it doesn't exist yet; if a route to that IP already
 * exists its share count is bumped instead of adding a duplicate.
 * Returns 0 on success, -EINVAL for wildcard ids, -ENOMEM on allocation
 * failure. */
int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
                  int port)
{
        struct list_head *tmp;
        ksock_peer_ni_t  *peer_ni;
        ksock_peer_ni_t  *peer2;
        ksock_route_t    *route;
        ksock_route_t    *route2;
        int               rc;

        if (id.nid == LNET_NID_ANY ||
            id.pid == LNET_PID_ANY)
                return (-EINVAL);

        /* Have a brand new peer_ni ready... */
        rc = ksocknal_create_peer(&peer_ni, ni, id);
        if (rc != 0)
                return rc;

        route = ksocknal_create_route (ipaddr, port);
        if (route == NULL) {
                ksocknal_peer_decref(peer_ni);
                return (-ENOMEM);
        }

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* always called with a ref on ni, so shutdown can't have started */
        LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2 != NULL) {
                /* peer_ni already exists: discard the new one, use the old */
                ksocknal_peer_decref(peer_ni);
                peer_ni = peer2;
        } else {
                /* peer_ni table takes my ref on peer_ni */
                list_add_tail(&peer_ni->ksnp_list,
                              ksocknal_nid2peerlist(id.nid));
        }

        /* look for an existing route to the same IP */
        route2 = NULL;
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route2 = list_entry(tmp, ksock_route_t, ksnr_list);

                if (route2->ksnr_ipaddr == ipaddr)
                        break;

                route2 = NULL;
        }
        if (route2 == NULL) {
                ksocknal_add_route_locked(peer_ni, route);
                route->ksnr_share_count++;
        } else {
                /* duplicate: drop the new route, share the existing one */
                ksocknal_route_decref(route);
                route2->ksnr_share_count++;
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return 0;
}
509
/* Delete routes to 'ip' (or all routes if ip == 0) from peer_ni, with the
 * global write lock held.  If no explicitly-shared routes remain
 * afterwards, all remaining (auto-created) routes and conns are torn down
 * too.  The peer_ni unlinks itself when its last conn/route goes. */
static void
ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
{
        ksock_conn_t     *conn;
        ksock_route_t    *route;
        struct list_head *tmp;
        struct list_head *nxt;
        int               nshared;

        LASSERT(!peer_ni->ksnp_closing);

        /* Extra ref prevents peer_ni disappearing until I'm done with it */
        ksocknal_peer_addref(peer_ni);

        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);

                /* no match */
                if (!(ip == 0 || route->ksnr_ipaddr == ip))
                        continue;

                route->ksnr_share_count = 0;
                /* This deletes associated conns too */
                ksocknal_del_route_locked(route);
        }

        /* count explicit shares left on the surviving routes */
        nshared = 0;
        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);
                nshared += route->ksnr_share_count;
        }

        if (nshared == 0) {
                /* remove everything else if there are no explicit entries
                 * left */

                list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                        route = list_entry(tmp, ksock_route_t, ksnr_list);

                        /* we should only be removing auto-entries */
                        LASSERT(route->ksnr_share_count == 0);
                        ksocknal_del_route_locked(route);
                }

                list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);

                        ksocknal_close_conn_locked(conn, 0);
                }
        }

        ksocknal_peer_decref(peer_ni);
                /* NB peer_ni unlinks itself when last conn/route is removed */
}
564
/* Delete peers on 'ni' matching 'id' (LNET_NID_ANY / LNET_PID_ANY act as
 * wildcards), restricted to routes to 'ip' when ip != 0.  Any txs queued
 * on a peer_ni that closes as a result are completed with -ENETDOWN after
 * the lock is dropped.  Returns 0 if at least one peer matched, else
 * -ENOENT. */
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
        struct list_head  zombies = LIST_HEAD_INIT(zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
        ksock_peer_ni_t     *peer_ni;
        int               lo;
        int               hi;
        int               i;
        int               rc = -ENOENT;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* restrict the scan to one hash bucket when the NID is known */
        if (id.nid != LNET_NID_ANY) {
                hi = (int)(ksocknal_nid2peerlist(id.nid) -
                           ksocknal_data.ksnd_peers);
                lo = hi;
        } else {
                lo = 0;
                hi = ksocknal_data.ksnd_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt,
                                   &ksocknal_data.ksnd_peers[i]) {
                        peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

                        if (peer_ni->ksnp_ni != ni)
                                continue;

                        if (!((id.nid == LNET_NID_ANY ||
                               peer_ni->ksnp_id.nid == id.nid) &&
                              (id.pid == LNET_PID_ANY ||
                               peer_ni->ksnp_id.pid == id.pid)))
                                continue;

                        ksocknal_peer_addref(peer_ni);  /* a ref for me... */

                        ksocknal_del_peer_locked(peer_ni, ip);

                        /* salvage queued txs from a closing peer_ni so they
                         * can be failed outside the lock */
                        if (peer_ni->ksnp_closing &&
                            !list_empty(&peer_ni->ksnp_tx_queue)) {
                                LASSERT(list_empty(&peer_ni->ksnp_conns));
                                LASSERT(list_empty(&peer_ni->ksnp_routes));

                                list_splice_init(&peer_ni->ksnp_tx_queue,
                                                 &zombies);
                        }

                        ksocknal_peer_decref(peer_ni);  /* ...till here */

                        rc = 0;                         /* matched! */
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_txlist_done(ni, &zombies, -ENETDOWN);

        return rc;
}
627
628 static ksock_conn_t *
629 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
630 {
631         ksock_peer_ni_t  *peer_ni;
632         struct list_head *ptmp;
633         ksock_conn_t     *conn;
634         struct list_head *ctmp;
635         int               i;
636
637         read_lock(&ksocknal_data.ksnd_global_lock);
638
639         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
640                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
641                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
642
643                         LASSERT(!peer_ni->ksnp_closing);
644
645                         if (peer_ni->ksnp_ni != ni)
646                                 continue;
647
648                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
649                                 if (index-- > 0)
650                                         continue;
651
652                                 conn = list_entry(ctmp, ksock_conn_t,
653                                                   ksnc_list);
654                                 ksocknal_conn_addref(conn);
655                                 read_unlock(&ksocknal_data. \
656                                             ksnd_global_lock);
657                                 return conn;
658                         }
659                 }
660         }
661
662         read_unlock(&ksocknal_data.ksnd_global_lock);
663         return NULL;
664 }
665
/* Pick the least-loaded scheduler (fewest conns) from CPT 'cpt'.  If that
 * CPT has no scheduler threads, fall back to the first CPT that does;
 * return NULL only when no CPT has any threads. */
static ksock_sched_t *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
        ksock_sched_t           *sched;
        int                     i;

        if (info->ksi_nthreads == 0) {
                /* scan all CPTs for one with threads; 'info' is left
                 * pointing at the chosen one when we goto select_sched */
                cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
                        if (info->ksi_nthreads > 0) {
                                CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
                                       cpt, info->ksi_cpt);
                                goto select_sched;
                        }
                }
                return NULL;
        }

select_sched:
        sched = &info->ksi_scheds[0];
        /*
         * NB: it's safe so far, but info->ksi_nthreads could be changed
         * at runtime when we have dynamic LNet configuration, then we
         * need to take care of this.
         */
        for (i = 1; i < info->ksi_nthreads; i++) {
                if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
                        sched = &info->ksi_scheds[i];
        }

        return sched;
}
698
699 static int
700 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
701 {
702         ksock_net_t *net = ni->ni_data;
703         int i;
704         int nip;
705
706         read_lock(&ksocknal_data.ksnd_global_lock);
707
708         nip = net->ksnn_ninterfaces;
709         LASSERT(nip <= LNET_NUM_INTERFACES);
710
711         /*
712          * Only offer interfaces for additional connections if I have
713          * more than one.
714          */
715         if (nip < 2) {
716                 read_unlock(&ksocknal_data.ksnd_global_lock);
717                 return 0;
718         }
719
720         for (i = 0; i < nip; i++) {
721                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
722                 LASSERT(ipaddrs[i] != 0);
723         }
724
725         read_unlock(&ksocknal_data.ksnd_global_lock);
726         return nip;
727 }
728
729 static int
730 ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
731 {
732         int   best_netmatch = 0;
733         int   best_xor      = 0;
734         int   best          = -1;
735         int   this_xor;
736         int   this_netmatch;
737         int   i;
738
739         for (i = 0; i < nips; i++) {
740                 if (ips[i] == 0)
741                         continue;
742
743                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
744                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
745
746                 if (!(best < 0 ||
747                       best_netmatch < this_netmatch ||
748                       (best_netmatch == this_netmatch &&
749                        best_xor > this_xor)))
750                         continue;
751
752                 best = i;
753                 best_netmatch = this_netmatch;
754                 best_xor = this_xor;
755         }
756
757         LASSERT (best >= 0);
758         return (best);
759 }
760
/* Choose up to n_peerips local interfaces to pair with the peer_ni's IPs,
 * recording them in peer_ni->ksnp_passive_ips[].  On return 'peerips' is
 * overwritten with the chosen local IPs and their count is returned.
 * Returns 0 when this net has fewer than two interfaces. */
static int
ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
{
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        ksock_net_t        *net = peer_ni->ksnp_ni->ni_data;
        ksock_interface_t  *iface;
        ksock_interface_t  *best_iface;
        int                 n_ips;
        int                 i;
        int                 j;
        int                 k;
        __u32               ip;
        __u32               xor;
        int                 this_netmatch;
        int                 best_netmatch;
        int                 best_npeers;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness shouldn't matter */

        /* Also note that I'm not going to return more than n_peerips
         * interfaces, even if I have more myself */

        write_lock_bh(global_lock);

        LASSERT(n_peerips <= LNET_NUM_INTERFACES);
        LASSERT(net->ksnn_ninterfaces <= LNET_NUM_INTERFACES);

        /* Only match interfaces for additional connections
         * if I have > 1 interface */
        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
                MIN(n_peerips, net->ksnn_ninterfaces);

        /* loop until we've recorded n_ips passive IPs on the peer_ni;
         * ksnp_n_passive_ips grows inside the loop body */
        for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
                /*              ^ yes really... */

                /* If we have any new interfaces, first tick off all the
                 * peer_ni IPs that match old interfaces, then choose new
                 * interfaces to match the remaining peer_ni IPS.
                 * We don't forget interfaces we've stopped using; we might
                 * start using them again... */

                if (i < peer_ni->ksnp_n_passive_ips) {
                        /* Old interface. */
                        ip = peer_ni->ksnp_passive_ips[i];
                        best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);

                        /* peer_ni passive ips are kept up to date */
                        LASSERT(best_iface != NULL);
                } else {
                        /* choose a new interface */
                        LASSERT (i == peer_ni->ksnp_n_passive_ips);

                        best_iface = NULL;
                        best_netmatch = 0;
                        best_npeers = 0;

                        /* pick the unused interface with best subnet match,
                         * ties broken by fewest peers already using it */
                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
                                iface = &net->ksnn_interfaces[j];
                                ip = iface->ksni_ipaddr;

                                for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
                                        if (peer_ni->ksnp_passive_ips[k] == ip)
                                                break;

                                if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
                                        continue;

                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
                                xor = (ip ^ peerips[k]);
                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

                                if (!(best_iface == NULL ||
                                      best_netmatch < this_netmatch ||
                                      (best_netmatch == this_netmatch &&
                                       best_npeers > iface->ksni_npeers)))
                                        continue;

                                best_iface = iface;
                                best_netmatch = this_netmatch;
                                best_npeers = iface->ksni_npeers;
                        }

                        LASSERT(best_iface != NULL);

                        best_iface->ksni_npeers++;
                        ip = best_iface->ksni_ipaddr;
                        peer_ni->ksnp_passive_ips[i] = ip;
                        peer_ni->ksnp_n_passive_ips = i+1;
                }

                /* mark the best matching peer_ni IP used */
                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
                peerips[j] = 0;
        }

        /* Overwrite input peer_ni IP addresses */
        memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));

        write_unlock_bh(global_lock);

        return (n_ips);
}
866
/*
 * Create a route to 'peer_ni' for each peer IP address we don't already
 * have a route to, binding each new route to the best local interface
 * (prefer a subnet match with the peer address; break ties by fewest
 * existing routes on the interface).  Called after the HELLO interface
 * exchange on an active connection.
 *
 * 'port' is the peer's TCP port for the new routes; 'peer_ipaddrs'
 * holds 'npeer_ipaddrs' peer addresses learned from the HELLO.
 */
static void
ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
        ksock_route_t           *newroute = NULL;
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        struct lnet_ni *ni = peer_ni->ksnp_ni;
        ksock_net_t             *net = ni->ni_data;
        struct list_head        *rtmp;
        ksock_route_t           *route;
        ksock_interface_t       *iface;
        ksock_interface_t       *best_iface;
        int                     best_netmatch;
        int                     this_netmatch;
        int                     best_nroutes;
        int                     i;
        int                     j;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness here shouldn't matter */

        write_lock_bh(global_lock);

        if (net->ksnn_ninterfaces < 2) {
                /* Only create additional connections
                 * if I have > 1 interface */
                write_unlock_bh(global_lock);
                return;
        }

        LASSERT(npeer_ipaddrs <= LNET_NUM_INTERFACES);

        for (i = 0; i < npeer_ipaddrs; i++) {
                if (newroute != NULL) {
                        /* reuse the route left unconsumed by the previous
                         * iteration: just retarget its peer address */
                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
                } else {
                        /* drop the lock to allocate; peer_ni state may
                         * change meanwhile, so ksnp_closing is re-checked
                         * below */
                        write_unlock_bh(global_lock);

                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                        if (newroute == NULL)
                                return;

                        write_lock_bh(global_lock);
                }

                if (peer_ni->ksnp_closing) {
                        /* peer_ni got closed under me */
                        break;
                }

                /* Already got a route? */
                route = NULL;
                list_for_each(rtmp, &peer_ni->ksnp_routes) {
                        route = list_entry(rtmp, ksock_route_t, ksnr_list);

                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;

                        route = NULL;
                }
                if (route != NULL)
                        continue;

                best_iface = NULL;
                best_nroutes = 0;
                best_netmatch = 0;

                LASSERT(net->ksnn_ninterfaces <= LNET_NUM_INTERFACES);

                /* Select interface to connect from */
                for (j = 0; j < net->ksnn_ninterfaces; j++) {
                        iface = &net->ksnn_interfaces[j];

                        /* Using this interface already? */
                        list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                route = list_entry(rtmp, ksock_route_t,
                                                   ksnr_list);

                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
                                        break;

                                route = NULL;
                        }
                        if (route != NULL)
                                continue;

                        /* 1 if 'iface' is on the same subnet as the peer
                         * address, 0 otherwise */
                        this_netmatch = (((iface->ksni_ipaddr ^
                                           newroute->ksnr_ipaddr) &
                                           iface->ksni_netmask) == 0) ? 1 : 0;

                        /* keep the current best unless this interface is a
                         * better subnet match, or an equal match carrying
                         * fewer routes */
                        if (!(best_iface == NULL ||
                              best_netmatch < this_netmatch ||
                              (best_netmatch == this_netmatch &&
                               best_nroutes > iface->ksni_nroutes)))
                                continue;

                        best_iface = iface;
                        best_netmatch = this_netmatch;
                        best_nroutes = iface->ksni_nroutes;
                }

                if (best_iface == NULL)
                        continue;

                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
                best_iface->ksni_nroutes++;

                /* peer_ni keeps the route; clear our local pointer so it
                 * isn't decref'd below and a fresh one is allocated next
                 * iteration */
                ksocknal_add_route_locked(peer_ni, newroute);
                newroute = NULL;
        }

        write_unlock_bh(global_lock);
        if (newroute != NULL)
                ksocknal_route_decref(newroute);
}
984
985 int
986 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
987 {
988         ksock_connreq_t *cr;
989         int              rc;
990         __u32            peer_ip;
991         int              peer_port;
992
993         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
994         LASSERT(rc == 0);               /* we succeeded before */
995
996         LIBCFS_ALLOC(cr, sizeof(*cr));
997         if (cr == NULL) {
998                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
999                                    "%pI4h: memory exhausted\n", &peer_ip);
1000                 return -ENOMEM;
1001         }
1002
1003         lnet_ni_addref(ni);
1004         cr->ksncr_ni   = ni;
1005         cr->ksncr_sock = sock;
1006
1007         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1008
1009         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
1010         wake_up(&ksocknal_data.ksnd_connd_waitq);
1011
1012         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1013         return 0;
1014 }
1015
1016 static int
1017 ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
1018 {
1019         ksock_route_t *route;
1020
1021         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1022                 if (route->ksnr_ipaddr == ipaddr)
1023                         return route->ksnr_connecting;
1024         }
1025         return 0;
1026 }
1027
/*
 * Complete the setup of a connection whose TCP socket 'sock' is already
 * open.  route != NULL means an active (outgoing) connection of the
 * given 'type'; route == NULL means a passive (accepted) connection
 * (type == SOCKLND_CONN_NONE) whose peer identity, type and protocol
 * are learned from its HELLO.  On success the conn is linked into the
 * peer_ni's conn list, assigned a scheduler, and its socket callbacks
 * are armed.
 *
 * Returns 0 on success or a negative errno; the socket is released on
 * all failure paths (failed_2 falls through failed_1 into failed_0).
 *
 * NB 'rc' transiently carries POSITIVE error codes (EALREADY, ESTALE,
 * EPROTO) meaning "benign, request retry"; they are normalized to
 * negative before being passed on (see rc2 below).
 */
int
ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
                     struct socket *sock, int type)
{
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        struct list_head        zombies = LIST_HEAD_INIT(zombies);
        struct lnet_process_id peerid;
        struct list_head        *tmp;
        __u64              incarnation;
        ksock_conn_t      *conn;
        ksock_conn_t      *conn2;
        ksock_peer_ni_t      *peer_ni = NULL;
        ksock_peer_ni_t      *peer2;
        ksock_sched_t     *sched;
        struct ksock_hello_msg *hello;
        int                cpt;
        ksock_tx_t        *tx;
        ksock_tx_t        *txtmp;
        int                rc;
        int                rc2;
        int                active;
        char              *warn = NULL;

        active = (route != NULL);

        LASSERT (active == (type != SOCKLND_CONN_NONE));

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                rc = -ENOMEM;
                goto failed_0;
        }

        conn->ksnc_peer = NULL;
        conn->ksnc_route = NULL;
        conn->ksnc_sock = sock;
        /* 2 ref, 1 for conn, another extra ref prevents socket
         * being closed before establishment of connection */
        atomic_set (&conn->ksnc_sock_refcount, 2);
        conn->ksnc_type = type;
        ksocknal_lib_save_callback(sock, conn);
        atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

        conn->ksnc_rx_ready = 0;
        conn->ksnc_rx_scheduled = 0;

        INIT_LIST_HEAD(&conn->ksnc_tx_queue);
        conn->ksnc_tx_ready = 0;
        conn->ksnc_tx_scheduled = 0;
        conn->ksnc_tx_carrier = NULL;
        atomic_set (&conn->ksnc_tx_nob, 0);

        /* scratch HELLO message, sized for the maximum IP list */
        LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
                                     kshm_ips[LNET_NUM_INTERFACES]));
        if (hello == NULL) {
                rc = -ENOMEM;
                goto failed_1;
        }

        /* stash conn's local and remote addrs */
        rc = ksocknal_lib_get_conn_addrs (conn);
        if (rc != 0)
                goto failed_1;

        /* Find out/confirm peer_ni's NID and connection type and get the
         * vector of interfaces she's willing to let me connect to.
         * Passive connections use the listener timeout since the peer_ni sends
         * eagerly */

        if (active) {
                peer_ni = route->ksnr_peer;
                LASSERT(ni == peer_ni->ksnp_ni);

                /* Active connection sends HELLO eagerly */
                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                peerid = peer_ni->ksnp_id;

                write_lock_bh(global_lock);
                conn->ksnc_proto = peer_ni->ksnp_proto;
                write_unlock_bh(global_lock);

                if (conn->ksnc_proto == NULL) {
                         /* protocol not yet negotiated: default to v3 */
                         conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                         if (*ksocknal_tunables.ksnd_protocol == 2)
                                 conn->ksnc_proto = &ksocknal_protocol_v2x;
                         else if (*ksocknal_tunables.ksnd_protocol == 1)
                                 conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                }

                rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
                if (rc != 0)
                        goto failed_1;
        } else {
                peerid.nid = LNET_NID_ANY;
                peerid.pid = LNET_PID_ANY;

                /* Passive, get protocol from peer_ni */
                conn->ksnc_proto = NULL;
        }

        rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
        if (rc < 0)
                goto failed_1;

        /* +ve rc (protocol hint) only happens on active connections */
        LASSERT (rc == 0 || active);
        LASSERT (conn->ksnc_proto != NULL);
        LASSERT (peerid.nid != LNET_NID_ANY);

        cpt = lnet_cpt_of_nid(peerid.nid, ni);

        if (active) {
                ksocknal_peer_addref(peer_ni);
                write_lock_bh(global_lock);
        } else {
                /* passive: find or create the peer_ni this conn belongs to */
                rc = ksocknal_create_peer(&peer_ni, ni, peerid);
                if (rc != 0)
                        goto failed_1;

                write_lock_bh(global_lock);

                /* called with a ref on ni, so shutdown can't have started */
                LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
                        /* NB this puts an "empty" peer_ni in the peer_ni
                         * table (which takes my ref) */
                        list_add_tail(&peer_ni->ksnp_list,
                                      ksocknal_nid2peerlist(peerid.nid));
                } else {
                        /* lost a create race: use the existing peer_ni */
                        ksocknal_peer_decref(peer_ni);
                        peer_ni = peer2;
                }

                /* +1 ref for me */
                ksocknal_peer_addref(peer_ni);
                peer_ni->ksnp_accepting++;

                /* Am I already connecting to this guy?  Resolve in
                 * favour of higher NID... */
                if (peerid.nid < ni->ni_nid &&
                    ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
                        rc = EALREADY;  /* +ve: benign, normalized below */
                        warn = "connection race resolution";
                        goto failed_2;
                }
        }

        if (peer_ni->ksnp_closing ||
            (active && route->ksnr_deleted)) {
                /* peer_ni/route got closed under me */
                rc = -ESTALE;
                warn = "peer_ni/route removed";
                goto failed_2;
        }

        if (peer_ni->ksnp_proto == NULL) {
                /* Never connected before.
                 * NB recv_hello may have returned EPROTO to signal my peer_ni
                 * wants a different protocol than the one I asked for.
                 */
                LASSERT(list_empty(&peer_ni->ksnp_conns));

                peer_ni->ksnp_proto = conn->ksnc_proto;
                peer_ni->ksnp_incarnation = incarnation;
        }

        if (peer_ni->ksnp_proto != conn->ksnc_proto ||
            peer_ni->ksnp_incarnation != incarnation) {
                /* peer_ni rebooted or I've got the wrong protocol version */
                ksocknal_close_peer_conns_locked(peer_ni, 0, 0);

                peer_ni->ksnp_proto = NULL;
                rc = ESTALE;    /* +ve: benign, normalized below */
                warn = peer_ni->ksnp_incarnation != incarnation ?
                       "peer_ni rebooted" :
                       "wrong proto version";
                goto failed_2;
        }

        /* dispatch on the (possibly +ve) result of recv_hello */
        switch (rc) {
        default:
                LBUG();
        case 0:
                break;
        case EALREADY:
                warn = "lost conn race";
                goto failed_2;
        case EPROTO:
                warn = "retry with different protocol version";
                goto failed_2;
        }

        /* Refuse to duplicate an existing connection, unless this is a
         * loopback connection */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
                            conn2->ksnc_type != conn->ksnc_type)
                                continue;

                        /* Reply on a passive connection attempt so the peer_ni
                         * realises we're connected. */
                        LASSERT (rc == 0);
                        if (!active)
                                rc = EALREADY;

                        warn = "duplicate";
                        goto failed_2;
                }
        }

        /* If the connection created by this route didn't bind to the IP
         * address the route connected to, the connection/route matching
         * code below probably isn't going to work. */
        if (active &&
            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
                CERROR("Route %s %pI4h connected to %pI4h\n",
                       libcfs_id2str(peer_ni->ksnp_id),
                       &route->ksnr_ipaddr,
                       &conn->ksnc_ipaddr);
        }

        /* Search for a route corresponding to the new connection and
         * create an association.  This allows incoming connections created
         * by routes in my peer_ni to match my own route entries so I don't
         * continually create duplicate routes. */
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);

                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                break;
        }

        conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
        peer_ni->ksnp_last_alive = ktime_get_real_seconds();
        peer_ni->ksnp_send_keepalive = 0;
        peer_ni->ksnp_error = 0;

        sched = ksocknal_choose_scheduler_locked(cpt);
        if (!sched) {
                CERROR("no schedulers available. node is unhealthy\n");
                goto failed_2;
        }
        /*
         * The cpt might have changed if we ended up selecting a non cpt
         * native scheduler. So use the scheduler's cpt instead.
         */
        cpt = sched->kss_info->ksi_cpt;
        sched->kss_nconns++;
        conn->ksnc_scheduler = sched;

        conn->ksnc_tx_last_post = ktime_get_real_seconds();
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
        conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
        smp_mb();   /* order with adding to peer_ni's conn list */

        list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
        ksocknal_conn_addref(conn);

        ksocknal_new_packet(conn, 0);

        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

        /* Take packets blocking for this connection. */
        list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
                    SOCKNAL_MATCH_NO)
                        continue;

                list_del(&tx->tx_list);
                ksocknal_queue_tx_locked(tx, conn);
        }

        write_unlock_bh(global_lock);

        /* We've now got a new connection.  Any errors from here on are just
         * like "normal" comms errors and we close the connection normally.
         * NB (a) we still have to send the reply HELLO for passive
         *        connections,
         *    (b) normal I/O on the conn is blocked until I setup and call the
         *        socket callbacks.
         */

        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
               " incarnation:%lld sched[%d:%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
               conn->ksnc_port, incarnation, cpt,
               (int)(sched - &sched->kss_info->ksi_scheds[0]));

        if (active) {
                /* additional routes after interface exchange? */
                ksocknal_create_routes(peer_ni, conn->ksnc_port,
                                       hello->kshm_ips, hello->kshm_nips);
        } else {
                /* passive: send the reply HELLO now */
                hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
                                                       hello->kshm_nips);
                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
        }

        LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
                                    kshm_ips[LNET_NUM_INTERFACES]));

        /* setup the socket AFTER I've received hello (it disables
         * SO_LINGER).  I might call back to the acceptor who may want
         * to send a protocol version response and then close the
         * socket; this ensures the socket only tears down after the
         * response has been sent. */
        if (rc == 0)
                rc = ksocknal_lib_setup_sock(sock);

        write_lock_bh(global_lock);

        /* NB my callbacks block while I hold ksnd_global_lock */
        ksocknal_lib_set_callback(sock, conn);

        if (!active)
                peer_ni->ksnp_accepting--;

        write_unlock_bh(global_lock);

        if (rc != 0) {
                write_lock_bh(global_lock);
                if (!conn->ksnc_closing) {
                        /* could be closed by another thread */
                        ksocknal_close_conn_locked(conn, rc);
                }
                write_unlock_bh(global_lock);
        } else if (ksocknal_connsock_addref(conn) == 0) {
                /* Allow I/O to proceed. */
                ksocknal_read_callback(conn);
                ksocknal_write_callback(conn);
                ksocknal_connsock_decref(conn);
        }

        /* drop the extra sock ref and my conn ref taken at creation */
        ksocknal_connsock_decref(conn);
        ksocknal_conn_decref(conn);
        return rc;

failed_2:
        if (!peer_ni->ksnp_closing &&
            list_empty(&peer_ni->ksnp_conns) &&
            list_empty(&peer_ni->ksnp_routes)) {
                /* steal the peer_ni's queued txs onto 'zombies'
                 * (list_add + list_del_init splices the whole queue) */
                list_add(&zombies, &peer_ni->ksnp_tx_queue);
                list_del_init(&peer_ni->ksnp_tx_queue);
                ksocknal_unlink_peer_locked(peer_ni);
        }

        write_unlock_bh(global_lock);

        if (warn != NULL) {
                if (rc < 0)
                        CERROR("Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
                else
                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
                              libcfs_id2str(peerid), conn->ksnc_type, warn);
        }

        if (!active) {
                if (rc > 0) {
                        /* Request retry by replying with CONN_NONE
                         * ksnc_proto has been set already */
                        conn->ksnc_type = SOCKLND_CONN_NONE;
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
                }

                write_lock_bh(global_lock);
                peer_ni->ksnp_accepting--;
                write_unlock_bh(global_lock);
        }

        /*
         * If we get here without an error code, just use -EALREADY.
         * Depending on how we got here, the error may be positive
         * or negative. Normalize the value for ksocknal_txlist_done().
         */
        rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
        ksocknal_txlist_done(ni, &zombies, rc2);
        ksocknal_peer_decref(peer_ni);

failed_1:
        if (hello != NULL)
                LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
                                            kshm_ips[LNET_NUM_INTERFACES]));

        LIBCFS_FREE(conn, sizeof(*conn));

failed_0:
        sock_release(sock);
        return rc;
}
1431
void
ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
{
        /* This just does the immmediate housekeeping, and queues the
         * connection for the reaper to terminate.
         * Caller holds ksnd_global_lock exclusively in irq context.
         * 'error' is the close reason, stashed in ksnp_error when this
         * was the peer_ni's last conn. */
        ksock_peer_ni_t      *peer_ni = conn->ksnc_peer;
        ksock_route_t     *route;
        ksock_conn_t      *conn2;
        struct list_head  *tmp;

        LASSERT(peer_ni->ksnp_error == 0);
        LASSERT(!conn->ksnc_closing);
        conn->ksnc_closing = 1;

        /* ksnd_deathrow_conns takes over peer_ni's ref */
        list_del(&conn->ksnc_list);

        route = conn->ksnc_route;
        if (route != NULL) {
                /* dissociate conn from route... */
                LASSERT(!route->ksnr_deleted);
                LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

                /* is another conn of the same type still bound to this
                 * route?  If not, clear the route's "connected" bit for
                 * this conn type */
                conn2 = NULL;
                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
                                break;

                        conn2 = NULL;
                }
                if (conn2 == NULL)
                        route->ksnr_connected &= ~(1 << conn->ksnc_type);

                conn->ksnc_route = NULL;

                ksocknal_route_decref(route);   /* drop conn's ref on route */
        }

        if (list_empty(&peer_ni->ksnp_conns)) {
                /* No more connections to this peer_ni */

                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
                                ksock_tx_t *tx;

                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

                        /* throw them to the last connection...,
                         * these TXs will be send to /dev/null by scheduler */
                        list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
                                            tx_list)
                                ksocknal_tx_prep(conn, tx);

                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
                        list_splice_init(&peer_ni->ksnp_tx_queue,
                                         &conn->ksnc_tx_queue);
                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                }

                /* renegotiate protocol version */
                peer_ni->ksnp_proto = NULL;
                /* stash last conn close reason */
                peer_ni->ksnp_error = error;

                if (list_empty(&peer_ni->ksnp_routes)) {
                        /* I've just closed last conn belonging to a
                         * peer_ni with no routes to it */
                        ksocknal_unlink_peer_locked(peer_ni);
                }
        }

        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        /* hand the conn over to the reaper thread for termination */
        list_add_tail(&conn->ksnc_list,
                      &ksocknal_data.ksnd_deathrow_conns);
        wake_up(&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
1514
1515 void
1516 ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
1517 {
1518         int        notify = 0;
1519         cfs_time_t last_alive = 0;
1520
1521         /* There has been a connection failure or comms error; but I'll only
1522          * tell LNET I think the peer_ni is dead if it's to another kernel and
1523          * there are no connections or connection attempts in existence. */
1524
1525         read_lock(&ksocknal_data.ksnd_global_lock);
1526
1527         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1528              list_empty(&peer_ni->ksnp_conns) &&
1529              peer_ni->ksnp_accepting == 0 &&
1530              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1531                 notify = 1;
1532                 last_alive = peer_ni->ksnp_last_alive;
1533         }
1534
1535         read_unlock(&ksocknal_data.ksnd_global_lock);
1536
1537         if (notify)
1538                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1539                             last_alive);
1540 }
1541
1542 void
1543 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1544 {
1545         ksock_peer_ni_t  *peer_ni = conn->ksnc_peer;
1546         ksock_tx_t       *tx;
1547         ksock_tx_t       *tmp;
1548         struct list_head  zlist = LIST_HEAD_INIT(zlist);
1549
1550         /* NB safe to finalize TXs because closing of socket will
1551          * abort all buffered data */
1552         LASSERT(conn->ksnc_sock == NULL);
1553
1554         spin_lock(&peer_ni->ksnp_lock);
1555
1556         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1557                 if (tx->tx_conn != conn)
1558                         continue;
1559
1560                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1561
1562                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1563                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1564                 list_del(&tx->tx_zc_list);
1565                 list_add(&tx->tx_zc_list, &zlist);
1566         }
1567
1568         spin_unlock(&peer_ni->ksnp_lock);
1569
1570         while (!list_empty(&zlist)) {
1571                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1572
1573                 list_del(&tx->tx_zc_list);
1574                 ksocknal_tx_decref(tx);
1575         }
1576 }
1577
void
ksocknal_terminate_conn(ksock_conn_t *conn)
{
        /* This gets called by the reaper (guaranteed thread context) to
         * disengage the socket from its callbacks and close it.
         * ksnc_refcount will eventually hit zero, and then the reaper will
         * destroy it. */
        ksock_peer_ni_t     *peer_ni = conn->ksnc_peer;
        ksock_sched_t    *sched = conn->ksnc_scheduler;
        int               failed = 0;

        LASSERT(conn->ksnc_closing);

        /* wake up the scheduler to "send" all remaining packets to /dev/null */
        spin_lock_bh(&sched->kss_lock);

        /* a closing conn is always ready to tx */
        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled &&
            !list_empty(&conn->ksnc_tx_queue)) {
                list_add_tail(&conn->ksnc_tx_list,
                               &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up (&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);

        /* serialise with callbacks */
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

        /* OK, so this conn may not be completely disengaged from its
         * scheduler yet, but it _has_ committed to terminate... */
        conn->ksnc_scheduler->kss_nconns--;

        if (peer_ni->ksnp_error != 0) {
                /* peer_ni's last conn closed in error */
                LASSERT(list_empty(&peer_ni->ksnp_conns));
                failed = 1;
                peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        /* notify LNet of the failure outside the global lock */
        if (failed)
                ksocknal_peer_failed(peer_ni);

        /* The socket is closed on the final put; either here, or in
         * ksocknal_{send,recv}msg().  Since we set up the linger2 option
         * when the connection was established, this will close the socket
         * immediately, aborting anything buffered in it. Any hung
         * zero-copy transmits will therefore complete in finite time. */
        ksocknal_connsock_decref(conn);
}
1638
/* Hand a fully-released conn (ksnc_conn_refcount == 0) to the reaper for
 * final destruction via ksocknal_destroy_conn().  The wakeup happens while
 * the reaper lock is held, so the reaper cannot miss the new zombie. */
1639 void
1640 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1641 {
1642         /* Queue the conn for the reaper to destroy */
1643
1644         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1645         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1646
1647         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1648         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1649
1650         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1651 }
1652
/* Final destruction of a conn by the reaper: both refcounts must already be
 * zero and the socket detached.  Any receive that was still in progress is
 * failed (lnet_finalize with -EIO for LNet payloads) before the peer ref is
 * dropped and the conn memory freed. */
1653 void
1654 ksocknal_destroy_conn (ksock_conn_t *conn)
1655 {
1656         cfs_time_t      last_rcv;
1657
1658         /* Final coup-de-grace of the reaper */
1659         CDEBUG (D_NET, "connection %p\n", conn);
1660
1661         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1662         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1663         LASSERT (conn->ksnc_sock == NULL);
1664         LASSERT (conn->ksnc_route == NULL);
1665         LASSERT (!conn->ksnc_tx_scheduled);
1666         LASSERT (!conn->ksnc_rx_scheduled);
1667         LASSERT(list_empty(&conn->ksnc_tx_queue));
1668
1669         /* complete current receive if any */
1670         switch (conn->ksnc_rx_state) {
1671         case SOCKNAL_RX_LNET_PAYLOAD:
1672                 /* NOTE(review): ksnc_rx_deadline is set via cfs_time_shift()
1673                  * (jiffies) elsewhere, but here it is combined with
1674                  * ktime_get_real_seconds() (seconds) -- the "last alive"
1675                  * figure printed below may be in mixed units; verify against
1676                  * the ongoing time64 conversion. */
1677                 last_rcv = conn->ksnc_rx_deadline -
1678                            cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1679                 CERROR("Completing partial receive from %s[%d], "
1680                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1681                        "last alive is %ld secs ago\n",
1682                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1683                        &conn->ksnc_ipaddr, conn->ksnc_port,
1684                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1685                        cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
1686                                         last_rcv)));
1687                 /* fail the interrupted LNet message back to LNet */
1688                 lnet_finalize(conn->ksnc_cookie, -EIO);
1689                 break;
1690         case SOCKNAL_RX_LNET_HEADER:
1691                 if (conn->ksnc_rx_started)
1692                         CERROR("Incomplete receive of lnet header from %s, "
1693                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1694                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1695                                &conn->ksnc_ipaddr, conn->ksnc_port,
1696                                conn->ksnc_proto->pro_version);
1697                 break;
1698         case SOCKNAL_RX_KSM_HEADER:
1699                 if (conn->ksnc_rx_started)
1700                         CERROR("Incomplete receive of ksock message from %s, "
1701                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1702                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1703                                &conn->ksnc_ipaddr, conn->ksnc_port,
1704                                conn->ksnc_proto->pro_version);
1705                 break;
1706         case SOCKNAL_RX_SLOP:
1707                 if (conn->ksnc_rx_started)
1708                         CERROR("Incomplete receive of slops from %s, "
1709                                "ip %pI4h:%d, with error\n",
1710                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1711                                &conn->ksnc_ipaddr, conn->ksnc_port);
1712                break;
1713         default:
1714                 LBUG ();
1715                 break;
1716         }
1717
1718         ksocknal_peer_decref(conn->ksnc_peer);
1719
1720         LIBCFS_FREE (conn, sizeof (*conn));
1721 }
1716
1717 int
1718 ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
1719 {
1720         ksock_conn_t       *conn;
1721         struct list_head         *ctmp;
1722         struct list_head         *cnxt;
1723         int                 count = 0;
1724
1725         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1726                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1727
1728                 if (ipaddr == 0 ||
1729                     conn->ksnc_ipaddr == ipaddr) {
1730                         count++;
1731                         ksocknal_close_conn_locked (conn, why);
1732                 }
1733         }
1734
1735         return (count);
1736 }
1737
1738 int
1739 ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1740 {
1741         ksock_peer_ni_t     *peer_ni = conn->ksnc_peer;
1742         __u32             ipaddr = conn->ksnc_ipaddr;
1743         int               count;
1744
1745         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1746
1747         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1748
1749         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1750
1751         return (count);
1752 }
1753
1754 int
1755 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1756 {
1757         ksock_peer_ni_t       *peer_ni;
1758         struct list_head         *ptmp;
1759         struct list_head         *pnxt;
1760         int                 lo;
1761         int                 hi;
1762         int                 i;
1763         int                 count = 0;
1764
1765         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1766
1767         if (id.nid != LNET_NID_ANY)
1768                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1769         else {
1770                 lo = 0;
1771                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1772         }
1773
1774         for (i = lo; i <= hi; i++) {
1775                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1776
1777                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
1778
1779                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1780                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1781                                 continue;
1782
1783                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1784                 }
1785         }
1786
1787         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1788
1789         /* wildcards always succeed */
1790         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1791                 return (0);
1792
1793         return (count == 0 ? -ENOENT : 0);
1794 }
1795
1796 void
1797 ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
1798 {
1799         /* The router is telling me she's been notified of a change in
1800          * gateway state....
1801          */
1802         struct lnet_process_id id = {
1803                 .nid    = gw_nid,
1804                 .pid    = LNET_PID_ANY,
1805         };
1806
1807         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1808                 alive ? "up" : "down");
1809
1810         if (!alive) {
1811                 /* If the gateway crashed, close all open connections... */
1812                 ksocknal_close_matching_conns (id, 0);
1813                 return;
1814         }
1815
1816         /* ...otherwise do nothing.  We can only establish new connections
1817          * if we have autroutes, and these connect on demand. */
1818 }
1819
/* LND "query" handler: report when @nid was last known alive via *when and,
 * if a connectable route exists, (re)launch connections to it.  Liveness is
 * inferred from the socket send buffer draining (data got ACKed). */
1820 void
1821 ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
1822 {
1823         int connect = 1;
1824         time64_t last_alive = 0;
1825         time64_t now = ktime_get_real_seconds();
1826         ksock_peer_ni_t *peer_ni = NULL;
1827         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1828         struct lnet_process_id id = {
1829                 .nid = nid,
1830                 .pid = LNET_PID_LUSTRE,
1831         };
1832
1833         read_lock(glock);
1834
1835         peer_ni = ksocknal_find_peer_locked(ni, id);
1836         if (peer_ni != NULL) {
1837                 struct list_head       *tmp;
1838                 ksock_conn_t     *conn;
1839                 int               bufnob;
1840
1841                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1842                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1843                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1844
1845                         /* send buffer shrank since last sample => the peer
1846                          * ACKed data, so it was alive "now" */
1847                         if (bufnob < conn->ksnc_tx_bufnob) {
1848                                 /* something got ACKed */
1849                                 /* NOTE(review): cfs_time_shift() is jiffies-
1850                                  * based while 'now' is time64 seconds --
1851                                  * confirm units during time64 conversion */
1852                                 conn->ksnc_tx_deadline =
1853                                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1854                                 peer_ni->ksnp_last_alive = now;
1855                                 conn->ksnc_tx_bufnob = bufnob;
1856                         }
1857                 }
1858
1859                 last_alive = peer_ni->ksnp_last_alive;
1860                 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1861                         connect = 0;
1862         }
1863
1864         read_unlock(glock);
1865
1866         if (last_alive != 0)
1867                 *when = last_alive;
1868
1869         CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
1870                libcfs_nid2str(nid), peer_ni,
1871                last_alive ? cfs_duration_sec(now - last_alive) : -1,
1872                connect);
1873
1874         if (!connect)
1875                 return;
1876
1877         /* ensure a peer/route exists, then kick off connection attempts */
1878         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1879
1880         write_lock_bh(glock);
1881
1882         peer_ni = ksocknal_find_peer_locked(ni, id);
1883         if (peer_ni != NULL)
1884                 ksocknal_launch_all_connections_locked(peer_ni);
1885
1886         write_unlock_bh(glock);
1887         return;
1888 }
1883
1884 static void
1885 ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
1886 {
1887         int               index;
1888         int               i;
1889         struct list_head       *tmp;
1890         ksock_conn_t     *conn;
1891
1892         for (index = 0; ; index++) {
1893                 read_lock(&ksocknal_data.ksnd_global_lock);
1894
1895                 i = 0;
1896                 conn = NULL;
1897
1898                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1899                         if (i++ == index) {
1900                                 conn = list_entry(tmp, ksock_conn_t,
1901                                                        ksnc_list);
1902                                 ksocknal_conn_addref(conn);
1903                                 break;
1904                         }
1905                 }
1906
1907                 read_unlock(&ksocknal_data.ksnd_global_lock);
1908
1909                 if (conn == NULL)
1910                         break;
1911
1912                 ksocknal_lib_push_conn (conn);
1913                 ksocknal_conn_decref(conn);
1914         }
1915 }
1916
1917 static int
1918 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1919 {
1920         struct list_head *start;
1921         struct list_head *end;
1922         struct list_head *tmp;
1923         int               rc = -ENOENT;
1924         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1925
1926         if (id.nid == LNET_NID_ANY) {
1927                 start = &ksocknal_data.ksnd_peers[0];
1928                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1929         } else {
1930                 start = end = ksocknal_nid2peerlist(id.nid);
1931         }
1932
1933         for (tmp = start; tmp <= end; tmp++) {
1934                 int     peer_off; /* searching offset in peer_ni hash table */
1935
1936                 for (peer_off = 0; ; peer_off++) {
1937                         ksock_peer_ni_t *peer_ni;
1938                         int           i = 0;
1939
1940                         read_lock(&ksocknal_data.ksnd_global_lock);
1941                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1942                                 if (!((id.nid == LNET_NID_ANY ||
1943                                        id.nid == peer_ni->ksnp_id.nid) &&
1944                                       (id.pid == LNET_PID_ANY ||
1945                                        id.pid == peer_ni->ksnp_id.pid)))
1946                                         continue;
1947
1948                                 if (i++ == peer_off) {
1949                                         ksocknal_peer_addref(peer_ni);
1950                                         break;
1951                                 }
1952                         }
1953                         read_unlock(&ksocknal_data.ksnd_global_lock);
1954
1955                         if (i == 0) /* no match */
1956                                 break;
1957
1958                         rc = 0;
1959                         ksocknal_push_peer(peer_ni);
1960                         ksocknal_peer_decref(peer_ni);
1961                 }
1962         }
1963         return rc;
1964 }
1965
1966 static int
1967 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1968 {
1969         ksock_net_t *net = ni->ni_data;
1970         ksock_interface_t *iface;
1971         int rc;
1972         int i;
1973         int j;
1974         struct list_head *ptmp;
1975         ksock_peer_ni_t *peer_ni;
1976         struct list_head *rtmp;
1977         ksock_route_t *route;
1978
1979         if (ipaddress == 0 ||
1980             netmask == 0)
1981                 return -EINVAL;
1982
1983         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1984
1985         iface = ksocknal_ip2iface(ni, ipaddress);
1986         if (iface != NULL) {
1987                 /* silently ignore dups */
1988                 rc = 0;
1989         } else if (net->ksnn_ninterfaces == LNET_NUM_INTERFACES) {
1990                 rc = -ENOSPC;
1991         } else {
1992                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1993
1994                 iface->ksni_ipaddr = ipaddress;
1995                 iface->ksni_netmask = netmask;
1996                 iface->ksni_nroutes = 0;
1997                 iface->ksni_npeers = 0;
1998
1999                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2000                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
2001                                 peer_ni = list_entry(ptmp, ksock_peer_ni_t,
2002                                                      ksnp_list);
2003
2004                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
2005                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
2006                                                 iface->ksni_npeers++;
2007
2008                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
2009                                         route = list_entry(rtmp,
2010                                                            ksock_route_t,
2011                                                            ksnr_list);
2012
2013                                         if (route->ksnr_myipaddr == ipaddress)
2014                                                 iface->ksni_nroutes++;
2015                                 }
2016                         }
2017                 }
2018
2019                 rc = 0;
2020                 /* NB only new connections will pay attention to the new interface! */
2021         }
2022
2023         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2024
2025         return rc;
2026 }
2027
2028 static void
2029 ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
2030 {
2031         struct list_head         *tmp;
2032         struct list_head         *nxt;
2033         ksock_route_t      *route;
2034         ksock_conn_t       *conn;
2035         int                 i;
2036         int                 j;
2037
2038         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2039                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2040                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2041                                 peer_ni->ksnp_passive_ips[j-1] =
2042                                         peer_ni->ksnp_passive_ips[j];
2043                         peer_ni->ksnp_n_passive_ips--;
2044                         break;
2045                 }
2046
2047         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2048                 route = list_entry(tmp, ksock_route_t, ksnr_list);
2049
2050                 if (route->ksnr_myipaddr != ipaddr)
2051                         continue;
2052
2053                 if (route->ksnr_share_count != 0) {
2054                         /* Manually created; keep, but unbind */
2055                         route->ksnr_myipaddr = 0;
2056                 } else {
2057                         ksocknal_del_route_locked(route);
2058                 }
2059         }
2060
2061         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2062                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2063
2064                 if (conn->ksnc_myipaddr == ipaddr)
2065                         ksocknal_close_conn_locked (conn, 0);
2066         }
2067 }
2068
2069 static int
2070 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2071 {
2072         ksock_net_t       *net = ni->ni_data;
2073         int                rc = -ENOENT;
2074         struct list_head        *tmp;
2075         struct list_head        *nxt;
2076         ksock_peer_ni_t      *peer_ni;
2077         __u32              this_ip;
2078         int                i;
2079         int                j;
2080
2081         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2082
2083         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2084                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2085
2086                 if (!(ipaddress == 0 ||
2087                       ipaddress == this_ip))
2088                         continue;
2089
2090                 rc = 0;
2091
2092                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2093                         net->ksnn_interfaces[j-1] =
2094                                 net->ksnn_interfaces[j];
2095
2096                 net->ksnn_ninterfaces--;
2097
2098                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2099                         list_for_each_safe(tmp, nxt,
2100                                                &ksocknal_data.ksnd_peers[j]) {
2101                                 peer_ni = list_entry(tmp, ksock_peer_ni_t,
2102                                                       ksnp_list);
2103
2104                                 if (peer_ni->ksnp_ni != ni)
2105                                         continue;
2106
2107                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2108                         }
2109                 }
2110         }
2111
2112         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2113
2114         return (rc);
2115 }
2116
/* ioctl dispatcher for the socklnd: translates libcfs_ioctl_data requests
 * into interface/peer/connection operations.  The ioc_u32[] slot layout of
 * each command is part of the user-space ABI -- do not reorder. */
2117 int
2118 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
2119 {
2120         struct lnet_process_id id = {0};
2121         struct libcfs_ioctl_data *data = arg;
2122         int rc;
2123
2124         switch(cmd) {
2125         case IOC_LIBCFS_GET_INTERFACE: {
2126                 ksock_net_t       *net = ni->ni_data;
2127                 ksock_interface_t *iface;
2128
2129                 read_lock(&ksocknal_data.ksnd_global_lock);
2130
2131                 /* ioc_count is the caller's iteration index */
2132                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2133                         rc = -ENOENT;
2134                 } else {
2135                         rc = 0;
2136                         iface = &net->ksnn_interfaces[data->ioc_count];
2137
2138                         data->ioc_u32[0] = iface->ksni_ipaddr;
2139                         data->ioc_u32[1] = iface->ksni_netmask;
2140                         data->ioc_u32[2] = iface->ksni_npeers;
2141                         data->ioc_u32[3] = iface->ksni_nroutes;
2142                 }
2143
2144                 read_unlock(&ksocknal_data.ksnd_global_lock);
2145                 return rc;
2146         }
2147
2148         case IOC_LIBCFS_ADD_INTERFACE:
2149                 return ksocknal_add_interface(ni,
2150                                               data->ioc_u32[0], /* IP address */
2151                                               data->ioc_u32[1]); /* net mask */
2152
2153         case IOC_LIBCFS_DEL_INTERFACE:
2154                 return ksocknal_del_interface(ni,
2155                                               data->ioc_u32[0]); /* IP address */
2156
2157         case IOC_LIBCFS_GET_PEER: {
2158                 __u32            myip = 0;
2159                 __u32            ip = 0;
2160                 int              port = 0;
2161                 int              conn_count = 0;
2162                 int              share_count = 0;
2163
2164                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2165                                             &id, &myip, &ip, &port,
2166                                             &conn_count,  &share_count);
2167                 if (rc != 0)
2168                         return rc;
2169
2170                 data->ioc_nid    = id.nid;
2171                 data->ioc_count  = share_count;
2172                 data->ioc_u32[0] = ip;
2173                 data->ioc_u32[1] = port;
2174                 data->ioc_u32[2] = myip;
2175                 data->ioc_u32[3] = conn_count;
2176                 data->ioc_u32[4] = id.pid;
2177                 return 0;
2178         }
2179
2180         case IOC_LIBCFS_ADD_PEER:
2181                 id.nid = data->ioc_nid;
2182                 id.pid = LNET_PID_LUSTRE;
2183                 return ksocknal_add_peer (ni, id,
2184                                           data->ioc_u32[0], /* IP */
2185                                           data->ioc_u32[1]); /* port */
2186
2187         case IOC_LIBCFS_DEL_PEER:
2188                 id.nid = data->ioc_nid;
2189                 id.pid = LNET_PID_ANY;
2190                 return ksocknal_del_peer (ni, id,
2191                                           data->ioc_u32[0]); /* IP */
2192
2193         case IOC_LIBCFS_GET_CONN: {
2194                 int           txmem;
2195                 int           rxmem;
2196                 int           nagle;
2197                 ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2198
2199                 if (conn == NULL)
2200                         return -ENOENT;
2201
2202                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2203
2204                 data->ioc_count  = txmem;
2205                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2206                 data->ioc_flags  = nagle;
2207                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2208                 data->ioc_u32[1] = conn->ksnc_port;
2209                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2210                 data->ioc_u32[3] = conn->ksnc_type;
2211                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2212                 data->ioc_u32[5] = rxmem;
2213                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2214                 /* drop the ref taken by ksocknal_get_conn_by_idx() */
2215                 ksocknal_conn_decref(conn);
2216                 return 0;
2217         }
2218
2219         case IOC_LIBCFS_CLOSE_CONNECTION:
2220                 id.nid = data->ioc_nid;
2221                 id.pid = LNET_PID_ANY;
2222                 return ksocknal_close_matching_conns (id,
2223                                                       data->ioc_u32[0]);
2224
2225         case IOC_LIBCFS_REGISTER_MYNID:
2226                 /* Ignore if this is a noop */
2227                 if (data->ioc_nid == ni->ni_nid)
2228                         return 0;
2229
2230                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2231                        libcfs_nid2str(data->ioc_nid),
2232                        libcfs_nid2str(ni->ni_nid));
2233                 return -EINVAL;
2234
2235         case IOC_LIBCFS_PUSH_CONNECTION:
2236                 id.nid = data->ioc_nid;
2237                 id.pid = LNET_PID_ANY;
2238                 return ksocknal_push(ni, id);
2239
2240         default:
2241                 return -EINVAL;
2242         }
2243         /* not reached */
2244 }
2243
/* Free all global socklnd allocations (scheduler arrays, peer hash table,
 * idle noop-tx pool).  Only safe once every tx has completed
 * (ksnd_nactive_txs == 0) and all threads have exited. */
2244 static void
2245 ksocknal_free_buffers (void)
2246 {
2247         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2248
2249         if (ksocknal_data.ksnd_sched_info != NULL) {
2250                 struct ksock_sched_info *info;
2251                 int                     i;
2252
2253                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2254                         if (info->ksi_scheds != NULL) {
2255                                 LIBCFS_FREE(info->ksi_scheds,
2256                                             info->ksi_nthreads_max *
2257                                             sizeof(info->ksi_scheds[0]));
2258                         }
2259                 }
2260                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2261         }
2262
2263         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2264                      sizeof(struct list_head) *
2265                      ksocknal_data.ksnd_peer_hash_size);
2266
2267         spin_lock(&ksocknal_data.ksnd_tx_lock);
2268
2269         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2270                 struct list_head        zlist;
2271                 ksock_tx_t      *tx;
2272
2273                 /* manual list splice: insert zlist into the chain, then
2274                  * unlink the original head -- zlist becomes the new head of
2275                  * all queued noop txs, which can then be freed unlocked */
2276                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2277                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2278                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2279
2280                 while (!list_empty(&zlist)) {
2281                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2282                         list_del(&tx->tx_list);
2283                         LIBCFS_FREE(tx, tx->tx_desc_size);
2284                 }
2285         } else {
2286                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2287         }
2288 }
2286
/* Tear down global socklnd state once the last network is gone
 * (ksnd_nnets == 0): verify all queues are empty, flag and wake every
 * thread, wait for them to exit, then free all buffers.  The phases must
 * run in this order -- threads may still touch the structures until
 * ksnd_nthreads reaches zero. */
2287 static void
2288 ksocknal_base_shutdown(void)
2289 {
2290         struct ksock_sched_info *info;
2291         ksock_sched_t           *sched;
2292         int                     i;
2293         int                     j;
2294
2295         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2296                atomic_read (&libcfs_kmemory));
2297         LASSERT (ksocknal_data.ksnd_nnets == 0);
2298
2299         switch (ksocknal_data.ksnd_init) {
2300         default:
2301                 LASSERT (0);
2302                 /* fallthrough unreachable: LASSERT(0) fires first */
2303
2304         case SOCKNAL_INIT_ALL:
2305         case SOCKNAL_INIT_DATA:
2306                 LASSERT (ksocknal_data.ksnd_peers != NULL);
2307                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2308                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2309                 }
2310
2311                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2312                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2313                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2314                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2315                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2316
2317                 if (ksocknal_data.ksnd_sched_info != NULL) {
2318                         cfs_percpt_for_each(info, i,
2319                                             ksocknal_data.ksnd_sched_info) {
2320                                 if (info->ksi_scheds == NULL)
2321                                         continue;
2322
2323                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2324
2325                                         sched = &info->ksi_scheds[j];
2326                                         LASSERT(list_empty(&sched->\
2327                                                                kss_tx_conns));
2328                                         LASSERT(list_empty(&sched->\
2329                                                                kss_rx_conns));
2330                                         LASSERT(list_empty(&sched-> \
2331                                                   kss_zombie_noop_txs));
2332                                         LASSERT(sched->kss_nconns == 0);
2333                                 }
2334                         }
2335                 }
2336
2337                 /* flag threads to terminate; wake and wait for them to die */
2338                 ksocknal_data.ksnd_shuttingdown = 1;
2339                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2340                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2341
2342                 if (ksocknal_data.ksnd_sched_info != NULL) {
2343                         cfs_percpt_for_each(info, i,
2344                                             ksocknal_data.ksnd_sched_info) {
2345                                 if (info->ksi_scheds == NULL)
2346                                         continue;
2347
2348                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2349                                         sched = &info->ksi_scheds[j];
2350                                         wake_up_all(&sched->kss_waitq);
2351                                 }
2352                         }
2353                 }
2354
2355                 /* poll ksnd_nthreads under the lock until all threads exit */
2356                 i = 4;
2357                 read_lock(&ksocknal_data.ksnd_global_lock);
2358                 while (ksocknal_data.ksnd_nthreads != 0) {
2359                         i++;
2360                         /* power of 2? */
2361                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2362                                 "waiting for %d threads to terminate\n",
2363                                 ksocknal_data.ksnd_nthreads);
2364                         read_unlock(&ksocknal_data.ksnd_global_lock);
2365                         set_current_state(TASK_UNINTERRUPTIBLE);
2366                         schedule_timeout(cfs_time_seconds(1));
2367                         read_lock(&ksocknal_data.ksnd_global_lock);
2368                 }
2369                 read_unlock(&ksocknal_data.ksnd_global_lock);
2370
2371                 ksocknal_free_buffers();
2372
2373                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2374                 break;
2375         }
2376
2377         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2378                atomic_read (&libcfs_kmemory));
2379
2380         module_put(THIS_MODULE);
2381 }
2380
2381 static int
2382 ksocknal_base_startup(void)
2383 {
2384         struct ksock_sched_info *info;
2385         int                     rc;
2386         int                     i;
2387
2388         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2389         LASSERT (ksocknal_data.ksnd_nnets == 0);
2390
2391         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2392
2393         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2394         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2395                      sizeof(struct list_head) *
2396                      ksocknal_data.ksnd_peer_hash_size);
2397         if (ksocknal_data.ksnd_peers == NULL)
2398                 return -ENOMEM;
2399
2400         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2401                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2402
2403         rwlock_init(&ksocknal_data.ksnd_global_lock);
2404         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2405
2406         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2407         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2408         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2409         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2410         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2411
2412         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2413         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2414         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2415         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2416
2417         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2418         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2419
2420         /* NB memset above zeros whole of ksocknal_data */
2421
2422         /* flag lists/ptrs/locks initialised */
2423         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2424         try_module_get(THIS_MODULE);
2425
2426         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2427                                                          sizeof(*info));
2428         if (ksocknal_data.ksnd_sched_info == NULL)
2429                 goto failed;
2430
2431         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2432                 ksock_sched_t   *sched;
2433                 int             nthrs;
2434
2435                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2436                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2437                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2438                 } else {
2439                         /* max to half of CPUs, assume another half should be
2440                          * reserved for upper layer modules */
2441                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2442                 }
2443
2444                 info->ksi_nthreads_max = nthrs;
2445                 info->ksi_cpt = i;
2446
2447                 if (nthrs != 0) {
2448                         LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2449                                          info->ksi_nthreads_max *
2450                                                 sizeof(*sched));
2451                         if (info->ksi_scheds == NULL)
2452                                 goto failed;
2453
2454                         for (; nthrs > 0; nthrs--) {
2455                                 sched = &info->ksi_scheds[nthrs - 1];
2456
2457                                 sched->kss_info = info;
2458                                 spin_lock_init(&sched->kss_lock);
2459                                 INIT_LIST_HEAD(&sched->kss_rx_conns);
2460                                 INIT_LIST_HEAD(&sched->kss_tx_conns);
2461                                 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2462                                 init_waitqueue_head(&sched->kss_waitq);
2463                         }
2464                 }
2465         }
2466
2467         ksocknal_data.ksnd_connd_starting         = 0;
2468         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2469         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2470         /* must have at least 2 connds to remain responsive to accepts while
2471          * connecting */
2472         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2473                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2474
2475         if (*ksocknal_tunables.ksnd_nconnds_max <
2476             *ksocknal_tunables.ksnd_nconnds) {
2477                 ksocknal_tunables.ksnd_nconnds_max =
2478                         ksocknal_tunables.ksnd_nconnds;
2479         }
2480
2481         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2482                 char name[16];
2483                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2484                 ksocknal_data.ksnd_connd_starting++;
2485                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2486
2487
2488                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2489                 rc = ksocknal_thread_start(ksocknal_connd,
2490                                            (void *)((uintptr_t)i), name);
2491                 if (rc != 0) {
2492                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2493                         ksocknal_data.ksnd_connd_starting--;
2494                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2495                         CERROR("Can't spawn socknal connd: %d\n", rc);
2496                         goto failed;
2497                 }
2498         }
2499
2500         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2501         if (rc != 0) {
2502                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2503                 goto failed;
2504         }
2505
2506         /* flag everything initialised */
2507         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2508
2509         return 0;
2510
2511  failed:
2512         ksocknal_base_shutdown();
2513         return -ENETDOWN;
2514 }
2515
2516 static void
2517 ksocknal_debug_peerhash(struct lnet_ni *ni)
2518 {
2519         ksock_peer_ni_t *peer_ni = NULL;
2520         struct list_head        *tmp;
2521         int             i;
2522
2523         read_lock(&ksocknal_data.ksnd_global_lock);
2524
2525         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2526                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2527                         peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
2528
2529                         if (peer_ni->ksnp_ni == ni) break;
2530
2531                         peer_ni = NULL;
2532                 }
2533         }
2534
2535         if (peer_ni != NULL) {
2536                 ksock_route_t *route;
2537                 ksock_conn_t  *conn;
2538
2539                 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2540                        "closing %d, accepting %d, err %d, zcookie %llu, "
2541                        "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2542                        atomic_read(&peer_ni->ksnp_refcount),
2543                        peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2544                        peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2545                        peer_ni->ksnp_zc_next_cookie,
2546                        !list_empty(&peer_ni->ksnp_tx_queue),
2547                        !list_empty(&peer_ni->ksnp_zc_req_list));
2548
2549                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2550                         route = list_entry(tmp, ksock_route_t, ksnr_list);
2551                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2552                                "del %d\n", atomic_read(&route->ksnr_refcount),
2553                                route->ksnr_scheduled, route->ksnr_connecting,
2554                                route->ksnr_connected, route->ksnr_deleted);
2555                 }
2556
2557                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2558                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2559                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2560                                atomic_read(&conn->ksnc_conn_refcount),
2561                                atomic_read(&conn->ksnc_sock_refcount),
2562                                conn->ksnc_type, conn->ksnc_closing);
2563                 }
2564         }
2565
2566         read_unlock(&ksocknal_data.ksnd_global_lock);
2567         return;
2568 }
2569
/*
 * LND shutdown hook for one network interface (struct lnet_ni).
 *
 * Flags the net as shutting down (so no new peers attach), deletes all
 * peers on it, then polls once a second until every peer_ni has gone,
 * dumping diagnostic state each iteration.  Finally frees the per-net
 * state and, when this was the last net, tears down the LND-global state.
 */
void
ksocknal_shutdown(struct lnet_ni *ni)
{
        ksock_net_t *net = ni->ni_data;
        /* wildcard id: ksocknal_del_peer() matches every peer on this ni */
        struct lnet_process_id anyid = {
                .nid = LNET_NID_ANY,
                .pid = LNET_PID_ANY,
        };
        int i;

        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
        LASSERT(ksocknal_data.ksnd_nnets > 0);

        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_shutdown = 1;                 /* prevent new peers */
        spin_unlock_bh(&net->ksnn_lock);

        /* Delete all peers */
        ksocknal_del_peer(ni, anyid, 0);

        /* Wait for all peer_ni state to clean up */
        i = 2;
        spin_lock_bh(&net->ksnn_lock);
        while (net->ksnn_npeers != 0) {
                /* drop the lock while sleeping; reacquire to re-test */
                spin_unlock_bh(&net->ksnn_lock);

                i++;
                /* log at D_WARNING only on power-of-2 iterations to avoid
                 * flooding the console during a long wait */
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "waiting for %d peers to disconnect\n",
                       net->ksnn_npeers);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));

                ksocknal_debug_peerhash(ni);

                spin_lock_bh(&net->ksnn_lock);
        }
        spin_unlock_bh(&net->ksnn_lock);

        /* with all peers gone, no interface may still count peers/routes */
        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
                LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
        }

        list_del(&net->ksnn_list);
        LIBCFS_FREE(net, sizeof(*net));

        /* last net down takes the whole LND base with it */
        ksocknal_data.ksnd_nnets--;
        if (ksocknal_data.ksnd_nnets == 0)
                ksocknal_base_shutdown();
}
2621
2622 static int
2623 ksocknal_enumerate_interfaces(ksock_net_t *net)
2624 {
2625         char      **names;
2626         int         i;
2627         int         j;
2628         int         rc;
2629         int         n;
2630
2631         n = lnet_ipif_enumerate(&names);
2632         if (n <= 0) {
2633                 CERROR("Can't enumerate interfaces: %d\n", n);
2634                 return n;
2635         }
2636
2637         for (i = j = 0; i < n; i++) {
2638                 int        up;
2639                 __u32      ip;
2640                 __u32      mask;
2641
2642                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2643                         continue;
2644
2645                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2646                 if (rc != 0) {
2647                         CWARN("Can't get interface %s info: %d\n",
2648                               names[i], rc);
2649                         continue;
2650                 }
2651
2652                 if (!up) {
2653                         CWARN("Ignoring interface %s (down)\n",
2654                               names[i]);
2655                         continue;
2656                 }
2657
2658                 if (j == LNET_NUM_INTERFACES) {
2659                         CWARN("Ignoring interface %s (too many interfaces)\n",
2660                               names[i]);
2661                         continue;
2662                 }
2663
2664                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2665                 net->ksnn_interfaces[j].ksni_netmask = mask;
2666                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2667                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2668                 j++;
2669         }
2670
2671         lnet_ipif_free_enumeration(names, n);
2672
2673         if (j == 0)
2674                 CERROR("Can't find any usable interfaces\n");
2675
2676         return j;
2677 }
2678
2679 static int
2680 ksocknal_search_new_ipif(ksock_net_t *net)
2681 {
2682         int     new_ipif = 0;
2683         int     i;
2684
2685         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2686                 char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2687                 char            *colon = strchr(ifnam, ':');
2688                 int             found  = 0;
2689                 ksock_net_t     *tmp;
2690                 int             j;
2691
2692                 if (colon != NULL) /* ignore alias device */
2693                         *colon = 0;
2694
2695                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2696                                         ksnn_list) {
2697                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2698                                 char *ifnam2 = &tmp->ksnn_interfaces[j].\
2699                                              ksni_name[0];
2700                                 char *colon2 = strchr(ifnam2, ':');
2701
2702                                 if (colon2 != NULL)
2703                                         *colon2 = 0;
2704
2705                                 found = strcmp(ifnam, ifnam2) == 0;
2706                                 if (colon2 != NULL)
2707                                         *colon2 = ':';
2708                         }
2709                         if (found)
2710                                 break;
2711                 }
2712
2713                 new_ipif += !found;
2714                 if (colon != NULL)
2715                         *colon = ':';
2716         }
2717
2718         return new_ipif;
2719 }
2720
2721 static int
2722 ksocknal_start_schedulers(struct ksock_sched_info *info)
2723 {
2724         int     nthrs;
2725         int     rc = 0;
2726         int     i;
2727
2728         if (info->ksi_nthreads == 0) {
2729                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2730                         nthrs = info->ksi_nthreads_max;
2731                 } else {
2732                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2733                                                info->ksi_cpt);
2734                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2735                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2736                 }
2737                 nthrs = min(nthrs, info->ksi_nthreads_max);
2738         } else {
2739                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2740                 /* increase two threads if there is new interface */
2741                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2742         }
2743
2744         for (i = 0; i < nthrs; i++) {
2745                 long            id;
2746                 char            name[20];
2747                 ksock_sched_t   *sched;
2748                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2749                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2750                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2751                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2752
2753                 rc = ksocknal_thread_start(ksocknal_scheduler,
2754                                            (void *)id, name);
2755                 if (rc == 0)
2756                         continue;
2757
2758                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2759                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2760                 break;
2761         }
2762
2763         info->ksi_nthreads += i;
2764         return rc;
2765 }
2766
2767 static int
2768 ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2769 {
2770         int     newif = ksocknal_search_new_ipif(net);
2771         int     rc;
2772         int     i;
2773
2774         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2775                 return -EINVAL;
2776
2777         for (i = 0; i < ncpts; i++) {
2778                 struct ksock_sched_info *info;
2779                 int cpt = (cpts == NULL) ? i : cpts[i];
2780
2781                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2782                 info = ksocknal_data.ksnd_sched_info[cpt];
2783
2784                 if (!newif && info->ksi_nthreads > 0)
2785                         continue;
2786
2787                 rc = ksocknal_start_schedulers(info);
2788                 if (rc != 0)
2789                         return rc;
2790         }
2791         return 0;
2792 }
2793
/*
 * LND startup hook for one network interface (struct lnet_ni).
 *
 * Brings up the LND-global state on first use, allocates the per-net
 * state, records the interfaces to use (auto-discovered or taken from
 * ni->ni_interfaces), binds the ni to the NUMA node of the first
 * interface's device, starts scheduler threads, and finally derives the
 * ni's NID from the first interface's IP address.
 *
 * Returns 0 on success or -ENETDOWN on any failure.
 */
int
ksocknal_startup(struct lnet_ni *ni)
{
        ksock_net_t  *net;
        int           rc;
        int           i;
        struct net_device *net_dev;
        int node_id;

        LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);

        /* first net: bring up the LND-global state */
        if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
                rc = ksocknal_base_startup();
                if (rc != 0)
                        return rc;
        }

        /* NOTE(review): allocation failure falls through to fail_0 and
         * returns -ENETDOWN rather than -ENOMEM — confirm intended */
        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL)
                goto fail_0;

        spin_lock_init(&net->ksnn_lock);
        net->ksnn_incarnation = ktime_get_real_ns();
        ni->ni_data = net;
        /* seed this net's tunables from the module parameters unless the
         * caller configured them already */
        if (!ni->ni_net->net_tunables_set) {
                ni->ni_net->net_tunables.lct_peer_timeout =
                        *ksocknal_tunables.ksnd_peertimeout;
                ni->ni_net->net_tunables.lct_max_tx_credits =
                        *ksocknal_tunables.ksnd_credits;
                ni->ni_net->net_tunables.lct_peer_tx_credits =
                        *ksocknal_tunables.ksnd_peertxcredits;
                ni->ni_net->net_tunables.lct_peer_rtr_credits =
                        *ksocknal_tunables.ksnd_peerrtrcredits;
                ni->ni_net->net_tunables_set = true;
        }


        if (ni->ni_interfaces[0] == NULL) {
                /* no interfaces configured: auto-discover them */
                rc = ksocknal_enumerate_interfaces(net);
                if (rc <= 0)
                        goto fail_1;

                /* NOTE(review): only the first discovered interface is
                 * used even when enumeration filled in more — confirm
                 * this is deliberate */
                net->ksnn_ninterfaces = 1;
        } else {
                /* use the explicitly configured interface list */
                for (i = 0; i < LNET_NUM_INTERFACES; i++) {
                        int up;

                        if (ni->ni_interfaces[i] == NULL)
                                break;

                        rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
                                &net->ksnn_interfaces[i].ksni_ipaddr,
                                &net->ksnn_interfaces[i].ksni_netmask);

                        if (rc != 0) {
                                CERROR("Can't get interface %s info: %d\n",
                                       ni->ni_interfaces[i], rc);
                                goto fail_1;
                        }

                        if (!up) {
                                CERROR("Interface %s is down\n",
                                       ni->ni_interfaces[i]);
                                goto fail_1;
                        }

                        strlcpy(net->ksnn_interfaces[i].ksni_name,
                                ni->ni_interfaces[i],
                                sizeof(net->ksnn_interfaces[i].ksni_name));

                }
                net->ksnn_ninterfaces = i;
        }

        /* bind the ni to the NUMA node of the first interface's device
         * (CFS_CPT_ANY when the device cannot be looked up) */
        net_dev = dev_get_by_name(&init_net,
                                  net->ksnn_interfaces[0].ksni_name);
        if (net_dev != NULL) {
                node_id = dev_to_node(&net_dev->dev);
                ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
                dev_put(net_dev);
        } else {
                ni->ni_dev_cpt = CFS_CPT_ANY;
        }

        /* call it before add it to ksocknal_data.ksnd_nets */
        rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
        if (rc != 0)
                goto fail_1;

        /* NID = network part of the configured NID + first interface IP */
        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
                                net->ksnn_interfaces[0].ksni_ipaddr);
        list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

        ksocknal_data.ksnd_nnets++;

        return 0;

 fail_1:
        /* NOTE(review): ni->ni_data still points at the freed net here;
         * presumably callers ignore it on failure — verify */
        LIBCFS_FREE(net, sizeof(*net));
 fail_0:
        if (ksocknal_data.ksnd_nnets == 0)
                ksocknal_base_shutdown();

        return -ENETDOWN;
}
2899
2900
/* Module unload hook: detach the socklnd driver from LNet. */
static void __exit ksocklnd_exit(void)
{
        lnet_unregister_lnd(&the_ksocklnd);
}
2905
2906 static int __init ksocklnd_init(void)
2907 {
2908         int rc;
2909
2910         /* check ksnr_connected/connecting field large enough */
2911         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2912         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2913
2914         /* initialize the_ksocklnd */
2915         the_ksocklnd.lnd_type     = SOCKLND;
2916         the_ksocklnd.lnd_startup  = ksocknal_startup;
2917         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2918         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2919         the_ksocklnd.lnd_send     = ksocknal_send;
2920         the_ksocklnd.lnd_recv     = ksocknal_recv;
2921         the_ksocklnd.lnd_notify   = ksocknal_notify;
2922         the_ksocklnd.lnd_query    = ksocknal_query;
2923         the_ksocklnd.lnd_accept   = ksocknal_accept;
2924
2925         rc = ksocknal_tunables_init();
2926         if (rc != 0)
2927                 return rc;
2928
2929         lnet_register_lnd(&the_ksocklnd);
2930
2931         return 0;
2932 }
2933
/* kernel module metadata and entry/exit registration */
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);