/* Source: fs/lustre-release.git — lnet/klnds/socklnd/socklnd.c (gitweb export).
 * NOTE(review): the exported page's commit title ("LU-4423 lnet: free a struct
 * kib_conn outside of the kiblnd_destroy_conn()") refers to o2iblnd, not this
 * file — it is gitweb navigation residue, not a description of this code. */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <linux/pci.h>
#include "socklnd.h"

static struct lnet_lnd the_ksocklnd;
struct ksock_nal_data ksocknal_data;
46 static struct ksock_interface *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
48 {
49         struct ksock_net *net = ni->ni_data;
50         int i;
51         struct ksock_interface *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_INTERFACES_NUM);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return iface;
59         }
60
61         return NULL;
62 }
63
64 static struct ksock_route *
65 ksocknal_create_route(__u32 ipaddr, int port)
66 {
67         struct ksock_route *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route(struct ksock_route *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
99 static int
100 ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
101                      struct lnet_process_id id)
102 {
103         int cpt = lnet_cpt_of_nid(id.nid, ni);
104         struct ksock_net *net = ni->ni_data;
105         struct ksock_peer_ni *peer_ni;
106
107         LASSERT(id.nid != LNET_NID_ANY);
108         LASSERT(id.pid != LNET_PID_ANY);
109         LASSERT(!in_interrupt());
110
111         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
112         if (peer_ni == NULL)
113                 return -ENOMEM;
114
115         peer_ni->ksnp_ni = ni;
116         peer_ni->ksnp_id = id;
117         atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
118         peer_ni->ksnp_closing = 0;
119         peer_ni->ksnp_accepting = 0;
120         peer_ni->ksnp_proto = NULL;
121         peer_ni->ksnp_last_alive = 0;
122         peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
123
124         INIT_LIST_HEAD(&peer_ni->ksnp_conns);
125         INIT_LIST_HEAD(&peer_ni->ksnp_routes);
126         INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
127         INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
128         spin_lock_init(&peer_ni->ksnp_lock);
129
130         spin_lock_bh(&net->ksnn_lock);
131
132         if (net->ksnn_shutdown) {
133                 spin_unlock_bh(&net->ksnn_lock);
134
135                 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
136                 CERROR("Can't create peer_ni: network shutdown\n");
137                 return -ESHUTDOWN;
138         }
139
140         net->ksnn_npeers++;
141
142         spin_unlock_bh(&net->ksnn_lock);
143
144         *peerp = peer_ni;
145         return 0;
146 }
147
/*
 * Free @peer_ni on its final decref and uncount it from its net.
 * Only legal once the refcount is zero and all conns, routes and queued
 * traffic are gone (asserted below).
 */
void
ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
{
        struct ksock_net *net = peer_ni->ksnp_ni->ni_data;

        CDEBUG (D_NET, "peer_ni %s %p deleted\n",
                libcfs_id2str(peer_ni->ksnp_id), peer_ni);

        LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
        LASSERT(peer_ni->ksnp_accepting == 0);
        LASSERT(list_empty(&peer_ni->ksnp_conns));
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
        LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));

        LIBCFS_FREE(peer_ni, sizeof(*peer_ni));

        /* NB a peer_ni's connections and routes keep a reference on their peer_ni
         * until they are destroyed, so we can be assured that _all_ state to
         * do with this peer_ni has been cleaned up when its refcount drops to
         * zero. */
        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_npeers--;
        spin_unlock_bh(&net->ksnn_lock);
}
173
174 struct ksock_peer_ni *
175 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
176 {
177         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178         struct list_head *tmp;
179         struct ksock_peer_ni *peer_ni;
180
181         list_for_each(tmp, peer_list) {
182                 peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
183
184                 LASSERT(!peer_ni->ksnp_closing);
185
186                 if (peer_ni->ksnp_ni != ni)
187                         continue;
188
189                 if (peer_ni->ksnp_id.nid != id.nid ||
190                     peer_ni->ksnp_id.pid != id.pid)
191                         continue;
192
193                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
194                        peer_ni, libcfs_id2str(id),
195                        atomic_read(&peer_ni->ksnp_refcount));
196                 return peer_ni;
197         }
198         return NULL;
199 }
200
201 struct ksock_peer_ni *
202 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
203 {
204         struct ksock_peer_ni *peer_ni;
205
206         read_lock(&ksocknal_data.ksnd_global_lock);
207         peer_ni = ksocknal_find_peer_locked(ni, id);
208         if (peer_ni != NULL)                    /* +1 ref for caller? */
209                 ksocknal_peer_addref(peer_ni);
210         read_unlock(&ksocknal_data.ksnd_global_lock);
211
212         return (peer_ni);
213 }
214
/*
 * Remove @peer_ni from the global peer table and drop the table's
 * reference.  Caller holds ksnd_global_lock for write.  The peer's
 * passive-IP claims are returned to the per-interface counters first.
 * Only legal for a peer with no remaining conns or routes.
 */
static void
ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
{
        int i;
        __u32 ip;
        struct ksock_interface *iface;

        for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
                LASSERT(i < LNET_INTERFACES_NUM);
                ip = peer_ni->ksnp_passive_ips[i];

                iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
                /*
                 * All IPs in peer_ni->ksnp_passive_ips[] come from the
                 * interface list, therefore the call must succeed.
                 */
                LASSERT(iface != NULL);

                CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
                       peer_ni, iface, iface->ksni_nroutes);
                iface->ksni_npeers--;
        }

        LASSERT(list_empty(&peer_ni->ksnp_conns));
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(!peer_ni->ksnp_closing);
        /* Mark closing before unhashing so concurrent lookups that assert
         * !ksnp_closing never see a half-removed peer. */
        peer_ni->ksnp_closing = 1;
        list_del(&peer_ni->ksnp_list);
        /* lose peerlist's ref */
        ksocknal_peer_decref(peer_ni);
}
246
/*
 * Fill the out parameters with the @index'th peer record on @ni.
 *
 * Records are enumerated in peer-hash order.  Each peer_ni contributes
 * either a single "no IPs, no routes" record, or one record per passive
 * IP followed by one per route; @index counts down across the whole
 * enumeration.  Returns 0 when a record was found, -ENOENT when @index
 * is beyond the last record.
 */
static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
                       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
                       int *port, int *conn_count, int *share_count)
{
        struct ksock_peer_ni *peer_ni;
        struct list_head *ptmp;
        struct ksock_route *route;
        struct list_head *rtmp;
        int i;
        int j;
        int rc = -ENOENT;

        read_lock(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
                        peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);

                        if (peer_ni->ksnp_ni != ni)
                                continue;

                        /* A peer with neither passive IPs nor routes still
                         * yields one (mostly zeroed) record. */
                        if (peer_ni->ksnp_n_passive_ips == 0 &&
                            list_empty(&peer_ni->ksnp_routes)) {
                                if (index-- > 0)
                                        continue;

                                *id = peer_ni->ksnp_id;
                                *myip = 0;
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        /* One record per passive IP... */
                        for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
                                if (index-- > 0)
                                        continue;

                                *id = peer_ni->ksnp_id;
                                *myip = peer_ni->ksnp_passive_ips[j];
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        /* ...then one per route. */
                        list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                if (index-- > 0)
                                        continue;

                                route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);

                                *id = peer_ni->ksnp_id;
                                *myip = route->ksnr_myipaddr;
                                *peer_ip = route->ksnr_ipaddr;
                                *port = route->ksnr_port;
                                *conn_count = route->ksnr_conn_count;
                                *share_count = route->ksnr_share_count;
                                rc = 0;
                                goto out;
                        }
                }
        }
out:
        read_unlock(&ksocknal_data.ksnd_global_lock);
        return rc;
}
320
/*
 * Bind @conn to @route.  Caller holds ksnd_global_lock for write.
 *
 * Takes a route reference for the conn's back-pointer, rebinds the
 * route's local IP to the conn's actual local IP (keeping the
 * per-interface route counts in step), records the connection type in
 * ksnr_connected, and clears the retry backoff.
 */
static void
ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
        int type = conn->ksnc_type;
        struct ksock_interface *iface;

        conn->ksnc_route = route;
        ksocknal_route_addref(route);

        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
                if (route->ksnr_myipaddr == 0) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
                               libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &conn->ksnc_myipaddr);
                } else {
                        CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
                               "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &route->ksnr_myipaddr,
                               &conn->ksnc_myipaddr);

                        /* give up the old interface's claim on this route */
                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                                  route->ksnr_myipaddr);
                        if (iface != NULL)
                                iface->ksni_nroutes--;
                }
                route->ksnr_myipaddr = conn->ksnc_myipaddr;
                /* and claim the new interface */
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes++;
        }

        route->ksnr_connected |= (1<<type);
        route->ksnr_conn_count++;

        /* Successful connection => further attempts can
         * proceed immediately */
        route->ksnr_retry_interval = 0;
}
364
365 static void
366 ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
367 {
368         struct list_head *tmp;
369         struct ksock_conn *conn;
370         struct ksock_route *route2;
371
372         LASSERT(!peer_ni->ksnp_closing);
373         LASSERT(route->ksnr_peer == NULL);
374         LASSERT(!route->ksnr_scheduled);
375         LASSERT(!route->ksnr_connecting);
376         LASSERT(route->ksnr_connected == 0);
377
378         /* LASSERT(unique) */
379         list_for_each(tmp, &peer_ni->ksnp_routes) {
380                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
381
382                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
383                         CERROR("Duplicate route %s %pI4h\n",
384                                libcfs_id2str(peer_ni->ksnp_id),
385                                &route->ksnr_ipaddr);
386                         LBUG();
387                 }
388         }
389
390         route->ksnr_peer = peer_ni;
391         ksocknal_peer_addref(peer_ni);
392         /* peer_ni's routelist takes over my ref on 'route' */
393         list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
394
395         list_for_each(tmp, &peer_ni->ksnp_conns) {
396                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
397
398                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
399                         continue;
400
401                 ksocknal_associate_route_conn_locked(route, conn);
402                 /* keep going (typed routes) */
403         }
404 }
405
/*
 * Delete @route from its peer_ni.  Caller holds ksnd_global_lock for
 * write.  Closes every conn bound to the route, releases the route's
 * claim on its local interface, drops the peer's list reference and,
 * if that leaves the peer_ni with no routes and no conns, unlinks the
 * peer_ni from the global table as well.
 */
static void
ksocknal_del_route_locked(struct ksock_route *route)
{
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
        struct ksock_interface *iface;
        struct ksock_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;

        LASSERT(!route->ksnr_deleted);

        /* Close associated conns */
        list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
                conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

                if (conn->ksnc_route != route)
                        continue;

                ksocknal_close_conn_locked(conn, 0);
        }

        /* return the local interface's claim, if the route was bound */
        if (route->ksnr_myipaddr != 0) {
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes--;
        }

        route->ksnr_deleted = 1;
        list_del(&route->ksnr_list);
        ksocknal_route_decref(route);           /* drop peer_ni's ref */

        if (list_empty(&peer_ni->ksnp_routes) &&
            list_empty(&peer_ni->ksnp_conns)) {
                /* I've just removed the last route to a peer_ni with no active
                 * connections */
                ksocknal_unlink_peer_locked(peer_ni);
        }
}
445
/*
 * Ensure a (peer_ni, route) pair exists for @id at @ipaddr:@port on @ni.
 *
 * A new peer_ni and route are created optimistically before taking the
 * global lock; under the lock, pre-existing objects are preferred and
 * the spares are dropped.  The matching route's share count is always
 * bumped.  Returns 0 on success, -EINVAL for wildcard ids, -ENOMEM, or
 * whatever ksocknal_create_peer() failed with.
 */
int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
                  int port)
{
        struct list_head *tmp;
        struct ksock_peer_ni *peer_ni;
        struct ksock_peer_ni *peer2;
        struct ksock_route *route;
        struct ksock_route *route2;
        int rc;

        if (id.nid == LNET_NID_ANY ||
            id.pid == LNET_PID_ANY)
                return (-EINVAL);

        /* Have a brand new peer_ni ready... */
        rc = ksocknal_create_peer(&peer_ni, ni, id);
        if (rc != 0)
                return rc;

        route = ksocknal_create_route (ipaddr, port);
        if (route == NULL) {
                ksocknal_peer_decref(peer_ni);
                return (-ENOMEM);
        }

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* always called with a ref on ni, so shutdown can't have started */
        LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);

        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2 != NULL) {
                /* peer already existed: use it, drop the spare */
                ksocknal_peer_decref(peer_ni);
                peer_ni = peer2;
        } else {
                /* peer_ni table takes my ref on peer_ni */
                list_add_tail(&peer_ni->ksnp_list,
                              ksocknal_nid2peerlist(id.nid));
        }

        /* look for an existing route to this IP */
        route2 = NULL;
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route2 = list_entry(tmp, struct ksock_route, ksnr_list);

                if (route2->ksnr_ipaddr == ipaddr)
                        break;

                route2 = NULL;
        }
        if (route2 == NULL) {
                ksocknal_add_route_locked(peer_ni, route);
                route->ksnr_share_count++;
        } else {
                /* route already existed: drop the spare, share the old one */
                ksocknal_route_decref(route);
                route2->ksnr_share_count++;
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return 0;
}
508
/*
 * Delete @peer_ni's routes matching @ip (0 matches every route) and
 * their conns.  Caller holds ksnd_global_lock for write.  If no
 * explicitly-shared routes remain afterwards, all remaining
 * (auto-created) routes and conns are torn down too.  The peer_ni
 * unlinks itself when its last conn/route goes.
 */
static void
ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
{
        struct ksock_conn *conn;
        struct ksock_route *route;
        struct list_head *tmp;
        struct list_head *nxt;
        int nshared;

        LASSERT(!peer_ni->ksnp_closing);

        /* Extra ref prevents peer_ni disappearing until I'm done with it */
        ksocknal_peer_addref(peer_ni);

        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                /* no match */
                if (!(ip == 0 || route->ksnr_ipaddr == ip))
                        continue;

                route->ksnr_share_count = 0;
                /* This deletes associated conns too */
                ksocknal_del_route_locked(route);
        }

        /* total share count over the surviving routes */
        nshared = 0;
        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);
                nshared += route->ksnr_share_count;
        }

        if (nshared == 0) {
                /* remove everything else if there are no explicit entries
                 * left */

                list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                        route = list_entry(tmp, struct ksock_route, ksnr_list);

                        /* we should only be removing auto-entries */
                        LASSERT(route->ksnr_share_count == 0);
                        ksocknal_del_route_locked(route);
                }

                list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
                        conn = list_entry(tmp, struct ksock_conn, ksnc_list);

                        ksocknal_close_conn_locked(conn, 0);
                }
        }

        ksocknal_peer_decref(peer_ni);
        /* NB peer_ni unlinks itself when last conn/route is removed */
}
563
/*
 * Delete peers on @ni matching @id (LNET_NID_ANY / LNET_PID_ANY act as
 * wildcards), restricted to routes matching @ip (0 = all).  Any tx
 * still queued on a peer that ends up fully closed is collected and
 * completed with -ENETDOWN after the lock is dropped.  Returns 0 if at
 * least one peer matched, -ENOENT otherwise.
 */
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
        struct list_head zombies = LIST_HEAD_INIT(zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
        struct ksock_peer_ni *peer_ni;
        int lo;
        int hi;
        int i;
        int rc = -ENOENT;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* a specific nid hits one hash bucket; a wildcard scans them all */
        if (id.nid != LNET_NID_ANY) {
                hi = (int)(ksocknal_nid2peerlist(id.nid) -
                           ksocknal_data.ksnd_peers);
                lo = hi;
        } else {
                lo = 0;
                hi = ksocknal_data.ksnd_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt,
                                   &ksocknal_data.ksnd_peers[i]) {
                        peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);

                        if (peer_ni->ksnp_ni != ni)
                                continue;

                        if (!((id.nid == LNET_NID_ANY ||
                               peer_ni->ksnp_id.nid == id.nid) &&
                              (id.pid == LNET_PID_ANY ||
                               peer_ni->ksnp_id.pid == id.pid)))
                                continue;

                        ksocknal_peer_addref(peer_ni);  /* a ref for me... */

                        ksocknal_del_peer_locked(peer_ni, ip);

                        /* a closing peer can't send its queued tx any more;
                         * steal the queue for completion outside the lock */
                        if (peer_ni->ksnp_closing &&
                            !list_empty(&peer_ni->ksnp_tx_queue)) {
                                LASSERT(list_empty(&peer_ni->ksnp_conns));
                                LASSERT(list_empty(&peer_ni->ksnp_routes));

                                list_splice_init(&peer_ni->ksnp_tx_queue,
                                                 &zombies);
                        }

                        ksocknal_peer_decref(peer_ni);  /* ...till here */

                        rc = 0;                         /* matched! */
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_txlist_done(ni, &zombies, -ENETDOWN);

        return rc;
}
626
627 static struct ksock_conn *
628 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
629 {
630         struct ksock_peer_ni *peer_ni;
631         struct list_head *ptmp;
632         struct ksock_conn *conn;
633         struct list_head *ctmp;
634         int i;
635
636         read_lock(&ksocknal_data.ksnd_global_lock);
637
638         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
639                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
640                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
641
642                         LASSERT(!peer_ni->ksnp_closing);
643
644                         if (peer_ni->ksnp_ni != ni)
645                                 continue;
646
647                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
648                                 if (index-- > 0)
649                                         continue;
650
651                                 conn = list_entry(ctmp, struct ksock_conn,
652                                                   ksnc_list);
653                                 ksocknal_conn_addref(conn);
654                                 read_unlock(&ksocknal_data. \
655                                             ksnd_global_lock);
656                                 return conn;
657                         }
658                 }
659         }
660
661         read_unlock(&ksocknal_data.ksnd_global_lock);
662         return NULL;
663 }
664
/*
 * Pick the least-loaded scheduler (fewest connections) serving @cpt.
 * If @cpt has no running scheduler threads, fall back to the first CPT
 * that has some — note the cfs_percpt_for_each() re-points @info at the
 * fallback before the goto.  Returns NULL only when no CPT has any
 * scheduler threads at all.
 */
static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
        struct ksock_sched *sched;
        int i;

        if (info->ksi_nthreads == 0) {
                cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
                        if (info->ksi_nthreads > 0) {
                                CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
                                       cpt, info->ksi_cpt);
                                goto select_sched;
                        }
                }
                return NULL;
        }

select_sched:
        sched = &info->ksi_scheds[0];
        /*
         * NB: it's safe so far, but info->ksi_nthreads could be changed
         * at runtime when we have dynamic LNet configuration, then we
         * need to take care of this.
         */
        for (i = 1; i < info->ksi_nthreads; i++) {
                if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
                        sched = &info->ksi_scheds[i];
        }

        return sched;
}
697
698 static int
699 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
700 {
701         struct ksock_net *net = ni->ni_data;
702         int i;
703         int nip;
704
705         read_lock(&ksocknal_data.ksnd_global_lock);
706
707         nip = net->ksnn_ninterfaces;
708         LASSERT(nip <= LNET_INTERFACES_NUM);
709
710         /*
711          * Only offer interfaces for additional connections if I have
712          * more than one.
713          */
714         if (nip < 2) {
715                 read_unlock(&ksocknal_data.ksnd_global_lock);
716                 return 0;
717         }
718
719         for (i = 0; i < nip; i++) {
720                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
721                 LASSERT(ipaddrs[i] != 0);
722         }
723
724         read_unlock(&ksocknal_data.ksnd_global_lock);
725         return nip;
726 }
727
728 static int
729 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
730 {
731         int best_netmatch = 0;
732         int best_xor = 0;
733         int best = -1;
734         int this_xor;
735         int this_netmatch;
736         int i;
737
738         for (i = 0; i < nips; i++) {
739                 if (ips[i] == 0)
740                         continue;
741
742                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
743                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
744
745                 if (!(best < 0 ||
746                       best_netmatch < this_netmatch ||
747                       (best_netmatch == this_netmatch &&
748                        best_xor > this_xor)))
749                         continue;
750
751                 best = i;
752                 best_netmatch = this_netmatch;
753                 best_xor = this_xor;
754         }
755
756         LASSERT (best >= 0);
757         return (best);
758 }
759
/*
 * Choose which local interfaces to pair with the peer's IPs and record
 * them in peer_ni->ksnp_passive_ips[].
 *
 * Previously-chosen ("old") interfaces are kept; additional interfaces
 * are picked to best match the peer IPs not yet consumed (subnet match
 * first, then fewest peers on the interface).  On return @peerips is
 * overwritten with the chosen passive IPs and their count is returned
 * (0 unless this net has more than one interface).
 */
static int
ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
{
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
        struct ksock_interface *iface;
        struct ksock_interface *best_iface;
        int n_ips;
        int i;
        int j;
        int k;
        u32 ip;
        u32 xor;
        int this_netmatch;
        int best_netmatch;
        int best_npeers;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness shouldn't matter */

        /* Also note that I'm not going to return more than n_peerips
         * interfaces, even if I have more myself */

        write_lock_bh(global_lock);

        LASSERT(n_peerips <= LNET_INTERFACES_NUM);
        LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);

        /* Only match interfaces for additional connections
         * if I have > 1 interface */
        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
                MIN(n_peerips, net->ksnn_ninterfaces);

        /* loop until ksnp_n_passive_ips has grown to n_ips; the else
         * branch below extends it by one per iteration */
        for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
                /*              ^ yes really... */

                /* If we have any new interfaces, first tick off all the
                 * peer_ni IPs that match old interfaces, then choose new
                 * interfaces to match the remaining peer_ni IPS.
                 * We don't forget interfaces we've stopped using; we might
                 * start using them again... */

                if (i < peer_ni->ksnp_n_passive_ips) {
                        /* Old interface. */
                        ip = peer_ni->ksnp_passive_ips[i];
                        best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);

                        /* peer_ni passive ips are kept up to date */
                        LASSERT(best_iface != NULL);
                } else {
                        /* choose a new interface */
                        LASSERT (i == peer_ni->ksnp_n_passive_ips);

                        best_iface = NULL;
                        best_netmatch = 0;
                        best_npeers = 0;

                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
                                iface = &net->ksnn_interfaces[j];
                                ip = iface->ksni_ipaddr;

                                for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
                                        if (peer_ni->ksnp_passive_ips[k] == ip)
                                                break;

                                if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
                                        continue;

                                /* score this candidate against its best
                                 * peer-IP match: subnet match wins, then
                                 * fewest peers already on the interface */
                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
                                xor = (ip ^ peerips[k]);
                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

                                if (!(best_iface == NULL ||
                                      best_netmatch < this_netmatch ||
                                      (best_netmatch == this_netmatch &&
                                       best_npeers > iface->ksni_npeers)))
                                        continue;

                                best_iface = iface;
                                best_netmatch = this_netmatch;
                                best_npeers = iface->ksni_npeers;
                        }

                        LASSERT(best_iface != NULL);

                        best_iface->ksni_npeers++;
                        ip = best_iface->ksni_ipaddr;
                        peer_ni->ksnp_passive_ips[i] = ip;
                        peer_ni->ksnp_n_passive_ips = i+1;
                }

                /* mark the best matching peer_ni IP used */
                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
                peerips[j] = 0;
        }

        /* Overwrite input peer_ni IP addresses */
        memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));

        write_unlock_bh(global_lock);

        return (n_ips);
}
865
/*
 * Create additional routes from @peer_ni to each of the peer's advertised
 * IP addresses (@peer_ipaddrs / @npeer_ipaddrs), choosing for each address
 * the "best" local interface to connect from: prefer an interface on the
 * same subnet, then the one carrying the fewest routes.  @port is the peer
 * port each new route will connect to.
 *
 * Runs mostly under an exclusive hold of ksnd_global_lock; the lock is
 * dropped temporarily around ksocknal_create_route() (which allocates), so
 * peer_ni state is re-checked after each re-acquire.
 */
static void
ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
		       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
	struct ksock_route		*newroute = NULL;
	rwlock_t		*global_lock = &ksocknal_data.ksnd_global_lock;
	struct lnet_ni *ni = peer_ni->ksnp_ni;
	struct ksock_net		*net = ni->ni_data;
	struct list_head	*rtmp;
	struct ksock_route		*route;
	struct ksock_interface	*iface;
	struct ksock_interface	*best_iface;
	int			best_netmatch;
	int			this_netmatch;
	int			best_nroutes;
	int			i;
	int			j;

	/* CAVEAT EMPTOR: We do all our interface matching with an
	 * exclusive hold of global lock at IRQ priority.  We're only
	 * expecting to be dealing with small numbers of interfaces, so the
	 * O(n**3)-ness here shouldn't matter */

	write_lock_bh(global_lock);

	if (net->ksnn_ninterfaces < 2) {
		/* Only create additional connections
		 * if I have > 1 interface */
		write_unlock_bh(global_lock);
		return;
	}

	LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);

	for (i = 0; i < npeer_ipaddrs; i++) {
		if (newroute != NULL) {
			/* reuse the route left over from the previous
			 * iteration (it was a duplicate or unmatchable) */
			newroute->ksnr_ipaddr = peer_ipaddrs[i];
		} else {
			/* route allocation may sleep/alloc: drop the
			 * IRQ-priority lock around it */
			write_unlock_bh(global_lock);

			newroute = ksocknal_create_route(peer_ipaddrs[i], port);
			if (newroute == NULL)
				return;

			write_lock_bh(global_lock);
		}

		if (peer_ni->ksnp_closing) {
			/* peer_ni got closed under me */
			break;
		}

		/* Already got a route? */
		route = NULL;
		list_for_each(rtmp, &peer_ni->ksnp_routes) {
			route = list_entry(rtmp, struct ksock_route, ksnr_list);

			if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
				break;

			route = NULL;
		}
		if (route != NULL)
			continue;

		best_iface = NULL;
		best_nroutes = 0;
		best_netmatch = 0;

		LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);

		/* Select interface to connect from */
		for (j = 0; j < net->ksnn_ninterfaces; j++) {
			iface = &net->ksnn_interfaces[j];

			/* Using this interface already? */
			list_for_each(rtmp, &peer_ni->ksnp_routes) {
				route = list_entry(rtmp, struct ksock_route,
						   ksnr_list);

				if (route->ksnr_myipaddr == iface->ksni_ipaddr)
					break;

				route = NULL;
			}
			if (route != NULL)
				continue;

			/* 1 if iface and the peer address share a subnet
			 * under iface's netmask, else 0 */
			this_netmatch = (((iface->ksni_ipaddr ^
					   newroute->ksnr_ipaddr) &
					   iface->ksni_netmask) == 0) ? 1 : 0;

			/* keep current best unless this iface matches the
			 * subnet better, or ties but carries fewer routes */
			if (!(best_iface == NULL ||
			      best_netmatch < this_netmatch ||
			      (best_netmatch == this_netmatch &&
			       best_nroutes > iface->ksni_nroutes)))
				continue;

			best_iface = iface;
			best_netmatch = this_netmatch;
			best_nroutes = iface->ksni_nroutes;
		}

		if (best_iface == NULL)
			continue;

		newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
		best_iface->ksni_nroutes++;

		/* list takes over my ref on newroute */
		ksocknal_add_route_locked(peer_ni, newroute);
		newroute = NULL;
	}

	write_unlock_bh(global_lock);
	if (newroute != NULL)
		ksocknal_route_decref(newroute);
}
983
984 int
985 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
986 {
987         struct ksock_connreq *cr;
988         int rc;
989         u32 peer_ip;
990         int peer_port;
991
992         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
993         LASSERT(rc == 0);               /* we succeeded before */
994
995         LIBCFS_ALLOC(cr, sizeof(*cr));
996         if (cr == NULL) {
997                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
998                                    "%pI4h: memory exhausted\n", &peer_ip);
999                 return -ENOMEM;
1000         }
1001
1002         lnet_ni_addref(ni);
1003         cr->ksncr_ni   = ni;
1004         cr->ksncr_sock = sock;
1005
1006         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1007
1008         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
1009         wake_up(&ksocknal_data.ksnd_connd_waitq);
1010
1011         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1012         return 0;
1013 }
1014
1015 static int
1016 ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
1017 {
1018         struct ksock_route *route;
1019
1020         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1021                 if (route->ksnr_ipaddr == ipaddr)
1022                         return route->ksnr_connecting;
1023         }
1024         return 0;
1025 }
1026
1027 int
1028 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
1029                      struct socket *sock, int type)
1030 {
1031         rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1032         struct list_head zombies = LIST_HEAD_INIT(zombies);
1033         struct lnet_process_id peerid;
1034         struct list_head *tmp;
1035         u64 incarnation;
1036         struct ksock_conn *conn;
1037         struct ksock_conn *conn2;
1038         struct ksock_peer_ni *peer_ni = NULL;
1039         struct ksock_peer_ni *peer2;
1040         struct ksock_sched *sched;
1041         struct ksock_hello_msg *hello;
1042         int cpt;
1043         struct ksock_tx *tx;
1044         struct ksock_tx *txtmp;
1045         int rc;
1046         int rc2;
1047         int active;
1048         char *warn = NULL;
1049
1050         active = (route != NULL);
1051
1052         LASSERT (active == (type != SOCKLND_CONN_NONE));
1053
1054         LIBCFS_ALLOC(conn, sizeof(*conn));
1055         if (conn == NULL) {
1056                 rc = -ENOMEM;
1057                 goto failed_0;
1058         }
1059
1060         conn->ksnc_peer = NULL;
1061         conn->ksnc_route = NULL;
1062         conn->ksnc_sock = sock;
1063         /* 2 ref, 1 for conn, another extra ref prevents socket
1064          * being closed before establishment of connection */
1065         atomic_set (&conn->ksnc_sock_refcount, 2);
1066         conn->ksnc_type = type;
1067         ksocknal_lib_save_callback(sock, conn);
1068         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1069
1070         conn->ksnc_rx_ready = 0;
1071         conn->ksnc_rx_scheduled = 0;
1072
1073         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1074         conn->ksnc_tx_ready = 0;
1075         conn->ksnc_tx_scheduled = 0;
1076         conn->ksnc_tx_carrier = NULL;
1077         atomic_set (&conn->ksnc_tx_nob, 0);
1078
1079         LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1080                                      kshm_ips[LNET_INTERFACES_NUM]));
1081         if (hello == NULL) {
1082                 rc = -ENOMEM;
1083                 goto failed_1;
1084         }
1085
1086         /* stash conn's local and remote addrs */
1087         rc = ksocknal_lib_get_conn_addrs (conn);
1088         if (rc != 0)
1089                 goto failed_1;
1090
1091         /* Find out/confirm peer_ni's NID and connection type and get the
1092          * vector of interfaces she's willing to let me connect to.
1093          * Passive connections use the listener timeout since the peer_ni sends
1094          * eagerly */
1095
1096         if (active) {
1097                 peer_ni = route->ksnr_peer;
1098                 LASSERT(ni == peer_ni->ksnp_ni);
1099
1100                 /* Active connection sends HELLO eagerly */
1101                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1102                 peerid = peer_ni->ksnp_id;
1103
1104                 write_lock_bh(global_lock);
1105                 conn->ksnc_proto = peer_ni->ksnp_proto;
1106                 write_unlock_bh(global_lock);
1107
1108                 if (conn->ksnc_proto == NULL) {
1109                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1110 #if SOCKNAL_VERSION_DEBUG
1111                          if (*ksocknal_tunables.ksnd_protocol == 2)
1112                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1113                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1114                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1115 #endif
1116                 }
1117
1118                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1119                 if (rc != 0)
1120                         goto failed_1;
1121         } else {
1122                 peerid.nid = LNET_NID_ANY;
1123                 peerid.pid = LNET_PID_ANY;
1124
1125                 /* Passive, get protocol from peer_ni */
1126                 conn->ksnc_proto = NULL;
1127         }
1128
1129         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1130         if (rc < 0)
1131                 goto failed_1;
1132
1133         LASSERT (rc == 0 || active);
1134         LASSERT (conn->ksnc_proto != NULL);
1135         LASSERT (peerid.nid != LNET_NID_ANY);
1136
1137         cpt = lnet_cpt_of_nid(peerid.nid, ni);
1138
1139         if (active) {
1140                 ksocknal_peer_addref(peer_ni);
1141                 write_lock_bh(global_lock);
1142         } else {
1143                 rc = ksocknal_create_peer(&peer_ni, ni, peerid);
1144                 if (rc != 0)
1145                         goto failed_1;
1146
1147                 write_lock_bh(global_lock);
1148
1149                 /* called with a ref on ni, so shutdown can't have started */
1150                 LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
1151
1152                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1153                 if (peer2 == NULL) {
1154                         /* NB this puts an "empty" peer_ni in the peer_ni
1155                          * table (which takes my ref) */
1156                         list_add_tail(&peer_ni->ksnp_list,
1157                                       ksocknal_nid2peerlist(peerid.nid));
1158                 } else {
1159                         ksocknal_peer_decref(peer_ni);
1160                         peer_ni = peer2;
1161                 }
1162
1163                 /* +1 ref for me */
1164                 ksocknal_peer_addref(peer_ni);
1165                 peer_ni->ksnp_accepting++;
1166
1167                 /* Am I already connecting to this guy?  Resolve in
1168                  * favour of higher NID... */
1169                 if (peerid.nid < ni->ni_nid &&
1170                     ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1171                         rc = EALREADY;
1172                         warn = "connection race resolution";
1173                         goto failed_2;
1174                 }
1175         }
1176
1177         if (peer_ni->ksnp_closing ||
1178             (active && route->ksnr_deleted)) {
1179                 /* peer_ni/route got closed under me */
1180                 rc = -ESTALE;
1181                 warn = "peer_ni/route removed";
1182                 goto failed_2;
1183         }
1184
1185         if (peer_ni->ksnp_proto == NULL) {
1186                 /* Never connected before.
1187                  * NB recv_hello may have returned EPROTO to signal my peer_ni
1188                  * wants a different protocol than the one I asked for.
1189                  */
1190                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1191
1192                 peer_ni->ksnp_proto = conn->ksnc_proto;
1193                 peer_ni->ksnp_incarnation = incarnation;
1194         }
1195
1196         if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1197             peer_ni->ksnp_incarnation != incarnation) {
1198                 /* peer_ni rebooted or I've got the wrong protocol version */
1199                 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1200
1201                 peer_ni->ksnp_proto = NULL;
1202                 rc = ESTALE;
1203                 warn = peer_ni->ksnp_incarnation != incarnation ?
1204                        "peer_ni rebooted" :
1205                        "wrong proto version";
1206                 goto failed_2;
1207         }
1208
1209         switch (rc) {
1210         default:
1211                 LBUG();
1212         case 0:
1213                 break;
1214         case EALREADY:
1215                 warn = "lost conn race";
1216                 goto failed_2;
1217         case EPROTO:
1218                 warn = "retry with different protocol version";
1219                 goto failed_2;
1220         }
1221
1222         /* Refuse to duplicate an existing connection, unless this is a
1223          * loopback connection */
1224         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1225                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1226                         conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1227
1228                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1229                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1230                             conn2->ksnc_type != conn->ksnc_type)
1231                                 continue;
1232
1233                         /* Reply on a passive connection attempt so the peer_ni
1234                          * realises we're connected. */
1235                         LASSERT (rc == 0);
1236                         if (!active)
1237                                 rc = EALREADY;
1238
1239                         warn = "duplicate";
1240                         goto failed_2;
1241                 }
1242         }
1243
1244         /* If the connection created by this route didn't bind to the IP
1245          * address the route connected to, the connection/route matching
1246          * code below probably isn't going to work. */
1247         if (active &&
1248             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1249                 CERROR("Route %s %pI4h connected to %pI4h\n",
1250                        libcfs_id2str(peer_ni->ksnp_id),
1251                        &route->ksnr_ipaddr,
1252                        &conn->ksnc_ipaddr);
1253         }
1254
1255         /* Search for a route corresponding to the new connection and
1256          * create an association.  This allows incoming connections created
1257          * by routes in my peer_ni to match my own route entries so I don't
1258          * continually create duplicate routes. */
1259         list_for_each(tmp, &peer_ni->ksnp_routes) {
1260                 route = list_entry(tmp, struct ksock_route, ksnr_list);
1261
1262                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1263                         continue;
1264
1265                 ksocknal_associate_route_conn_locked(route, conn);
1266                 break;
1267         }
1268
1269         conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
1270         peer_ni->ksnp_last_alive = ktime_get_seconds();
1271         peer_ni->ksnp_send_keepalive = 0;
1272         peer_ni->ksnp_error = 0;
1273
1274         sched = ksocknal_choose_scheduler_locked(cpt);
1275         if (!sched) {
1276                 CERROR("no schedulers available. node is unhealthy\n");
1277                 goto failed_2;
1278         }
1279         /*
1280          * The cpt might have changed if we ended up selecting a non cpt
1281          * native scheduler. So use the scheduler's cpt instead.
1282          */
1283         cpt = sched->kss_info->ksi_cpt;
1284         sched->kss_nconns++;
1285         conn->ksnc_scheduler = sched;
1286
1287         conn->ksnc_tx_last_post = ktime_get_seconds();
1288         /* Set the deadline for the outgoing HELLO to drain */
1289         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1290         conn->ksnc_tx_deadline = ktime_get_seconds() +
1291                                  *ksocknal_tunables.ksnd_timeout;
1292         smp_mb();   /* order with adding to peer_ni's conn list */
1293
1294         list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1295         ksocknal_conn_addref(conn);
1296
1297         ksocknal_new_packet(conn, 0);
1298
1299         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1300
1301         /* Take packets blocking for this connection. */
1302         list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1303                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1304                     SOCKNAL_MATCH_NO)
1305                         continue;
1306
1307                 list_del(&tx->tx_list);
1308                 ksocknal_queue_tx_locked(tx, conn);
1309         }
1310
1311         write_unlock_bh(global_lock);
1312
1313         /* We've now got a new connection.  Any errors from here on are just
1314          * like "normal" comms errors and we close the connection normally.
1315          * NB (a) we still have to send the reply HELLO for passive
1316          *        connections,
1317          *    (b) normal I/O on the conn is blocked until I setup and call the
1318          *        socket callbacks.
1319          */
1320
1321         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1322                " incarnation:%lld sched[%d:%d]\n",
1323                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1324                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1325                conn->ksnc_port, incarnation, cpt,
1326                (int)(sched - &sched->kss_info->ksi_scheds[0]));
1327
1328         if (active) {
1329                 /* additional routes after interface exchange? */
1330                 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1331                                        hello->kshm_ips, hello->kshm_nips);
1332         } else {
1333                 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1334                                                        hello->kshm_nips);
1335                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1336         }
1337
1338         LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1339                                     kshm_ips[LNET_INTERFACES_NUM]));
1340
1341         /* setup the socket AFTER I've received hello (it disables
1342          * SO_LINGER).  I might call back to the acceptor who may want
1343          * to send a protocol version response and then close the
1344          * socket; this ensures the socket only tears down after the
1345          * response has been sent. */
1346         if (rc == 0)
1347                 rc = ksocknal_lib_setup_sock(sock);
1348
1349         write_lock_bh(global_lock);
1350
1351         /* NB my callbacks block while I hold ksnd_global_lock */
1352         ksocknal_lib_set_callback(sock, conn);
1353
1354         if (!active)
1355                 peer_ni->ksnp_accepting--;
1356
1357         write_unlock_bh(global_lock);
1358
1359         if (rc != 0) {
1360                 write_lock_bh(global_lock);
1361                 if (!conn->ksnc_closing) {
1362                         /* could be closed by another thread */
1363                         ksocknal_close_conn_locked(conn, rc);
1364                 }
1365                 write_unlock_bh(global_lock);
1366         } else if (ksocknal_connsock_addref(conn) == 0) {
1367                 /* Allow I/O to proceed. */
1368                 ksocknal_read_callback(conn);
1369                 ksocknal_write_callback(conn);
1370                 ksocknal_connsock_decref(conn);
1371         }
1372
1373         ksocknal_connsock_decref(conn);
1374         ksocknal_conn_decref(conn);
1375         return rc;
1376
1377 failed_2:
1378         if (!peer_ni->ksnp_closing &&
1379             list_empty(&peer_ni->ksnp_conns) &&
1380             list_empty(&peer_ni->ksnp_routes)) {
1381                 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1382                 list_del_init(&peer_ni->ksnp_tx_queue);
1383                 ksocknal_unlink_peer_locked(peer_ni);
1384         }
1385
1386         write_unlock_bh(global_lock);
1387
1388         if (warn != NULL) {
1389                 if (rc < 0)
1390                         CERROR("Not creating conn %s type %d: %s\n",
1391                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1392                 else
1393                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1394                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1395         }
1396
1397         if (!active) {
1398                 if (rc > 0) {
1399                         /* Request retry by replying with CONN_NONE
1400                          * ksnc_proto has been set already */
1401                         conn->ksnc_type = SOCKLND_CONN_NONE;
1402                         hello->kshm_nips = 0;
1403                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1404                 }
1405
1406                 write_lock_bh(global_lock);
1407                 peer_ni->ksnp_accepting--;
1408                 write_unlock_bh(global_lock);
1409         }
1410
1411         /*
1412          * If we get here without an error code, just use -EALREADY.
1413          * Depending on how we got here, the error may be positive
1414          * or negative. Normalize the value for ksocknal_txlist_done().
1415          */
1416         rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1417         ksocknal_txlist_done(ni, &zombies, rc2);
1418         ksocknal_peer_decref(peer_ni);
1419
1420 failed_1:
1421         if (hello != NULL)
1422                 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1423                                             kshm_ips[LNET_INTERFACES_NUM]));
1424
1425         LIBCFS_FREE(conn, sizeof(*conn));
1426
1427 failed_0:
1428         sock_release(sock);
1429         return rc;
1430 }
1431
/*
 * Begin closing @conn with close reason @error: dissociate it from its
 * route, move any blocked TXs onto it so the scheduler can fail them,
 * and queue the conn on deathrow for the reaper to terminate.
 * Caller holds ksnd_global_lock exclusively in irq context.
 */
void
ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
	/* This just does the immmediate housekeeping, and queues the
	 * connection for the reaper to terminate.
	 * Caller holds ksnd_global_lock exclusively in irq context */
	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
	struct ksock_route *route;
	struct ksock_conn *conn2;
	struct list_head *tmp;

	LASSERT(peer_ni->ksnp_error == 0);
	LASSERT(!conn->ksnc_closing);
	conn->ksnc_closing = 1;

	/* ksnd_deathrow_conns takes over peer_ni's ref */
	list_del(&conn->ksnc_list);

	route = conn->ksnc_route;
	if (route != NULL) {
		/* dissociate conn from route... */
		LASSERT(!route->ksnr_deleted);
		LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

		/* only clear the route's "connected" bit for this type if
		 * no OTHER conn of the same type still uses the route */
		conn2 = NULL;
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);

			if (conn2->ksnc_route == route &&
			    conn2->ksnc_type == conn->ksnc_type)
				break;

			conn2 = NULL;
		}
		if (conn2 == NULL)
			route->ksnr_connected &= ~(1 << conn->ksnc_type);

		conn->ksnc_route = NULL;

		ksocknal_route_decref(route);	/* drop conn's ref on route */
	}

	if (list_empty(&peer_ni->ksnp_conns)) {
		/* No more connections to this peer_ni */

		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			struct ksock_tx *tx;

			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

			/* throw them to the last connection...,
			 * these TXs will be send to /dev/null by scheduler */
			list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
					    tx_list)
				ksocknal_tx_prep(conn, tx);

			spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
			list_splice_init(&peer_ni->ksnp_tx_queue,
					 &conn->ksnc_tx_queue);
			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
		}

		/* renegotiate protocol version */
		peer_ni->ksnp_proto = NULL;
		/* stash last conn close reason */
		peer_ni->ksnp_error = error;

		if (list_empty(&peer_ni->ksnp_routes)) {
			/* I've just closed last conn belonging to a
			 * peer_ni with no routes to it */
			ksocknal_unlink_peer_locked(peer_ni);
		}
	}

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	list_add_tail(&conn->ksnc_list,
		      &ksocknal_data.ksnd_deathrow_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
1514
1515 void
1516 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1517 {
1518         int notify = 0;
1519         time64_t last_alive = 0;
1520
1521         /* There has been a connection failure or comms error; but I'll only
1522          * tell LNET I think the peer_ni is dead if it's to another kernel and
1523          * there are no connections or connection attempts in existence. */
1524
1525         read_lock(&ksocknal_data.ksnd_global_lock);
1526
1527         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1528              list_empty(&peer_ni->ksnp_conns) &&
1529              peer_ni->ksnp_accepting == 0 &&
1530              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1531                 notify = 1;
1532                 last_alive = peer_ni->ksnp_last_alive;
1533         }
1534
1535         read_unlock(&ksocknal_data.ksnd_global_lock);
1536
1537         if (notify)
1538                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1539                             cfs_time_seconds(last_alive)); /* to jiffies */
1540 }
1541
1542 void
1543 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1544 {
1545         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1546         struct ksock_tx *tx;
1547         struct ksock_tx *tmp;
1548         struct list_head zlist = LIST_HEAD_INIT(zlist);
1549
1550         /* NB safe to finalize TXs because closing of socket will
1551          * abort all buffered data */
1552         LASSERT(conn->ksnc_sock == NULL);
1553
1554         spin_lock(&peer_ni->ksnp_lock);
1555
1556         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1557                 if (tx->tx_conn != conn)
1558                         continue;
1559
1560                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1561
1562                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1563                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1564                 list_del(&tx->tx_zc_list);
1565                 list_add(&tx->tx_zc_list, &zlist);
1566         }
1567
1568         spin_unlock(&peer_ni->ksnp_lock);
1569
1570         while (!list_empty(&zlist)) {
1571                 tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
1572
1573                 list_del(&tx->tx_zc_list);
1574                 ksocknal_tx_decref(tx);
1575         }
1576 }
1577
/*
 * Disengage @conn from its socket callbacks and close the socket.
 * Called by the reaper (guaranteed thread context) for conns queued
 * by ksocknal_close_conn_locked().
 */
void
ksocknal_terminate_conn(struct ksock_conn *conn)
{
	/* This gets called by the reaper (guaranteed thread context) to
	 * disengage the socket from its callbacks and close it.
	 * ksnc_refcount will eventually hit zero, and then the reaper will
	 * destroy it. */
	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
	struct ksock_sched *sched = conn->ksnc_scheduler;
	int failed = 0;

	LASSERT(conn->ksnc_closing);

	/* wake up the scheduler to "send" all remaining packets to /dev/null */
	spin_lock_bh(&sched->kss_lock);

	/* a closing conn is always ready to tx */
	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&
	    !list_empty(&conn->ksnc_tx_queue)) {
		list_add_tail(&conn->ksnc_tx_list,
			       &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up (&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);

	/* serialise with callbacks */
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

	/* OK, so this conn may not be completely disengaged from its
	 * scheduler yet, but it _has_ committed to terminate... */
	conn->ksnc_scheduler->kss_nconns--;

	if (peer_ni->ksnp_error != 0) {
		/* peer_ni's last conn closed in error */
		LASSERT(list_empty(&peer_ni->ksnp_conns));
		failed = 1;
		peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	/* must notify outside the global lock */
	if (failed)
		ksocknal_peer_failed(peer_ni);

	/* The socket is closed on the final put; either here, or in
	 * ksocknal_{send,recv}msg().  Since we set up the linger2 option
	 * when the connection was established, this will close the socket
	 * immediately, aborting anything buffered in it. Any hung
	 * zero-copy transmits will therefore complete in finite time. */
	ksocknal_connsock_decref(conn);
}
1638
void
ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
	/* Queue the conn for the reaper to destroy.
	 * Called when the last conn ref is dropped; the reaper thread does
	 * the final free via ksocknal_destroy_conn(). */
	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
1651
void
ksocknal_destroy_conn(struct ksock_conn *conn)
{
	/* Final coup-de-grace of the reaper: both refcounts must be zero,
	 * the socket already released, and nothing scheduled, before the
	 * conn memory can be freed. */
	time64_t last_rcv;

	CDEBUG (D_NET, "connection %p\n", conn);

	LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
	LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
	LASSERT (conn->ksnc_sock == NULL);
	LASSERT (conn->ksnc_route == NULL);
	LASSERT (!conn->ksnc_tx_scheduled);
	LASSERT (!conn->ksnc_rx_scheduled);
	LASSERT(list_empty(&conn->ksnc_tx_queue));

	/* complete current receive if any */
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_LNET_PAYLOAD:
		/* an LNet payload receive was in flight: finalize it with
		 * -EIO so LNet can release the MD it was landing in */
		last_rcv = conn->ksnc_rx_deadline -
			   *ksocknal_tunables.ksnd_timeout;
		CERROR("Completing partial receive from %s[%d], "
		       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
		       "last alive is %lld secs ago\n",
		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
		       &conn->ksnc_ipaddr, conn->ksnc_port,
		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
		       ktime_get_seconds() - last_rcv);
		lnet_finalize(conn->ksnc_cookie, -EIO);
		break;
	case SOCKNAL_RX_LNET_HEADER:
		/* header receive interrupted; nothing handed to LNet yet,
		 * so only log (and only if any bytes had arrived) */
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of lnet header from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of ksock message from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_SLOP:
		/* discarding trailing slop when the conn died */
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of slops from %s, "
			       "ip %pI4h:%d, with error\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port);
		break;
	default:
		/* any other rx state here is a logic error */
		LBUG ();
		break;
	}

	/* drop the ref the conn held on its peer_ni, then free */
	ksocknal_peer_decref(conn->ksnc_peer);

	LIBCFS_FREE (conn, sizeof (*conn));
}
1714
1715 int
1716 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
1717 {
1718         struct ksock_conn *conn;
1719         struct list_head *ctmp;
1720         struct list_head *cnxt;
1721         int count = 0;
1722
1723         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1724                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1725
1726                 if (ipaddr == 0 ||
1727                     conn->ksnc_ipaddr == ipaddr) {
1728                         count++;
1729                         ksocknal_close_conn_locked (conn, why);
1730                 }
1731         }
1732
1733         return (count);
1734 }
1735
1736 int
1737 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1738 {
1739         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1740         u32 ipaddr = conn->ksnc_ipaddr;
1741         int count;
1742
1743         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1744
1745         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1746
1747         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1748
1749         return (count);
1750 }
1751
1752 int
1753 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1754 {
1755         struct ksock_peer_ni *peer_ni;
1756         struct list_head *ptmp;
1757         struct list_head *pnxt;
1758         int lo;
1759         int hi;
1760         int i;
1761         int count = 0;
1762
1763         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1764
1765         if (id.nid != LNET_NID_ANY)
1766                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1767         else {
1768                 lo = 0;
1769                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1770         }
1771
1772         for (i = lo; i <= hi; i++) {
1773                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1774
1775                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
1776
1777                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1778                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1779                                 continue;
1780
1781                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1782                 }
1783         }
1784
1785         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1786
1787         /* wildcards always succeed */
1788         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1789                 return (0);
1790
1791         return (count == 0 ? -ENOENT : 0);
1792 }
1793
1794 void
1795 ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
1796 {
1797         /* The router is telling me she's been notified of a change in
1798          * gateway state....
1799          */
1800         struct lnet_process_id id = {
1801                 .nid    = gw_nid,
1802                 .pid    = LNET_PID_ANY,
1803         };
1804
1805         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1806                 alive ? "up" : "down");
1807
1808         if (!alive) {
1809                 /* If the gateway crashed, close all open connections... */
1810                 ksocknal_close_matching_conns (id, 0);
1811                 return;
1812         }
1813
1814         /* ...otherwise do nothing.  We can only establish new connections
1815          * if we have autroutes, and these connect on demand. */
1816 }
1817
1818 void
1819 ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
1820 {
1821         int connect = 1;
1822         time64_t last_alive = 0;
1823         time64_t now = ktime_get_seconds();
1824         struct ksock_peer_ni *peer_ni = NULL;
1825         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1826         struct lnet_process_id id = {
1827                 .nid = nid,
1828                 .pid = LNET_PID_LUSTRE,
1829         };
1830
1831         read_lock(glock);
1832
1833         peer_ni = ksocknal_find_peer_locked(ni, id);
1834         if (peer_ni != NULL) {
1835                 struct list_head *tmp;
1836                 struct ksock_conn *conn;
1837                 int bufnob;
1838
1839                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1840                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
1841                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1842
1843                         if (bufnob < conn->ksnc_tx_bufnob) {
1844                                 /* something got ACKed */
1845                                 conn->ksnc_tx_deadline = ktime_get_seconds() +
1846                                                          *ksocknal_tunables.ksnd_timeout;
1847                                 peer_ni->ksnp_last_alive = now;
1848                                 conn->ksnc_tx_bufnob = bufnob;
1849                         }
1850                 }
1851
1852                 last_alive = peer_ni->ksnp_last_alive;
1853                 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1854                         connect = 0;
1855         }
1856
1857         read_unlock(glock);
1858
1859         if (last_alive != 0)
1860                 *when = last_alive;
1861
1862         CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
1863                libcfs_nid2str(nid), peer_ni,
1864                last_alive ? now - last_alive : -1,
1865                connect);
1866
1867         if (!connect)
1868                 return;
1869
1870         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1871
1872         write_lock_bh(glock);
1873
1874         peer_ni = ksocknal_find_peer_locked(ni, id);
1875         if (peer_ni != NULL)
1876                 ksocknal_launch_all_connections_locked(peer_ni);
1877
1878         write_unlock_bh(glock);
1879         return;
1880 }
1881
1882 static void
1883 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1884 {
1885         int index;
1886         int i;
1887         struct list_head *tmp;
1888         struct ksock_conn *conn;
1889
1890         for (index = 0; ; index++) {
1891                 read_lock(&ksocknal_data.ksnd_global_lock);
1892
1893                 i = 0;
1894                 conn = NULL;
1895
1896                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1897                         if (i++ == index) {
1898                                 conn = list_entry(tmp, struct ksock_conn,
1899                                                   ksnc_list);
1900                                 ksocknal_conn_addref(conn);
1901                                 break;
1902                         }
1903                 }
1904
1905                 read_unlock(&ksocknal_data.ksnd_global_lock);
1906
1907                 if (conn == NULL)
1908                         break;
1909
1910                 ksocknal_lib_push_conn (conn);
1911                 ksocknal_conn_decref(conn);
1912         }
1913 }
1914
1915 static int
1916 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1917 {
1918         struct list_head *start;
1919         struct list_head *end;
1920         struct list_head *tmp;
1921         int               rc = -ENOENT;
1922         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1923
1924         if (id.nid == LNET_NID_ANY) {
1925                 start = &ksocknal_data.ksnd_peers[0];
1926                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1927         } else {
1928                 start = end = ksocknal_nid2peerlist(id.nid);
1929         }
1930
1931         for (tmp = start; tmp <= end; tmp++) {
1932                 int     peer_off; /* searching offset in peer_ni hash table */
1933
1934                 for (peer_off = 0; ; peer_off++) {
1935                         struct ksock_peer_ni *peer_ni;
1936                         int           i = 0;
1937
1938                         read_lock(&ksocknal_data.ksnd_global_lock);
1939                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1940                                 if (!((id.nid == LNET_NID_ANY ||
1941                                        id.nid == peer_ni->ksnp_id.nid) &&
1942                                       (id.pid == LNET_PID_ANY ||
1943                                        id.pid == peer_ni->ksnp_id.pid)))
1944                                         continue;
1945
1946                                 if (i++ == peer_off) {
1947                                         ksocknal_peer_addref(peer_ni);
1948                                         break;
1949                                 }
1950                         }
1951                         read_unlock(&ksocknal_data.ksnd_global_lock);
1952
1953                         if (i == 0) /* no match */
1954                                 break;
1955
1956                         rc = 0;
1957                         ksocknal_push_peer(peer_ni);
1958                         ksocknal_peer_decref(peer_ni);
1959                 }
1960         }
1961         return rc;
1962 }
1963
1964 static int
1965 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1966 {
1967         struct ksock_net *net = ni->ni_data;
1968         struct ksock_interface *iface;
1969         int rc;
1970         int i;
1971         int j;
1972         struct list_head *ptmp;
1973         struct ksock_peer_ni *peer_ni;
1974         struct list_head *rtmp;
1975         struct ksock_route *route;
1976
1977         if (ipaddress == 0 ||
1978             netmask == 0)
1979                 return -EINVAL;
1980
1981         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1982
1983         iface = ksocknal_ip2iface(ni, ipaddress);
1984         if (iface != NULL) {
1985                 /* silently ignore dups */
1986                 rc = 0;
1987         } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
1988                 rc = -ENOSPC;
1989         } else {
1990                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1991
1992                 iface->ksni_ipaddr = ipaddress;
1993                 iface->ksni_netmask = netmask;
1994                 iface->ksni_nroutes = 0;
1995                 iface->ksni_npeers = 0;
1996
1997                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1998                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1999                                 peer_ni = list_entry(ptmp, struct ksock_peer_ni,
2000                                                      ksnp_list);
2001
2002                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
2003                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
2004                                                 iface->ksni_npeers++;
2005
2006                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
2007                                         route = list_entry(rtmp,
2008                                                            struct ksock_route,
2009                                                            ksnr_list);
2010
2011                                         if (route->ksnr_myipaddr == ipaddress)
2012                                                 iface->ksni_nroutes++;
2013                                 }
2014                         }
2015                 }
2016
2017                 rc = 0;
2018                 /* NB only new connections will pay attention to the new interface! */
2019         }
2020
2021         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2022
2023         return rc;
2024 }
2025
2026 static void
2027 ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
2028 {
2029         struct list_head *tmp;
2030         struct list_head *nxt;
2031         struct ksock_route *route;
2032         struct ksock_conn *conn;
2033         int i;
2034         int j;
2035
2036         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2037                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2038                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2039                                 peer_ni->ksnp_passive_ips[j-1] =
2040                                         peer_ni->ksnp_passive_ips[j];
2041                         peer_ni->ksnp_n_passive_ips--;
2042                         break;
2043                 }
2044
2045         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2046                 route = list_entry(tmp, struct ksock_route, ksnr_list);
2047
2048                 if (route->ksnr_myipaddr != ipaddr)
2049                         continue;
2050
2051                 if (route->ksnr_share_count != 0) {
2052                         /* Manually created; keep, but unbind */
2053                         route->ksnr_myipaddr = 0;
2054                 } else {
2055                         ksocknal_del_route_locked(route);
2056                 }
2057         }
2058
2059         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2060                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2061
2062                 if (conn->ksnc_myipaddr == ipaddr)
2063                         ksocknal_close_conn_locked (conn, 0);
2064         }
2065 }
2066
2067 static int
2068 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2069 {
2070         struct ksock_net *net = ni->ni_data;
2071         int rc = -ENOENT;
2072         struct list_head *tmp;
2073         struct list_head *nxt;
2074         struct ksock_peer_ni *peer_ni;
2075         u32 this_ip;
2076         int i;
2077         int j;
2078
2079         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2080
2081         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2082                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2083
2084                 if (!(ipaddress == 0 ||
2085                       ipaddress == this_ip))
2086                         continue;
2087
2088                 rc = 0;
2089
2090                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2091                         net->ksnn_interfaces[j-1] =
2092                                 net->ksnn_interfaces[j];
2093
2094                 net->ksnn_ninterfaces--;
2095
2096                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2097                         list_for_each_safe(tmp, nxt,
2098                                            &ksocknal_data.ksnd_peers[j]) {
2099                                 peer_ni = list_entry(tmp, struct ksock_peer_ni,
2100                                                      ksnp_list);
2101
2102                                 if (peer_ni->ksnp_ni != ni)
2103                                         continue;
2104
2105                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2106                         }
2107                 }
2108         }
2109
2110         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2111
2112         return (rc);
2113 }
2114
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
	/* ioctl entry point for the socklnd: decode @arg as a
	 * struct libcfs_ioctl_data and dispatch on @cmd.
	 * Returns 0 on success or a negative errno. */
	struct lnet_process_id id = {0};
	struct libcfs_ioctl_data *data = arg;
	int rc;

	switch(cmd) {
	case IOC_LIBCFS_GET_INTERFACE: {
		/* report the ioc_count'th local interface of this NI */
		struct ksock_net *net = ni->ni_data;
		struct ksock_interface *iface;

		read_lock(&ksocknal_data.ksnd_global_lock);

		if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
			rc = -ENOENT;
		} else {
			rc = 0;
			iface = &net->ksnn_interfaces[data->ioc_count];

			data->ioc_u32[0] = iface->ksni_ipaddr;
			data->ioc_u32[1] = iface->ksni_netmask;
			data->ioc_u32[2] = iface->ksni_npeers;
			data->ioc_u32[3] = iface->ksni_nroutes;
		}

		read_unlock(&ksocknal_data.ksnd_global_lock);
		return rc;
	}

	case IOC_LIBCFS_ADD_INTERFACE:
		return ksocknal_add_interface(ni,
					      data->ioc_u32[0], /* IP address */
					      data->ioc_u32[1]); /* net mask */

	case IOC_LIBCFS_DEL_INTERFACE:
		return ksocknal_del_interface(ni,
					      data->ioc_u32[0]); /* IP address */

	case IOC_LIBCFS_GET_PEER: {
		/* enumerate peers: ioc_count indexes, out-params are
		 * packed back into the same ioctl data block */
		__u32            myip = 0;
		__u32            ip = 0;
		int              port = 0;
		int              conn_count = 0;
		int              share_count = 0;

		rc = ksocknal_get_peer_info(ni, data->ioc_count,
					    &id, &myip, &ip, &port,
					    &conn_count,  &share_count);
		if (rc != 0)
			return rc;

		data->ioc_nid    = id.nid;
		data->ioc_count  = share_count;
		data->ioc_u32[0] = ip;
		data->ioc_u32[1] = port;
		data->ioc_u32[2] = myip;
		data->ioc_u32[3] = conn_count;
		data->ioc_u32[4] = id.pid;
		return 0;
	}

	case IOC_LIBCFS_ADD_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_LUSTRE;
		return ksocknal_add_peer (ni, id,
					  data->ioc_u32[0], /* IP */
					  data->ioc_u32[1]); /* port */

	case IOC_LIBCFS_DEL_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_del_peer (ni, id,
					  data->ioc_u32[0]); /* IP */

	case IOC_LIBCFS_GET_CONN: {
		/* enumerate conns: returns a referenced conn which must
		 * be (and is) decref'd before returning */
		int           txmem;
		int           rxmem;
		int           nagle;
		struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);

		if (conn == NULL)
			return -ENOENT;

		ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);

		data->ioc_count  = txmem;
		data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
		data->ioc_flags  = nagle;
		data->ioc_u32[0] = conn->ksnc_ipaddr;
		data->ioc_u32[1] = conn->ksnc_port;
		data->ioc_u32[2] = conn->ksnc_myipaddr;
		data->ioc_u32[3] = conn->ksnc_type;
		data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
		data->ioc_u32[5] = rxmem;
		data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
		ksocknal_conn_decref(conn);
		return 0;
	}

	case IOC_LIBCFS_CLOSE_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_close_matching_conns (id,
						      data->ioc_u32[0]);

	case IOC_LIBCFS_REGISTER_MYNID:
		/* Ignore if this is a noop */
		if (data->ioc_nid == ni->ni_nid)
			return 0;

		CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
		       libcfs_nid2str(data->ioc_nid),
		       libcfs_nid2str(ni->ni_nid));
		return -EINVAL;

	case IOC_LIBCFS_PUSH_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_push(ni, id);

	default:
		return -EINVAL;
	}
	/* not reached */
}
2241
2242 static void
2243 ksocknal_free_buffers (void)
2244 {
2245         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2246
2247         if (ksocknal_data.ksnd_sched_info != NULL) {
2248                 struct ksock_sched_info *info;
2249                 int                     i;
2250
2251                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2252                         if (info->ksi_scheds != NULL) {
2253                                 LIBCFS_FREE(info->ksi_scheds,
2254                                             info->ksi_nthreads_max *
2255                                             sizeof(info->ksi_scheds[0]));
2256                         }
2257                 }
2258                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2259         }
2260
2261         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2262                      sizeof(struct list_head) *
2263                      ksocknal_data.ksnd_peer_hash_size);
2264
2265         spin_lock(&ksocknal_data.ksnd_tx_lock);
2266
2267         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2268                 struct list_head zlist;
2269                 struct ksock_tx *tx;
2270
2271                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2272                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2273                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2274
2275                 while (!list_empty(&zlist)) {
2276                         tx = list_entry(zlist.next, struct ksock_tx, tx_list);
2277                         list_del(&tx->tx_list);
2278                         LIBCFS_FREE(tx, tx->tx_desc_size);
2279                 }
2280         } else {
2281                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2282         }
2283 }
2284
static void
ksocknal_base_shutdown(void)
{
	/* Tear down all global socklnd state: assert everything is idle,
	 * flag all threads to exit, wake them, wait for them to die, then
	 * free the remaining buffers.  Only legal once ksnd_nnets == 0. */
	struct ksock_sched_info *info;
	struct ksock_sched *sched;
	int i;
	int j;

	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read (&libcfs_kmemory));
	LASSERT (ksocknal_data.ksnd_nnets == 0);

	switch (ksocknal_data.ksnd_init) {
	default:
		LASSERT (0);
		/* fallthrough */

	case SOCKNAL_INIT_ALL:
	case SOCKNAL_INIT_DATA:
		/* everything must already be idle/empty before teardown */
		LASSERT (ksocknal_data.ksnd_peers != NULL);
		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
		}

		LASSERT(list_empty(&ksocknal_data.ksnd_nets));
		LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {

					sched = &info->ksi_scheds[j];
					LASSERT(list_empty(&sched->\
							       kss_tx_conns));
					LASSERT(list_empty(&sched->\
							       kss_rx_conns));
					LASSERT(list_empty(&sched-> \
						  kss_zombie_noop_txs));
					LASSERT(sched->kss_nconns == 0);
				}
			}
		}

		/* flag threads to terminate; wake and wait for them to die */
		ksocknal_data.ksnd_shuttingdown = 1;
		wake_up_all(&ksocknal_data.ksnd_connd_waitq);
		wake_up_all(&ksocknal_data.ksnd_reaper_waitq);

		if (ksocknal_data.ksnd_sched_info != NULL) {
			/* wake each scheduler thread so it notices
			 * ksnd_shuttingdown */
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {
					sched = &info->ksi_scheds[j];
					wake_up_all(&sched->kss_waitq);
				}
			}
		}

		/* poll ksnd_nthreads (under the global lock) until the last
		 * thread has exited; log progressively less often */
		i = 4;
		read_lock(&ksocknal_data.ksnd_global_lock);
		while (ksocknal_data.ksnd_nthreads != 0) {
			i++;
			/* power of 2? */
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
				"waiting for %d threads to terminate\n",
				ksocknal_data.ksnd_nthreads);
			read_unlock(&ksocknal_data.ksnd_global_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
			read_lock(&ksocknal_data.ksnd_global_lock);
		}
		read_unlock(&ksocknal_data.ksnd_global_lock);

		ksocknal_free_buffers();

		ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
		break;
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read (&libcfs_kmemory));

	/* drop the module ref taken in ksocknal_base_startup() */
	module_put(THIS_MODULE);
}
2378
2379 static int
2380 ksocknal_base_startup(void)
2381 {
2382         struct ksock_sched_info *info;
2383         int                     rc;
2384         int                     i;
2385
2386         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2387         LASSERT (ksocknal_data.ksnd_nnets == 0);
2388
2389         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2390
2391         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2392         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2393                      sizeof(struct list_head) *
2394                      ksocknal_data.ksnd_peer_hash_size);
2395         if (ksocknal_data.ksnd_peers == NULL)
2396                 return -ENOMEM;
2397
2398         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2399                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2400
2401         rwlock_init(&ksocknal_data.ksnd_global_lock);
2402         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2403
2404         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2405         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2406         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2407         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2408         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2409
2410         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2411         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2412         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2413         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2414
2415         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2416         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2417
2418         /* NB memset above zeros whole of ksocknal_data */
2419
2420         /* flag lists/ptrs/locks initialised */
2421         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2422         try_module_get(THIS_MODULE);
2423
2424         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2425                                                          sizeof(*info));
2426         if (ksocknal_data.ksnd_sched_info == NULL)
2427                 goto failed;
2428
2429         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2430                 struct ksock_sched *sched;
2431                 int nthrs;
2432
2433                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2434                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2435                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2436                 } else {
2437                         /* max to half of CPUs, assume another half should be
2438                          * reserved for upper layer modules */
2439                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2440                 }
2441
2442                 info->ksi_nthreads_max = nthrs;
2443                 info->ksi_cpt = i;
2444
2445                 if (nthrs != 0) {
2446                         LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2447                                          info->ksi_nthreads_max *
2448                                                 sizeof(*sched));
2449                         if (info->ksi_scheds == NULL)
2450                                 goto failed;
2451
2452                         for (; nthrs > 0; nthrs--) {
2453                                 sched = &info->ksi_scheds[nthrs - 1];
2454
2455                                 sched->kss_info = info;
2456                                 spin_lock_init(&sched->kss_lock);
2457                                 INIT_LIST_HEAD(&sched->kss_rx_conns);
2458                                 INIT_LIST_HEAD(&sched->kss_tx_conns);
2459                                 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2460                                 init_waitqueue_head(&sched->kss_waitq);
2461                         }
2462                 }
2463         }
2464
2465         ksocknal_data.ksnd_connd_starting         = 0;
2466         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2467         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2468         /* must have at least 2 connds to remain responsive to accepts while
2469          * connecting */
2470         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2471                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2472
2473         if (*ksocknal_tunables.ksnd_nconnds_max <
2474             *ksocknal_tunables.ksnd_nconnds) {
2475                 ksocknal_tunables.ksnd_nconnds_max =
2476                         ksocknal_tunables.ksnd_nconnds;
2477         }
2478
2479         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2480                 char name[16];
2481                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2482                 ksocknal_data.ksnd_connd_starting++;
2483                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2484
2485
2486                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2487                 rc = ksocknal_thread_start(ksocknal_connd,
2488                                            (void *)((uintptr_t)i), name);
2489                 if (rc != 0) {
2490                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2491                         ksocknal_data.ksnd_connd_starting--;
2492                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2493                         CERROR("Can't spawn socknal connd: %d\n", rc);
2494                         goto failed;
2495                 }
2496         }
2497
2498         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2499         if (rc != 0) {
2500                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2501                 goto failed;
2502         }
2503
2504         /* flag everything initialised */
2505         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2506
2507         return 0;
2508
2509  failed:
2510         ksocknal_base_shutdown();
2511         return -ENETDOWN;
2512 }
2513
2514 static void
2515 ksocknal_debug_peerhash(struct lnet_ni *ni)
2516 {
2517         struct ksock_peer_ni *peer_ni = NULL;
2518         struct list_head *tmp;
2519         int i;
2520
2521         read_lock(&ksocknal_data.ksnd_global_lock);
2522
2523         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2524                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2525                         peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
2526
2527                         if (peer_ni->ksnp_ni == ni) break;
2528
2529                         peer_ni = NULL;
2530                 }
2531         }
2532
2533         if (peer_ni != NULL) {
2534                 struct ksock_route *route;
2535                 struct ksock_conn  *conn;
2536
2537                 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2538                        "closing %d, accepting %d, err %d, zcookie %llu, "
2539                        "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2540                        atomic_read(&peer_ni->ksnp_refcount),
2541                        peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2542                        peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2543                        peer_ni->ksnp_zc_next_cookie,
2544                        !list_empty(&peer_ni->ksnp_tx_queue),
2545                        !list_empty(&peer_ni->ksnp_zc_req_list));
2546
2547                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2548                         route = list_entry(tmp, struct ksock_route, ksnr_list);
2549                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2550                                "del %d\n", atomic_read(&route->ksnr_refcount),
2551                                route->ksnr_scheduled, route->ksnr_connecting,
2552                                route->ksnr_connected, route->ksnr_deleted);
2553                 }
2554
2555                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2556                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2557                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2558                                atomic_read(&conn->ksnc_conn_refcount),
2559                                atomic_read(&conn->ksnc_sock_refcount),
2560                                conn->ksnc_type, conn->ksnc_closing);
2561                 }
2562         }
2563
2564         read_unlock(&ksocknal_data.ksnd_global_lock);
2565         return;
2566 }
2567
/* LND shutdown entry point for one socklnd NI: block new peers, delete
 * existing ones, wait for the per-net peer count to drain, then free the
 * ksock_net.  When the last net goes, the whole LND base state is torn
 * down via ksocknal_base_shutdown(). */
void
ksocknal_shutdown(struct lnet_ni *ni)
{
	struct ksock_net *net = ni->ni_data;
	struct lnet_process_id anyid = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};
	int i;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);

	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_shutdown = 1;                 /* prevent new peers */
	spin_unlock_bh(&net->ksnn_lock);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);

	/* Wait for all peer_ni state to clean up */
	i = 2;
	spin_lock_bh(&net->ksnn_lock);
	while (net->ksnn_npeers != 0) {
		/* drop the lock across the 1-second sleep so peer teardown
		 * can make progress; warn only on power-of-2 polls to keep
		 * the log from flooding */
		spin_unlock_bh(&net->ksnn_lock);

		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "waiting for %d peers to disconnect\n",
		       net->ksnn_npeers);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));

		ksocknal_debug_peerhash(ni);

		spin_lock_bh(&net->ksnn_lock);
	}
	spin_unlock_bh(&net->ksnn_lock);

	/* all peers gone: no interface may still have users */
	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
	}

	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
2619
2620 static int
2621 ksocknal_enumerate_interfaces(struct ksock_net *net)
2622 {
2623         char **names;
2624         int i;
2625         int j;
2626         int rc;
2627         int n;
2628
2629         n = lnet_ipif_enumerate(&names);
2630         if (n <= 0) {
2631                 CERROR("Can't enumerate interfaces: %d\n", n);
2632                 return n;
2633         }
2634
2635         for (i = j = 0; i < n; i++) {
2636                 int        up;
2637                 __u32      ip;
2638                 __u32      mask;
2639
2640                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2641                         continue;
2642
2643                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2644                 if (rc != 0) {
2645                         CWARN("Can't get interface %s info: %d\n",
2646                               names[i], rc);
2647                         continue;
2648                 }
2649
2650                 if (!up) {
2651                         CWARN("Ignoring interface %s (down)\n",
2652                               names[i]);
2653                         continue;
2654                 }
2655
2656                 if (j == LNET_INTERFACES_NUM) {
2657                         CWARN("Ignoring interface %s (too many interfaces)\n",
2658                               names[i]);
2659                         continue;
2660                 }
2661
2662                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2663                 net->ksnn_interfaces[j].ksni_netmask = mask;
2664                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2665                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2666                 j++;
2667         }
2668
2669         lnet_ipif_free_enumeration(names, n);
2670
2671         if (j == 0)
2672                 CERROR("Can't find any usable interfaces\n");
2673
2674         return j;
2675 }
2676
2677 static int
2678 ksocknal_search_new_ipif(struct ksock_net *net)
2679 {
2680         int new_ipif = 0;
2681         int i;
2682
2683         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2684                 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2685                 char *colon = strchr(ifnam, ':');
2686                 int found  = 0;
2687                 struct ksock_net *tmp;
2688                 int j;
2689
2690                 if (colon != NULL) /* ignore alias device */
2691                         *colon = 0;
2692
2693                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2694                                         ksnn_list) {
2695                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2696                                 char *ifnam2 = &tmp->ksnn_interfaces[j].\
2697                                              ksni_name[0];
2698                                 char *colon2 = strchr(ifnam2, ':');
2699
2700                                 if (colon2 != NULL)
2701                                         *colon2 = 0;
2702
2703                                 found = strcmp(ifnam, ifnam2) == 0;
2704                                 if (colon2 != NULL)
2705                                         *colon2 = ':';
2706                         }
2707                         if (found)
2708                                 break;
2709                 }
2710
2711                 new_ipif += !found;
2712                 if (colon != NULL)
2713                         *colon = ':';
2714         }
2715
2716         return new_ipif;
2717 }
2718
/* Start scheduler threads for one CPT's ksock_sched_info.  On the first
 * call for a CPT the count comes from the nscheds tunable or the CPT
 * weight; on later calls (triggered by a new interface) the pool grows
 * by at most two threads, never beyond ksi_nthreads_max.
 *
 * Returns 0 on success or the error from the first thread that failed to
 * start; threads started before the failure are kept and accounted for
 * in ksi_nthreads. */
static int
ksocknal_start_schedulers(struct ksock_sched_info *info)
{
	int	nthrs;
	int	rc = 0;
	int	i;

	if (info->ksi_nthreads == 0) {
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = info->ksi_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       info->ksi_cpt);
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
		}
		nthrs = min(nthrs, info->ksi_nthreads_max);
	} else {
		LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
		/* increase two threads if there is new interface */
		nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
	}

	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];
		struct ksock_sched *sched;

		/* (cpt, slot) are encoded into the thread id so the
		 * scheduler thread can locate its own ksock_sched */
		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
			 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));

		rc = ksocknal_thread_start(ksocknal_scheduler,
					   (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       info->ksi_cpt, info->ksi_nthreads + i, rc);
		break;
	}

	/* NB: i is the number of threads actually started, whether or not
	 * the loop ended early on error */
	info->ksi_nthreads += i;
	return rc;
}
2765
2766 static int
2767 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2768 {
2769         int newif = ksocknal_search_new_ipif(net);
2770         int rc;
2771         int i;
2772
2773         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2774                 return -EINVAL;
2775
2776         for (i = 0; i < ncpts; i++) {
2777                 struct ksock_sched_info *info;
2778                 int cpt = (cpts == NULL) ? i : cpts[i];
2779
2780                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2781                 info = ksocknal_data.ksnd_sched_info[cpt];
2782
2783                 if (!newif && info->ksi_nthreads > 0)
2784                         continue;
2785
2786                 rc = ksocknal_start_schedulers(info);
2787                 if (rc != 0)
2788                         return rc;
2789         }
2790         return 0;
2791 }
2792
/* LND startup entry point for a socklnd NI.  Brings up global socklnd
 * state on the first call, allocates and configures the per-net
 * ksock_net (tunables, interface list, NUMA affinity, scheduler
 * threads), derives the NI's NID from its first interface address, and
 * links the net onto ksocknal_data.ksnd_nets.
 *
 * Returns 0 on success, the base-startup error, or -ENETDOWN on any
 * later failure (after freeing the net and, if this was the only net,
 * tearing base state back down). */
int
ksocknal_startup(struct lnet_ni *ni)
{
	struct ksock_net *net;
	int rc;
	int i;
	struct net_device *net_dev;
	int node_id;

	LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);

	/* first NI brings up the LND-wide state */
	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	spin_lock_init(&net->ksnn_lock);
	net->ksnn_incarnation = ktime_get_real_ns();
	ni->ni_data = net;
	/* fall back to module-parameter tunables unless per-net values
	 * were already configured */
	if (!ni->ni_net->net_tunables_set) {
		ni->ni_net->net_tunables.lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;
		ni->ni_net->net_tunables.lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;
		ni->ni_net->net_tunables.lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;
		ni->ni_net->net_tunables.lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;
		ni->ni_net->net_tunables_set = true;
	}


	/* no interfaces named in the config: probe the system and use the
	 * first usable one; otherwise validate each named interface */
	if (ni->ni_interfaces[0] == NULL) {
		rc = ksocknal_enumerate_interfaces(net);
		if (rc <= 0)
			goto fail_1;

		net->ksnn_ninterfaces = 1;
	} else {
		for (i = 0; i < LNET_INTERFACES_NUM; i++) {
			int up;

			if (ni->ni_interfaces[i] == NULL)
				break;

			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
				&net->ksnn_interfaces[i].ksni_ipaddr,
				&net->ksnn_interfaces[i].ksni_netmask);

			if (rc != 0) {
				CERROR("Can't get interface %s info: %d\n",
				       ni->ni_interfaces[i], rc);
				goto fail_1;
			}

			if (!up) {
				CERROR("Interface %s is down\n",
				       ni->ni_interfaces[i]);
				goto fail_1;
			}

			strlcpy(net->ksnn_interfaces[i].ksni_name,
				ni->ni_interfaces[i],
				sizeof(net->ksnn_interfaces[i].ksni_name));

		}
		net->ksnn_ninterfaces = i;
	}

	/* bind the NI to the NUMA node of the first interface's device
	 * when it can be resolved */
	net_dev = dev_get_by_name(&init_net,
				  net->ksnn_interfaces[0].ksni_name);
	if (net_dev != NULL) {
		node_id = dev_to_node(&net_dev->dev);
		ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
		dev_put(net_dev);
	} else {
		ni->ni_dev_cpt = CFS_CPT_ANY;
	}

	/* call it before add it to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
				net->ksnn_interfaces[0].ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

	ksocknal_data.ksnd_nnets++;

	return 0;

 fail_1:
	LIBCFS_FREE(net, sizeof(*net));
 fail_0:
	/* if this was the only (attempted) net, release base state too */
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
2898
2899
/* Module unload hook: unregister the socklnd LND from LNet.  LNet only
 * permits this once all networks using the LND have been shut down. */
static void __exit ksocklnd_exit(void)
{
	lnet_unregister_lnd(&the_ksocklnd);
}
2904
2905 static int __init ksocklnd_init(void)
2906 {
2907         int rc;
2908
2909         /* check ksnr_connected/connecting field large enough */
2910         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2911         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2912
2913         /* initialize the_ksocklnd */
2914         the_ksocklnd.lnd_type     = SOCKLND;
2915         the_ksocklnd.lnd_startup  = ksocknal_startup;
2916         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2917         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2918         the_ksocklnd.lnd_send     = ksocknal_send;
2919         the_ksocklnd.lnd_recv     = ksocknal_recv;
2920         the_ksocklnd.lnd_notify   = ksocknal_notify;
2921         the_ksocklnd.lnd_query    = ksocknal_query;
2922         the_ksocklnd.lnd_accept   = ksocknal_accept;
2923
2924         rc = ksocknal_tunables_init();
2925         if (rc != 0)
2926                 return rc;
2927
2928         lnet_register_lnd(&the_ksocklnd);
2929
2930         return 0;
2931 }
2932
/* Standard kernel module metadata and entry points. */
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
/* NOTE(review): version string appears to track the Lustre release this
 * LND shipped with — confirm it is bumped with each release */
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);