Whamcloud - gitweb
LU-11371 socklnd: dynamically set LND parameters
[fs/lustre-release.git] / lnet / klnds / socklnd / socklnd.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include <linux/pci.h>
41 #include "socklnd.h"
42
43 static struct lnet_lnd the_ksocklnd;
44 struct ksock_nal_data ksocknal_data;
45
46 static struct ksock_interface *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
48 {
49         struct ksock_net *net = ni->ni_data;
50         int i;
51         struct ksock_interface *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_INTERFACES_NUM);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return iface;
59         }
60
61         return NULL;
62 }
63
64 static struct ksock_route *
65 ksocknal_create_route(__u32 ipaddr, int port)
66 {
67         struct ksock_route *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route(struct ksock_route *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
99 static int
100 ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
101                      struct lnet_process_id id)
102 {
103         int cpt = lnet_cpt_of_nid(id.nid, ni);
104         struct ksock_net *net = ni->ni_data;
105         struct ksock_peer_ni *peer_ni;
106
107         LASSERT(id.nid != LNET_NID_ANY);
108         LASSERT(id.pid != LNET_PID_ANY);
109         LASSERT(!in_interrupt());
110
111         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
112         if (peer_ni == NULL)
113                 return -ENOMEM;
114
115         peer_ni->ksnp_ni = ni;
116         peer_ni->ksnp_id = id;
117         atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
118         peer_ni->ksnp_closing = 0;
119         peer_ni->ksnp_accepting = 0;
120         peer_ni->ksnp_proto = NULL;
121         peer_ni->ksnp_last_alive = 0;
122         peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
123
124         INIT_LIST_HEAD(&peer_ni->ksnp_conns);
125         INIT_LIST_HEAD(&peer_ni->ksnp_routes);
126         INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
127         INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
128         spin_lock_init(&peer_ni->ksnp_lock);
129
130         spin_lock_bh(&net->ksnn_lock);
131
132         if (net->ksnn_shutdown) {
133                 spin_unlock_bh(&net->ksnn_lock);
134
135                 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
136                 CERROR("Can't create peer_ni: network shutdown\n");
137                 return -ESHUTDOWN;
138         }
139
140         net->ksnn_npeers++;
141
142         spin_unlock_bh(&net->ksnn_lock);
143
144         *peerp = peer_ni;
145         return 0;
146 }
147
148 void
149 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
150 {
151         struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
152
153         CDEBUG (D_NET, "peer_ni %s %p deleted\n",
154                 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
155
156         LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
157         LASSERT(peer_ni->ksnp_accepting == 0);
158         LASSERT(list_empty(&peer_ni->ksnp_conns));
159         LASSERT(list_empty(&peer_ni->ksnp_routes));
160         LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
161         LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
162
163         LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
164
165         /* NB a peer_ni's connections and routes keep a reference on their peer_ni
166          * until they are destroyed, so we can be assured that _all_ state to
167          * do with this peer_ni has been cleaned up when its refcount drops to
168          * zero. */
169         spin_lock_bh(&net->ksnn_lock);
170         net->ksnn_npeers--;
171         spin_unlock_bh(&net->ksnn_lock);
172 }
173
174 struct ksock_peer_ni *
175 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
176 {
177         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178         struct list_head *tmp;
179         struct ksock_peer_ni *peer_ni;
180
181         list_for_each(tmp, peer_list) {
182                 peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
183
184                 LASSERT(!peer_ni->ksnp_closing);
185
186                 if (peer_ni->ksnp_ni != ni)
187                         continue;
188
189                 if (peer_ni->ksnp_id.nid != id.nid ||
190                     peer_ni->ksnp_id.pid != id.pid)
191                         continue;
192
193                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
194                        peer_ni, libcfs_id2str(id),
195                        atomic_read(&peer_ni->ksnp_refcount));
196                 return peer_ni;
197         }
198         return NULL;
199 }
200
201 struct ksock_peer_ni *
202 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
203 {
204         struct ksock_peer_ni *peer_ni;
205
206         read_lock(&ksocknal_data.ksnd_global_lock);
207         peer_ni = ksocknal_find_peer_locked(ni, id);
208         if (peer_ni != NULL)                    /* +1 ref for caller? */
209                 ksocknal_peer_addref(peer_ni);
210         read_unlock(&ksocknal_data.ksnd_global_lock);
211
212         return (peer_ni);
213 }
214
215 static void
216 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
217 {
218         int i;
219         __u32 ip;
220         struct ksock_interface *iface;
221
222         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
223                 LASSERT(i < LNET_INTERFACES_NUM);
224                 ip = peer_ni->ksnp_passive_ips[i];
225
226                 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
227                 /*
228                  * All IPs in peer_ni->ksnp_passive_ips[] come from the
229                  * interface list, therefore the call must succeed.
230                  */
231                 LASSERT(iface != NULL);
232
233                 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
234                        peer_ni, iface, iface->ksni_nroutes);
235                 iface->ksni_npeers--;
236         }
237
238         LASSERT(list_empty(&peer_ni->ksnp_conns));
239         LASSERT(list_empty(&peer_ni->ksnp_routes));
240         LASSERT(!peer_ni->ksnp_closing);
241         peer_ni->ksnp_closing = 1;
242         list_del(&peer_ni->ksnp_list);
243         /* lose peerlist's ref */
244         ksocknal_peer_decref(peer_ni);
245 }
246
/*
 * Report peer state for an index-based query (ioctl-style enumeration).
 *
 * 'index' counts records across all peers on @ni in hash-table order.
 * Each peer contributes: one record if it has neither passive IPs nor
 * routes; otherwise one record per passive IP, then one per route.
 * Returns 0 with the out parameters filled in for the selected record,
 * or -ENOENT when 'index' is past the last record.
 */
static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
		       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
		       int *port, int *conn_count, int *share_count)
{
	struct ksock_peer_ni *peer_ni;
	struct list_head *ptmp;
	struct ksock_route *route;
	struct list_head *rtmp;
	int i;
	int j;
	int rc = -ENOENT;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);

			if (peer_ni->ksnp_ni != ni)
				continue;

			/* Bare peer: single record, addresses zeroed. */
			if (peer_ni->ksnp_n_passive_ips == 0 &&
			    list_empty(&peer_ni->ksnp_routes)) {
				if (index-- > 0)
					continue;

				*id = peer_ni->ksnp_id;
				*myip = 0;
				*peer_ip = 0;
				*port = 0;
				*conn_count = 0;
				*share_count = 0;
				rc = 0;
				goto out;
			}

			/* One record per passive (local) IP. */
			for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
				if (index-- > 0)
					continue;

				*id = peer_ni->ksnp_id;
				*myip = peer_ni->ksnp_passive_ips[j];
				*peer_ip = 0;
				*port = 0;
				*conn_count = 0;
				*share_count = 0;
				rc = 0;
				goto out;
			}

			/* One record per route: both endpoints and counts. */
			list_for_each(rtmp, &peer_ni->ksnp_routes) {
				if (index-- > 0)
					continue;

				route = list_entry(rtmp, struct ksock_route,
						   ksnr_list);

				*id = peer_ni->ksnp_id;
				*myip = route->ksnr_myipaddr;
				*peer_ip = route->ksnr_ipaddr;
				*port = route->ksnr_port;
				*conn_count = route->ksnr_conn_count;
				*share_count = route->ksnr_share_count;
				rc = 0;
				goto out;
			}
		}
	}
out:
	read_unlock(&ksocknal_data.ksnd_global_lock);
	return rc;
}
320
/*
 * Bind @conn to @route (conn takes a ref on the route) and keep the
 * per-interface route accounting consistent with the local address the
 * connection actually bound to.
 */
static void
ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
	struct ksock_peer_ni *peer_ni = route->ksnr_peer;
	int type = conn->ksnc_type;
	struct ksock_interface *iface;

	conn->ksnc_route = route;
	ksocknal_route_addref(route);

	if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
		if (route->ksnr_myipaddr == 0) {
			/* route wasn't bound locally yet (the initial route) */
			CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
			       libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr,
			       &conn->ksnc_myipaddr);
		} else {
			/* Rebinding: move the route's count off the old
			 * local interface (if it is still configured). */
			CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
			       "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr,
			       &route->ksnr_myipaddr,
			       &conn->ksnc_myipaddr);

			iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
						  route->ksnr_myipaddr);
			if (iface != NULL)
				iface->ksni_nroutes--;
		}
		/* ...and onto the interface the conn is bound to. */
		route->ksnr_myipaddr = conn->ksnc_myipaddr;
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface != NULL)
			iface->ksni_nroutes++;
	}

	/* Remember that a connection of this type now exists. */
	route->ksnr_connected |= (1<<type);
	route->ksnr_conn_count++;

	/* Successful connection => further attempts can
	 * proceed immediately */
	route->ksnr_retry_interval = 0;
}
364
365 static void
366 ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
367 {
368         struct list_head *tmp;
369         struct ksock_conn *conn;
370         struct ksock_route *route2;
371
372         LASSERT(!peer_ni->ksnp_closing);
373         LASSERT(route->ksnr_peer == NULL);
374         LASSERT(!route->ksnr_scheduled);
375         LASSERT(!route->ksnr_connecting);
376         LASSERT(route->ksnr_connected == 0);
377
378         /* LASSERT(unique) */
379         list_for_each(tmp, &peer_ni->ksnp_routes) {
380                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
381
382                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
383                         CERROR("Duplicate route %s %pI4h\n",
384                                libcfs_id2str(peer_ni->ksnp_id),
385                                &route->ksnr_ipaddr);
386                         LBUG();
387                 }
388         }
389
390         route->ksnr_peer = peer_ni;
391         ksocknal_peer_addref(peer_ni);
392         /* peer_ni's routelist takes over my ref on 'route' */
393         list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
394
395         list_for_each(tmp, &peer_ni->ksnp_conns) {
396                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
397
398                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
399                         continue;
400
401                 ksocknal_associate_route_conn_locked(route, conn);
402                 /* keep going (typed routes) */
403         }
404 }
405
/*
 * Remove @route from its peer (caller holds the global write lock).
 * Closes any connections using the route, releases its claim on the
 * local interface, drops the route list's reference, and unlinks the
 * peer entirely if this was its last route and it has no conns left.
 */
static void
ksocknal_del_route_locked(struct ksock_route *route)
{
	struct ksock_peer_ni *peer_ni = route->ksnr_peer;
	struct ksock_interface *iface;
	struct ksock_conn *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;

	LASSERT(!route->ksnr_deleted);

	/* Close associated conns */
	list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
		conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

		if (conn->ksnc_route != route)
			continue;

		ksocknal_close_conn_locked(conn, 0);
	}

	/* Drop this route's count on the interface it was bound to. */
	if (route->ksnr_myipaddr != 0) {
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface != NULL)
			iface->ksni_nroutes--;
	}

	route->ksnr_deleted = 1;
	list_del(&route->ksnr_list);
	ksocknal_route_decref(route);		/* drop peer_ni's ref */

	if (list_empty(&peer_ni->ksnp_routes) &&
	    list_empty(&peer_ni->ksnp_conns)) {
		/* I've just removed the last route to a peer_ni with no active
		 * connections */
		ksocknal_unlink_peer_locked(peer_ni);
	}
}
445
446 int
447 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
448                   int port)
449 {
450         struct list_head *tmp;
451         struct ksock_peer_ni *peer_ni;
452         struct ksock_peer_ni *peer2;
453         struct ksock_route *route;
454         struct ksock_route *route2;
455         int rc;
456
457         if (id.nid == LNET_NID_ANY ||
458             id.pid == LNET_PID_ANY)
459                 return (-EINVAL);
460
461         /* Have a brand new peer_ni ready... */
462         rc = ksocknal_create_peer(&peer_ni, ni, id);
463         if (rc != 0)
464                 return rc;
465
466         route = ksocknal_create_route (ipaddr, port);
467         if (route == NULL) {
468                 ksocknal_peer_decref(peer_ni);
469                 return (-ENOMEM);
470         }
471
472         write_lock_bh(&ksocknal_data.ksnd_global_lock);
473
474         /* always called with a ref on ni, so shutdown can't have started */
475         LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
476
477         peer2 = ksocknal_find_peer_locked(ni, id);
478         if (peer2 != NULL) {
479                 ksocknal_peer_decref(peer_ni);
480                 peer_ni = peer2;
481         } else {
482                 /* peer_ni table takes my ref on peer_ni */
483                 list_add_tail(&peer_ni->ksnp_list,
484                               ksocknal_nid2peerlist(id.nid));
485         }
486
487         route2 = NULL;
488         list_for_each(tmp, &peer_ni->ksnp_routes) {
489                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
490
491                 if (route2->ksnr_ipaddr == ipaddr)
492                         break;
493
494                 route2 = NULL;
495         }
496         if (route2 == NULL) {
497                 ksocknal_add_route_locked(peer_ni, route);
498                 route->ksnr_share_count++;
499         } else {
500                 ksocknal_route_decref(route);
501                 route2->ksnr_share_count++;
502         }
503
504         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
505
506         return 0;
507 }
508
/*
 * Delete routes (and their conns) from @peer_ni that match @ip, or all
 * routes when @ip == 0.  If no explicitly-shared routes remain afterwards,
 * every remaining (auto-created) route and connection is torn down too.
 * Caller holds the global write lock.
 */
static void
ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
{
	struct ksock_conn *conn;
	struct ksock_route *route;
	struct list_head *tmp;
	struct list_head *nxt;
	int nshared;

	LASSERT(!peer_ni->ksnp_closing);

	/* Extra ref prevents peer_ni disappearing until I'm done with it */
	ksocknal_peer_addref(peer_ni);

	/* Pass 1: delete every route matching @ip (0 matches all). */
	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		/* no match */
		if (!(ip == 0 || route->ksnr_ipaddr == ip))
			continue;

		route->ksnr_share_count = 0;
		/* This deletes associated conns too */
		ksocknal_del_route_locked(route);
	}

	/* Pass 2: count explicit shares left on the surviving routes. */
	nshared = 0;
	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);
		nshared += route->ksnr_share_count;
	}

	if (nshared == 0) {
		/* remove everything else if there are no explicit entries
		 * left */

		list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
			route = list_entry(tmp, struct ksock_route, ksnr_list);

			/* we should only be removing auto-entries */
			LASSERT(route->ksnr_share_count == 0);
			ksocknal_del_route_locked(route);
		}

		list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, struct ksock_conn, ksnc_list);

			ksocknal_close_conn_locked(conn, 0);
		}
	}

	ksocknal_peer_decref(peer_ni);
	/* NB peer_ni unlinks itself when last conn/route is removed */
}
563
/*
 * Delete peers on @ni matching @id (LNET_NID_ANY / LNET_PID_ANY act as
 * wildcards), restricting route deletion to @ip when non-zero.  Any
 * transmits still queued on a peer that ends up closing are completed
 * with -ENETDOWN after the lock is dropped.  Returns 0 if at least one
 * peer matched, otherwise -ENOENT.
 */
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct list_head *ptmp;
	struct list_head *pnxt;
	struct ksock_peer_ni *peer_ni;
	int lo;
	int hi;
	int i;
	int rc = -ENOENT;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	/* A specific NID hashes to exactly one chain; a wildcard NID
	 * means scanning the whole table. */
	if (id.nid != LNET_NID_ANY) {
		hi = (int)(ksocknal_nid2peerlist(id.nid) -
			   ksocknal_data.ksnd_peers);
		lo = hi;
	} else {
		lo = 0;
		hi = ksocknal_data.ksnd_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt,
				   &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);

			if (peer_ni->ksnp_ni != ni)
				continue;

			if (!((id.nid == LNET_NID_ANY ||
			       peer_ni->ksnp_id.nid == id.nid) &&
			      (id.pid == LNET_PID_ANY ||
			       peer_ni->ksnp_id.pid == id.pid)))
				continue;

			ksocknal_peer_addref(peer_ni);	/* a ref for me... */

			ksocknal_del_peer_locked(peer_ni, ip);

			/* Collect any still-queued txs so they can be
			 * failed outside the lock. */
			if (peer_ni->ksnp_closing &&
			    !list_empty(&peer_ni->ksnp_tx_queue)) {
				LASSERT(list_empty(&peer_ni->ksnp_conns));
				LASSERT(list_empty(&peer_ni->ksnp_routes));

				list_splice_init(&peer_ni->ksnp_tx_queue,
						 &zombies);
			}

			ksocknal_peer_decref(peer_ni);	/* ...till here */

			rc = 0;				/* matched! */
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(ni, &zombies, -ENETDOWN);

	return rc;
}
626
627 static struct ksock_conn *
628 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
629 {
630         struct ksock_peer_ni *peer_ni;
631         struct list_head *ptmp;
632         struct ksock_conn *conn;
633         struct list_head *ctmp;
634         int i;
635
636         read_lock(&ksocknal_data.ksnd_global_lock);
637
638         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
639                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
640                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
641
642                         LASSERT(!peer_ni->ksnp_closing);
643
644                         if (peer_ni->ksnp_ni != ni)
645                                 continue;
646
647                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
648                                 if (index-- > 0)
649                                         continue;
650
651                                 conn = list_entry(ctmp, struct ksock_conn,
652                                                   ksnc_list);
653                                 ksocknal_conn_addref(conn);
654                                 read_unlock(&ksocknal_data. \
655                                             ksnd_global_lock);
656                                 return conn;
657                         }
658                 }
659         }
660
661         read_unlock(&ksocknal_data.ksnd_global_lock);
662         return NULL;
663 }
664
/*
 * Pick the scheduler thread with the fewest connections for @cpt.  If the
 * CPT's own scheduler info has no started threads, fall back to the first
 * CPT that has some; returns NULL only when no CPT has any threads.
 */
static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
	struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
	struct ksock_sched *sched;
	int i;

	if (info->ksi_nthreads == 0) {
		/* No threads on this CPT: borrow another CPT's info. */
		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
			if (info->ksi_nthreads > 0) {
				CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
				       cpt, info->ksi_cpt);
				goto select_sched;
			}
		}
		return NULL;
	}

select_sched:
	sched = &info->ksi_scheds[0];
	/*
	 * NB: it's safe so far, but info->ksi_nthreads could be changed
	 * at runtime when we have dynamic LNet configuration, then we
	 * need to take care of this.
	 */
	for (i = 1; i < info->ksi_nthreads; i++) {
		/* least-loaded: fewest connections wins */
		if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
			sched = &info->ksi_scheds[i];
	}

	return sched;
}
697
698 static int
699 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
700 {
701         struct ksock_net *net = ni->ni_data;
702         int i;
703         int nip;
704
705         read_lock(&ksocknal_data.ksnd_global_lock);
706
707         nip = net->ksnn_ninterfaces;
708         LASSERT(nip <= LNET_INTERFACES_NUM);
709
710         /*
711          * Only offer interfaces for additional connections if I have
712          * more than one.
713          */
714         if (nip < 2) {
715                 read_unlock(&ksocknal_data.ksnd_global_lock);
716                 return 0;
717         }
718
719         for (i = 0; i < nip; i++) {
720                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
721                 LASSERT(ipaddrs[i] != 0);
722         }
723
724         read_unlock(&ksocknal_data.ksnd_global_lock);
725         return nip;
726 }
727
728 static int
729 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
730 {
731         int best_netmatch = 0;
732         int best_xor = 0;
733         int best = -1;
734         int this_xor;
735         int this_netmatch;
736         int i;
737
738         for (i = 0; i < nips; i++) {
739                 if (ips[i] == 0)
740                         continue;
741
742                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
743                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
744
745                 if (!(best < 0 ||
746                       best_netmatch < this_netmatch ||
747                       (best_netmatch == this_netmatch &&
748                        best_xor > this_xor)))
749                         continue;
750
751                 best = i;
752                 best_netmatch = this_netmatch;
753                 best_xor = this_xor;
754         }
755
756         LASSERT (best >= 0);
757         return (best);
758 }
759
/*
 * Choose up to n_peerips local interfaces to pair with the peer's IPs.
 * Grows peer_ni->ksnp_passive_ips[] to n_ips entries (existing entries
 * are kept), preferring interfaces on the same subnet as an unmatched
 * peer IP and, among those, the least-used interface.  On return,
 * peerips[] has been overwritten with the chosen LOCAL addresses and
 * the function returns how many were selected (0 when this net has
 * fewer than two interfaces).
 */
static int
ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
{
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
	struct ksock_interface *iface;
	struct ksock_interface *best_iface;
	int n_ips;
	int i;
	int j;
	int k;
	u32 ip;
	u32 xor;
	int this_netmatch;
	int best_netmatch;
	int best_npeers;

	/* CAVEAT EMPTOR: We do all our interface matching with an
	 * exclusive hold of global lock at IRQ priority.  We're only
	 * expecting to be dealing with small numbers of interfaces, so the
	 * O(n**3)-ness shouldn't matter */

	/* Also note that I'm not going to return more than n_peerips
	 * interfaces, even if I have more myself */

	write_lock_bh(global_lock);

	LASSERT(n_peerips <= LNET_INTERFACES_NUM);
	LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);

	/* Only match interfaces for additional connections
	 * if I have > 1 interface */
	n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
		MIN(n_peerips, net->ksnn_ninterfaces);

	for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
		/*              ^ yes really... */

		/* If we have any new interfaces, first tick off all the
		 * peer_ni IPs that match old interfaces, then choose new
		 * interfaces to match the remaining peer_ni IPS.
		 * We don't forget interfaces we've stopped using; we might
		 * start using them again... */

		if (i < peer_ni->ksnp_n_passive_ips) {
			/* Old interface. */
			ip = peer_ni->ksnp_passive_ips[i];
			best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);

			/* peer_ni passive ips are kept up to date */
			LASSERT(best_iface != NULL);
		} else {
			/* choose a new interface */
			LASSERT (i == peer_ni->ksnp_n_passive_ips);

			best_iface = NULL;
			best_netmatch = 0;
			best_npeers = 0;

			/* Score every interface not already assigned to
			 * this peer: subnet match first, then fewest
			 * peers already using it. */
			for (j = 0; j < net->ksnn_ninterfaces; j++) {
				iface = &net->ksnn_interfaces[j];
				ip = iface->ksni_ipaddr;

				for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
					if (peer_ni->ksnp_passive_ips[k] == ip)
						break;

				if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
					continue;

				k = ksocknal_match_peerip(iface, peerips, n_peerips);
				xor = (ip ^ peerips[k]);
				this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

				if (!(best_iface == NULL ||
				      best_netmatch < this_netmatch ||
				      (best_netmatch == this_netmatch &&
				       best_npeers > iface->ksni_npeers)))
					continue;

				best_iface = iface;
				best_netmatch = this_netmatch;
				best_npeers = iface->ksni_npeers;
			}

			LASSERT(best_iface != NULL);

			best_iface->ksni_npeers++;
			ip = best_iface->ksni_ipaddr;
			peer_ni->ksnp_passive_ips[i] = ip;
			peer_ni->ksnp_n_passive_ips = i+1;
		}

		/* mark the best matching peer_ni IP used */
		j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
		peerips[j] = 0;
	}

	/* Overwrite input peer_ni IP addresses */
	memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));

	write_unlock_bh(global_lock);

	return (n_ips);
}
865
/*
 * Create additional routes to @peer_ni, one for each address in
 * @peer_ipaddrs[] that we don't already have a route to, binding each
 * new route to the best matching local interface.  Does nothing unless
 * this net has more than one interface configured.
 *
 * @peer_ni        peer to add routes to
 * @port           peer port each new route should connect to
 * @peer_ipaddrs   peer IP addresses learned from the HELLO exchange
 * @npeer_ipaddrs  number of entries in @peer_ipaddrs (<= LNET_INTERFACES_NUM)
 */
static void
ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
        struct ksock_route              *newroute = NULL;
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        struct lnet_ni *ni = peer_ni->ksnp_ni;
        struct ksock_net                *net = ni->ni_data;
        struct list_head        *rtmp;
        struct ksock_route              *route;
        struct ksock_interface  *iface;
        struct ksock_interface  *best_iface;
        int                     best_netmatch;
        int                     this_netmatch;
        int                     best_nroutes;
        int                     i;
        int                     j;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness here shouldn't matter */

        write_lock_bh(global_lock);

        if (net->ksnn_ninterfaces < 2) {
                /* Only create additional connections
                 * if I have > 1 interface */
                write_unlock_bh(global_lock);
                return;
        }

        LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);

        for (i = 0; i < npeer_ipaddrs; i++) {
                if (newroute != NULL) {
                        /* Reuse the route left over from the previous
                         * iteration (it wasn't consumed); just retarget it */
                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
                } else {
                        /* Allocation may sleep, so drop the IRQ-priority
                         * lock around it and retake it afterwards */
                        write_unlock_bh(global_lock);

                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                        if (newroute == NULL)
                                return;

                        write_lock_bh(global_lock);
                }

                if (peer_ni->ksnp_closing) {
                        /* peer_ni got closed under me */
                        break;
                }

                /* Already got a route? */
                route = NULL;
                list_for_each(rtmp, &peer_ni->ksnp_routes) {
                        route = list_entry(rtmp, struct ksock_route, ksnr_list);

                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;

                        route = NULL;
                }
                if (route != NULL)
                        continue;

                best_iface = NULL;
                best_nroutes = 0;
                best_netmatch = 0;

                LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);

                /* Select interface to connect from */
                for (j = 0; j < net->ksnn_ninterfaces; j++) {
                        iface = &net->ksnn_interfaces[j];

                        /* Using this interface already? */
                        list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);

                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
                                        break;

                                route = NULL;
                        }
                        if (route != NULL)
                                continue;

                        /* 1 if this interface shares a subnet with the
                         * peer address, 0 otherwise */
                        this_netmatch = (((iface->ksni_ipaddr ^
                                           newroute->ksnr_ipaddr) &
                                           iface->ksni_netmask) == 0) ? 1 : 0;

                        /* Prefer a subnet match; break ties by fewest
                         * existing routes on the interface */
                        if (!(best_iface == NULL ||
                              best_netmatch < this_netmatch ||
                              (best_netmatch == this_netmatch &&
                               best_nroutes > iface->ksni_nroutes)))
                                continue;

                        best_iface = iface;
                        best_netmatch = this_netmatch;
                        best_nroutes = iface->ksni_nroutes;
                }

                if (best_iface == NULL)
                        continue;

                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
                best_iface->ksni_nroutes++;

                /* peer_ni's route list takes over newroute's ref */
                ksocknal_add_route_locked(peer_ni, newroute);
                newroute = NULL;
        }

        write_unlock_bh(global_lock);
        if (newroute != NULL)
                ksocknal_route_decref(newroute);
}
983
984 int
985 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
986 {
987         struct ksock_connreq *cr;
988         int rc;
989         u32 peer_ip;
990         int peer_port;
991
992         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
993         LASSERT(rc == 0);               /* we succeeded before */
994
995         LIBCFS_ALLOC(cr, sizeof(*cr));
996         if (cr == NULL) {
997                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
998                                    "%pI4h: memory exhausted\n", &peer_ip);
999                 return -ENOMEM;
1000         }
1001
1002         lnet_ni_addref(ni);
1003         cr->ksncr_ni   = ni;
1004         cr->ksncr_sock = sock;
1005
1006         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1007
1008         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
1009         wake_up(&ksocknal_data.ksnd_connd_waitq);
1010
1011         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1012         return 0;
1013 }
1014
1015 static int
1016 ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
1017 {
1018         struct ksock_route *route;
1019
1020         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1021                 if (route->ksnr_ipaddr == ipaddr)
1022                         return route->ksnr_connecting;
1023         }
1024         return 0;
1025 }
1026
/*
 * Establish a new connection on @sock of the given @type.
 *
 * Active case (@route != NULL): we initiated the connection for
 * @route's peer; send HELLO eagerly, then read the peer's reply.
 * Passive case (@route == NULL, type == SOCKLND_CONN_NONE): the peer
 * connected to us; learn its identity and protocol from its HELLO.
 *
 * On success the conn is installed on the peer's conn list, socket
 * callbacks are armed and 0 is returned.  On failure the socket is
 * released and rc is returned; NB rc may be positive (e.g. EALREADY /
 * ESTALE / EPROTO from the race-resolution paths) as well as negative.
 */
int
ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
                     struct socket *sock, int type)
{
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        struct list_head zombies = LIST_HEAD_INIT(zombies);
        struct lnet_process_id peerid;
        struct list_head *tmp;
        u64 incarnation;
        struct ksock_conn *conn;
        struct ksock_conn *conn2;
        struct ksock_peer_ni *peer_ni = NULL;
        struct ksock_peer_ni *peer2;
        struct ksock_sched *sched;
        struct ksock_hello_msg *hello;
        int cpt;
        struct ksock_tx *tx;
        struct ksock_tx *txtmp;
        int rc;
        int rc2;
        int active;
        char *warn = NULL;

        active = (route != NULL);

        LASSERT (active == (type != SOCKLND_CONN_NONE));

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                rc = -ENOMEM;
                goto failed_0;
        }

        conn->ksnc_peer = NULL;
        conn->ksnc_route = NULL;
        conn->ksnc_sock = sock;
        /* 2 ref, 1 for conn, another extra ref prevents socket
         * being closed before establishment of connection */
        atomic_set (&conn->ksnc_sock_refcount, 2);
        conn->ksnc_type = type;
        ksocknal_lib_save_callback(sock, conn);
        atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

        conn->ksnc_rx_ready = 0;
        conn->ksnc_rx_scheduled = 0;

        INIT_LIST_HEAD(&conn->ksnc_tx_queue);
        conn->ksnc_tx_ready = 0;
        conn->ksnc_tx_scheduled = 0;
        conn->ksnc_tx_carrier = NULL;
        atomic_set (&conn->ksnc_tx_nob, 0);

        /* hello buffer is sized for the maximum interface vector */
        LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
                                     kshm_ips[LNET_INTERFACES_NUM]));
        if (hello == NULL) {
                rc = -ENOMEM;
                goto failed_1;
        }

        /* stash conn's local and remote addrs */
        rc = ksocknal_lib_get_conn_addrs (conn);
        if (rc != 0)
                goto failed_1;

        /* Find out/confirm peer_ni's NID and connection type and get the
         * vector of interfaces she's willing to let me connect to.
         * Passive connections use the listener timeout since the peer_ni sends
         * eagerly */

        if (active) {
                peer_ni = route->ksnr_peer;
                LASSERT(ni == peer_ni->ksnp_ni);

                /* Active connection sends HELLO eagerly */
                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                peerid = peer_ni->ksnp_id;

                write_lock_bh(global_lock);
                conn->ksnc_proto = peer_ni->ksnp_proto;
                write_unlock_bh(global_lock);

                if (conn->ksnc_proto == NULL) {
                        /* never talked to this peer_ni before: start with the
                         * newest protocol and negotiate down if need be */
                         conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                         if (*ksocknal_tunables.ksnd_protocol == 2)
                                 conn->ksnc_proto = &ksocknal_protocol_v2x;
                         else if (*ksocknal_tunables.ksnd_protocol == 1)
                                 conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                }

                rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
                if (rc != 0)
                        goto failed_1;
        } else {
                peerid.nid = LNET_NID_ANY;
                peerid.pid = LNET_PID_ANY;

                /* Passive, get protocol from peer_ni */
                conn->ksnc_proto = NULL;
        }

        /* NB recv_hello may return a positive value to signal a protocol
         * mismatch/race that needs resolving below */
        rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
        if (rc < 0)
                goto failed_1;

        LASSERT (rc == 0 || active);
        LASSERT (conn->ksnc_proto != NULL);
        LASSERT (peerid.nid != LNET_NID_ANY);

        cpt = lnet_cpt_of_nid(peerid.nid, ni);

        if (active) {
                ksocknal_peer_addref(peer_ni);
                write_lock_bh(global_lock);
        } else {
                /* passive: we only now know who the peer is */
                rc = ksocknal_create_peer(&peer_ni, ni, peerid);
                if (rc != 0)
                        goto failed_1;

                write_lock_bh(global_lock);

                /* called with a ref on ni, so shutdown can't have started */
                LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);

                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
                        /* NB this puts an "empty" peer_ni in the peer_ni
                         * table (which takes my ref) */
                        list_add_tail(&peer_ni->ksnp_list,
                                      ksocknal_nid2peerlist(peerid.nid));
                } else {
                        ksocknal_peer_decref(peer_ni);
                        peer_ni = peer2;
                }

                /* +1 ref for me */
                ksocknal_peer_addref(peer_ni);
                peer_ni->ksnp_accepting++;

                /* Am I already connecting to this guy?  Resolve in
                 * favour of higher NID... */
                if (peerid.nid < ni->ni_nid &&
                    ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
                        rc = EALREADY;
                        warn = "connection race resolution";
                        goto failed_2;
                }
        }

        if (peer_ni->ksnp_closing ||
            (active && route->ksnr_deleted)) {
                /* peer_ni/route got closed under me */
                rc = -ESTALE;
                warn = "peer_ni/route removed";
                goto failed_2;
        }

        if (peer_ni->ksnp_proto == NULL) {
                /* Never connected before.
                 * NB recv_hello may have returned EPROTO to signal my peer_ni
                 * wants a different protocol than the one I asked for.
                 */
                LASSERT(list_empty(&peer_ni->ksnp_conns));

                peer_ni->ksnp_proto = conn->ksnc_proto;
                peer_ni->ksnp_incarnation = incarnation;
        }

        if (peer_ni->ksnp_proto != conn->ksnc_proto ||
            peer_ni->ksnp_incarnation != incarnation) {
                /* peer_ni rebooted or I've got the wrong protocol version */
                ksocknal_close_peer_conns_locked(peer_ni, 0, 0);

                peer_ni->ksnp_proto = NULL;
                rc = ESTALE;
                warn = peer_ni->ksnp_incarnation != incarnation ?
                       "peer_ni rebooted" :
                       "wrong proto version";
                goto failed_2;
        }

        /* dispose of any positive status left from recv_hello */
        switch (rc) {
        default:
                LBUG();
        case 0:
                break;
        case EALREADY:
                warn = "lost conn race";
                goto failed_2;
        case EPROTO:
                warn = "retry with different protocol version";
                goto failed_2;
        }

        /* Refuse to duplicate an existing connection, unless this is a
         * loopback connection */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);

                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
                            conn2->ksnc_type != conn->ksnc_type)
                                continue;

                        /* Reply on a passive connection attempt so the peer_ni
                         * realises we're connected. */
                        LASSERT (rc == 0);
                        if (!active)
                                rc = EALREADY;

                        warn = "duplicate";
                        goto failed_2;
                }
        }

        /* If the connection created by this route didn't bind to the IP
         * address the route connected to, the connection/route matching
         * code below probably isn't going to work. */
        if (active &&
            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
                CERROR("Route %s %pI4h connected to %pI4h\n",
                       libcfs_id2str(peer_ni->ksnp_id),
                       &route->ksnr_ipaddr,
                       &conn->ksnc_ipaddr);
        }

        /* Search for a route corresponding to the new connection and
         * create an association.  This allows incoming connections created
         * by routes in my peer_ni to match my own route entries so I don't
         * continually create duplicate routes. */
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                break;
        }

        conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
        peer_ni->ksnp_last_alive = ktime_get_seconds();
        peer_ni->ksnp_send_keepalive = 0;
        peer_ni->ksnp_error = 0;

        sched = ksocknal_choose_scheduler_locked(cpt);
        if (!sched) {
                CERROR("no schedulers available. node is unhealthy\n");
                goto failed_2;
        }
        /*
         * The cpt might have changed if we ended up selecting a non cpt
         * native scheduler. So use the scheduler's cpt instead.
         */
        cpt = sched->kss_info->ksi_cpt;
        sched->kss_nconns++;
        conn->ksnc_scheduler = sched;

        conn->ksnc_tx_last_post = ktime_get_seconds();
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
        conn->ksnc_tx_deadline = ktime_get_seconds() +
                                 lnet_get_lnd_timeout();
        smp_mb();   /* order with adding to peer_ni's conn list */

        list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
        ksocknal_conn_addref(conn);

        ksocknal_new_packet(conn, 0);

        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

        /* Take packets blocking for this connection. */
        list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
                    SOCKNAL_MATCH_NO)
                        continue;

                list_del(&tx->tx_list);
                ksocknal_queue_tx_locked(tx, conn);
        }

        write_unlock_bh(global_lock);

        /* We've now got a new connection.  Any errors from here on are just
         * like "normal" comms errors and we close the connection normally.
         * NB (a) we still have to send the reply HELLO for passive
         *        connections,
         *    (b) normal I/O on the conn is blocked until I setup and call the
         *        socket callbacks.
         */

        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
               " incarnation:%lld sched[%d:%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
               conn->ksnc_port, incarnation, cpt,
               (int)(sched - &sched->kss_info->ksi_scheds[0]));

        if (active) {
                /* additional routes after interface exchange? */
                ksocknal_create_routes(peer_ni, conn->ksnc_port,
                                       hello->kshm_ips, hello->kshm_nips);
        } else {
                /* passive: send the reply HELLO now we know who they are */
                hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
                                                       hello->kshm_nips);
                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
        }

        LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
                                    kshm_ips[LNET_INTERFACES_NUM]));

        /* setup the socket AFTER I've received hello (it disables
         * SO_LINGER).  I might call back to the acceptor who may want
         * to send a protocol version response and then close the
         * socket; this ensures the socket only tears down after the
         * response has been sent. */
        if (rc == 0)
                rc = ksocknal_lib_setup_sock(sock);

        write_lock_bh(global_lock);

        /* NB my callbacks block while I hold ksnd_global_lock */
        ksocknal_lib_set_callback(sock, conn);

        if (!active)
                peer_ni->ksnp_accepting--;

        write_unlock_bh(global_lock);

        if (rc != 0) {
                write_lock_bh(global_lock);
                if (!conn->ksnc_closing) {
                        /* could be closed by another thread */
                        ksocknal_close_conn_locked(conn, rc);
                }
                write_unlock_bh(global_lock);
        } else if (ksocknal_connsock_addref(conn) == 0) {
                /* Allow I/O to proceed. */
                ksocknal_read_callback(conn);
                ksocknal_write_callback(conn);
                ksocknal_connsock_decref(conn);
        }

        /* drop the extra sock ref taken at allocation and my conn ref */
        ksocknal_connsock_decref(conn);
        ksocknal_conn_decref(conn);
        return rc;

failed_2:
        /* undo peer_ni table insertion if this conn was its only reason
         * for existing */
        if (!peer_ni->ksnp_closing &&
            list_empty(&peer_ni->ksnp_conns) &&
            list_empty(&peer_ni->ksnp_routes)) {
                list_add(&zombies, &peer_ni->ksnp_tx_queue);
                list_del_init(&peer_ni->ksnp_tx_queue);
                ksocknal_unlink_peer_locked(peer_ni);
        }

        write_unlock_bh(global_lock);

        if (warn != NULL) {
                if (rc < 0)
                        CERROR("Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
                else
                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
                              libcfs_id2str(peerid), conn->ksnc_type, warn);
        }

        if (!active) {
                if (rc > 0) {
                        /* Request retry by replying with CONN_NONE
                         * ksnc_proto has been set already */
                        conn->ksnc_type = SOCKLND_CONN_NONE;
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
                }

                write_lock_bh(global_lock);
                peer_ni->ksnp_accepting--;
                write_unlock_bh(global_lock);
        }

        /*
         * If we get here without an error code, just use -EALREADY.
         * Depending on how we got here, the error may be positive
         * or negative. Normalize the value for ksocknal_txlist_done().
         */
        rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
        ksocknal_txlist_done(ni, &zombies, rc2);
        ksocknal_peer_decref(peer_ni);

failed_1:
        if (hello != NULL)
                LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
                                            kshm_ips[LNET_INTERFACES_NUM]));

        LIBCFS_FREE(conn, sizeof(*conn));

failed_0:
        sock_release(sock);
        return rc;
}
1431
/*
 * Close @conn with close reason @error (0 for a clean close).
 *
 * This just does the immmediate housekeeping, and queues the
 * connection for the reaper to terminate.
 * Caller holds ksnd_global_lock exclusively in irq context.
 */
void
ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_route *route;
        struct ksock_conn *conn2;
        struct list_head *tmp;

        LASSERT(peer_ni->ksnp_error == 0);
        LASSERT(!conn->ksnc_closing);
        conn->ksnc_closing = 1;

        /* ksnd_deathrow_conns takes over peer_ni's ref */
        list_del(&conn->ksnc_list);

        route = conn->ksnc_route;
        if (route != NULL) {
                /* dissociate conn from route... */
                LASSERT(!route->ksnr_deleted);
                LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

                /* only clear the route's "connected" bit for this conn
                 * type if no other conn of the same type still uses it */
                conn2 = NULL;
                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);

                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
                                break;

                        conn2 = NULL;
                }
                if (conn2 == NULL)
                        route->ksnr_connected &= ~(1 << conn->ksnc_type);

                conn->ksnc_route = NULL;

                ksocknal_route_decref(route);   /* drop conn's ref on route */
        }

        if (list_empty(&peer_ni->ksnp_conns)) {
                /* No more connections to this peer_ni */

                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
                                struct ksock_tx *tx;

                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

                        /* throw them to the last connection...,
                         * these TXs will be send to /dev/null by scheduler */
                        list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
                                            tx_list)
                                ksocknal_tx_prep(conn, tx);

                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
                        list_splice_init(&peer_ni->ksnp_tx_queue,
                                         &conn->ksnc_tx_queue);
                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                }

                /* renegotiate protocol version */
                peer_ni->ksnp_proto = NULL;
                /* stash last conn close reason */
                peer_ni->ksnp_error = error;

                if (list_empty(&peer_ni->ksnp_routes)) {
                        /* I've just closed last conn belonging to a
                         * peer_ni with no routes to it */
                        ksocknal_unlink_peer_locked(peer_ni);
                }
        }

        /* hand the conn to the reaper for termination */
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        list_add_tail(&conn->ksnc_list,
                      &ksocknal_data.ksnd_deathrow_conns);
        wake_up(&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
1514
1515 void
1516 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1517 {
1518         int notify = 0;
1519         time64_t last_alive = 0;
1520
1521         /* There has been a connection failure or comms error; but I'll only
1522          * tell LNET I think the peer_ni is dead if it's to another kernel and
1523          * there are no connections or connection attempts in existence. */
1524
1525         read_lock(&ksocknal_data.ksnd_global_lock);
1526
1527         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1528              list_empty(&peer_ni->ksnp_conns) &&
1529              peer_ni->ksnp_accepting == 0 &&
1530              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1531                 notify = 1;
1532                 last_alive = peer_ni->ksnp_last_alive;
1533         }
1534
1535         read_unlock(&ksocknal_data.ksnd_global_lock);
1536
1537         if (notify)
1538                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1539                             last_alive);
1540 }
1541
1542 void
1543 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1544 {
1545         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1546         struct ksock_tx *tx;
1547         struct ksock_tx *tmp;
1548         struct list_head zlist = LIST_HEAD_INIT(zlist);
1549
1550         /* NB safe to finalize TXs because closing of socket will
1551          * abort all buffered data */
1552         LASSERT(conn->ksnc_sock == NULL);
1553
1554         spin_lock(&peer_ni->ksnp_lock);
1555
1556         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1557                 if (tx->tx_conn != conn)
1558                         continue;
1559
1560                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1561
1562                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1563                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1564                 list_del(&tx->tx_zc_list);
1565                 list_add(&tx->tx_zc_list, &zlist);
1566         }
1567
1568         spin_unlock(&peer_ni->ksnp_lock);
1569
1570         while (!list_empty(&zlist)) {
1571                 tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
1572
1573                 list_del(&tx->tx_zc_list);
1574                 ksocknal_tx_decref(tx);
1575         }
1576 }
1577
/*
 * Disengage @conn from its socket callbacks and scheduler, and close
 * the socket.
 *
 * This gets called by the reaper (guaranteed thread context) to
 * disengage the socket from its callbacks and close it.
 * ksnc_refcount will eventually hit zero, and then the reaper will
 * destroy it.
 */
void
ksocknal_terminate_conn(struct ksock_conn *conn)
{
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_sched *sched = conn->ksnc_scheduler;
        int failed = 0;

        LASSERT(conn->ksnc_closing);

        /* wake up the scheduler to "send" all remaining packets to /dev/null */
        spin_lock_bh(&sched->kss_lock);

        /* a closing conn is always ready to tx */
        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled &&
            !list_empty(&conn->ksnc_tx_queue)) {
                list_add_tail(&conn->ksnc_tx_list,
                               &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up (&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);

        /* serialise with callbacks */
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

        /* OK, so this conn may not be completely disengaged from its
         * scheduler yet, but it _has_ committed to terminate... */
        conn->ksnc_scheduler->kss_nconns--;

        if (peer_ni->ksnp_error != 0) {
                /* peer_ni's last conn closed in error */
                LASSERT(list_empty(&peer_ni->ksnp_conns));
                failed = 1;
                peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        /* notify LNet outside the lock */
        if (failed)
                ksocknal_peer_failed(peer_ni);

        /* The socket is closed on the final put; either here, or in
         * ksocknal_{send,recv}msg().  Since we set up the linger2 option
         * when the connection was established, this will close the socket
         * immediately, aborting anything buffered in it. Any hung
         * zero-copy transmits will therefore complete in finite time. */
        ksocknal_connsock_decref(conn);
}
1638
void
ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
	/* Queue the conn for the reaper to destroy; the conn refcount has
	 * dropped to zero, so only the reaper may touch it from now on
	 * (it will call ksocknal_destroy_conn() for the final free). */
	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	/* wake the reaper while still holding its lock so the wakeup cannot
	 * race with the list insertion */
	list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
1651
void
ksocknal_destroy_conn(struct ksock_conn *conn)
{
	time64_t last_rcv;

	/* Final coup-de-grace of the reaper */
	CDEBUG (D_NET, "connection %p\n", conn);

	/* both refcounts are gone and the conn is fully disengaged from
	 * socket, route and scheduler before it may be freed */
	LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
	LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
	LASSERT (conn->ksnc_sock == NULL);
	LASSERT (conn->ksnc_route == NULL);
	LASSERT (!conn->ksnc_tx_scheduled);
	LASSERT (!conn->ksnc_rx_scheduled);
	LASSERT(list_empty(&conn->ksnc_tx_queue));

	/* complete current receive if any */
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_LNET_PAYLOAD:
		/* the rx deadline was armed lnet_get_lnd_timeout() secs after
		 * the last activity, so subtracting recovers last-alive */
		last_rcv = conn->ksnc_rx_deadline -
			   lnet_get_lnd_timeout();
		CERROR("Completing partial receive from %s[%d], "
		       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
		       "last alive is %lld secs ago\n",
		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
		       &conn->ksnc_ipaddr, conn->ksnc_port,
		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
		       ktime_get_seconds() - last_rcv);
		if (conn->ksnc_lnet_msg)
			conn->ksnc_lnet_msg->msg_health_status =
				LNET_MSG_STATUS_REMOTE_ERROR;
		/* NOTE(review): reached even when ksnc_lnet_msg is NULL —
		 * presumably lnet_finalize() tolerates a NULL msg; confirm */
		lnet_finalize(conn->ksnc_lnet_msg, -EIO);
		break;
	case SOCKNAL_RX_LNET_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of lnet header from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of ksock message from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_SLOP:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of slops from %s, "
			       "ip %pI4h:%d, with error\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port);
	       break;
	default:
		LBUG ();
		break;
	}

	/* drop the ref the conn held on its peer_ni */
	ksocknal_peer_decref(conn->ksnc_peer);

	LIBCFS_FREE (conn, sizeof (*conn));
}
1717
1718 int
1719 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
1720 {
1721         struct ksock_conn *conn;
1722         struct list_head *ctmp;
1723         struct list_head *cnxt;
1724         int count = 0;
1725
1726         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1727                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1728
1729                 if (ipaddr == 0 ||
1730                     conn->ksnc_ipaddr == ipaddr) {
1731                         count++;
1732                         ksocknal_close_conn_locked (conn, why);
1733                 }
1734         }
1735
1736         return (count);
1737 }
1738
1739 int
1740 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1741 {
1742         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1743         u32 ipaddr = conn->ksnc_ipaddr;
1744         int count;
1745
1746         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1747
1748         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1749
1750         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1751
1752         return (count);
1753 }
1754
1755 int
1756 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1757 {
1758         struct ksock_peer_ni *peer_ni;
1759         struct list_head *ptmp;
1760         struct list_head *pnxt;
1761         int lo;
1762         int hi;
1763         int i;
1764         int count = 0;
1765
1766         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1767
1768         if (id.nid != LNET_NID_ANY)
1769                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1770         else {
1771                 lo = 0;
1772                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1773         }
1774
1775         for (i = lo; i <= hi; i++) {
1776                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1777
1778                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
1779
1780                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1781                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1782                                 continue;
1783
1784                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1785                 }
1786         }
1787
1788         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1789
1790         /* wildcards always succeed */
1791         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1792                 return (0);
1793
1794         return (count == 0 ? -ENOENT : 0);
1795 }
1796
1797 void
1798 ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
1799 {
1800         /* The router is telling me she's been notified of a change in
1801          * gateway state....
1802          */
1803         struct lnet_process_id id = {
1804                 .nid    = gw_nid,
1805                 .pid    = LNET_PID_ANY,
1806         };
1807
1808         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1809                 alive ? "up" : "down");
1810
1811         if (!alive) {
1812                 /* If the gateway crashed, close all open connections... */
1813                 ksocknal_close_matching_conns (id, 0);
1814                 return;
1815         }
1816
1817         /* ...otherwise do nothing.  We can only establish new connections
1818          * if we have autroutes, and these connect on demand. */
1819 }
1820
/* Report (via *when) the last time the peer_ni @nid was known alive, and
 * kick off new connections to it if a connectable route exists. */
void
ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
	int connect = 1;
	time64_t last_alive = 0;
	time64_t now = ktime_get_seconds();
	struct ksock_peer_ni *peer_ni = NULL;
	rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
	struct lnet_process_id id = {
		.nid = nid,
		.pid = LNET_PID_LUSTRE,
	};

	read_lock(glock);

	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL) {
		struct list_head *tmp;
		struct ksock_conn *conn;
		int bufnob;

		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, struct ksock_conn, ksnc_list);
			bufnob = conn->ksnc_sock->sk->sk_wmem_queued;

			/* NOTE(review): fields updated while only holding the
			 * read lock — presumably benign racy "keepalive"
			 * bookkeeping; confirm */
			if (bufnob < conn->ksnc_tx_bufnob) {
				/* something got ACKed */
				conn->ksnc_tx_deadline = ktime_get_seconds() +
							 lnet_get_lnd_timeout();
				peer_ni->ksnp_last_alive = now;
				conn->ksnc_tx_bufnob = bufnob;
			}
		}

		last_alive = peer_ni->ksnp_last_alive;
		/* no connectable route means there is nothing to launch */
		if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
			connect = 0;
	}

	read_unlock(glock);

	if (last_alive != 0)
		*when = last_alive;

	CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
	       libcfs_nid2str(nid), peer_ni,
	       last_alive ? now - last_alive : -1,
	       connect);

	if (!connect)
		return;

	/* NOTE(review): return value ignored — presumably best-effort
	 * (re-)creation of the peer/route before launching; confirm */
	ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());

	write_lock_bh(glock);

	/* re-lookup under the write lock: the peer may have appeared,
	 * changed or vanished since the read-locked section above */
	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL)
		ksocknal_launch_all_connections_locked(peer_ni);

	write_unlock_bh(glock);
	return;
}
1884
1885 static void
1886 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1887 {
1888         int index;
1889         int i;
1890         struct list_head *tmp;
1891         struct ksock_conn *conn;
1892
1893         for (index = 0; ; index++) {
1894                 read_lock(&ksocknal_data.ksnd_global_lock);
1895
1896                 i = 0;
1897                 conn = NULL;
1898
1899                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1900                         if (i++ == index) {
1901                                 conn = list_entry(tmp, struct ksock_conn,
1902                                                   ksnc_list);
1903                                 ksocknal_conn_addref(conn);
1904                                 break;
1905                         }
1906                 }
1907
1908                 read_unlock(&ksocknal_data.ksnd_global_lock);
1909
1910                 if (conn == NULL)
1911                         break;
1912
1913                 ksocknal_lib_push_conn (conn);
1914                 ksocknal_conn_decref(conn);
1915         }
1916 }
1917
1918 static int
1919 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1920 {
1921         struct list_head *start;
1922         struct list_head *end;
1923         struct list_head *tmp;
1924         int               rc = -ENOENT;
1925         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1926
1927         if (id.nid == LNET_NID_ANY) {
1928                 start = &ksocknal_data.ksnd_peers[0];
1929                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1930         } else {
1931                 start = end = ksocknal_nid2peerlist(id.nid);
1932         }
1933
1934         for (tmp = start; tmp <= end; tmp++) {
1935                 int     peer_off; /* searching offset in peer_ni hash table */
1936
1937                 for (peer_off = 0; ; peer_off++) {
1938                         struct ksock_peer_ni *peer_ni;
1939                         int           i = 0;
1940
1941                         read_lock(&ksocknal_data.ksnd_global_lock);
1942                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1943                                 if (!((id.nid == LNET_NID_ANY ||
1944                                        id.nid == peer_ni->ksnp_id.nid) &&
1945                                       (id.pid == LNET_PID_ANY ||
1946                                        id.pid == peer_ni->ksnp_id.pid)))
1947                                         continue;
1948
1949                                 if (i++ == peer_off) {
1950                                         ksocknal_peer_addref(peer_ni);
1951                                         break;
1952                                 }
1953                         }
1954                         read_unlock(&ksocknal_data.ksnd_global_lock);
1955
1956                         if (i == 0) /* no match */
1957                                 break;
1958
1959                         rc = 0;
1960                         ksocknal_push_peer(peer_ni);
1961                         ksocknal_peer_decref(peer_ni);
1962                 }
1963         }
1964         return rc;
1965 }
1966
1967 static int
1968 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1969 {
1970         struct ksock_net *net = ni->ni_data;
1971         struct ksock_interface *iface;
1972         int rc;
1973         int i;
1974         int j;
1975         struct list_head *ptmp;
1976         struct ksock_peer_ni *peer_ni;
1977         struct list_head *rtmp;
1978         struct ksock_route *route;
1979
1980         if (ipaddress == 0 ||
1981             netmask == 0)
1982                 return -EINVAL;
1983
1984         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1985
1986         iface = ksocknal_ip2iface(ni, ipaddress);
1987         if (iface != NULL) {
1988                 /* silently ignore dups */
1989                 rc = 0;
1990         } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
1991                 rc = -ENOSPC;
1992         } else {
1993                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1994
1995                 iface->ksni_ipaddr = ipaddress;
1996                 iface->ksni_netmask = netmask;
1997                 iface->ksni_nroutes = 0;
1998                 iface->ksni_npeers = 0;
1999
2000                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2001                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
2002                                 peer_ni = list_entry(ptmp, struct ksock_peer_ni,
2003                                                      ksnp_list);
2004
2005                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
2006                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
2007                                                 iface->ksni_npeers++;
2008
2009                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
2010                                         route = list_entry(rtmp,
2011                                                            struct ksock_route,
2012                                                            ksnr_list);
2013
2014                                         if (route->ksnr_myipaddr == ipaddress)
2015                                                 iface->ksni_nroutes++;
2016                                 }
2017                         }
2018                 }
2019
2020                 rc = 0;
2021                 /* NB only new connections will pay attention to the new interface! */
2022         }
2023
2024         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2025
2026         return rc;
2027 }
2028
2029 static void
2030 ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
2031 {
2032         struct list_head *tmp;
2033         struct list_head *nxt;
2034         struct ksock_route *route;
2035         struct ksock_conn *conn;
2036         int i;
2037         int j;
2038
2039         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2040                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2041                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2042                                 peer_ni->ksnp_passive_ips[j-1] =
2043                                         peer_ni->ksnp_passive_ips[j];
2044                         peer_ni->ksnp_n_passive_ips--;
2045                         break;
2046                 }
2047
2048         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2049                 route = list_entry(tmp, struct ksock_route, ksnr_list);
2050
2051                 if (route->ksnr_myipaddr != ipaddr)
2052                         continue;
2053
2054                 if (route->ksnr_share_count != 0) {
2055                         /* Manually created; keep, but unbind */
2056                         route->ksnr_myipaddr = 0;
2057                 } else {
2058                         ksocknal_del_route_locked(route);
2059                 }
2060         }
2061
2062         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2063                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2064
2065                 if (conn->ksnc_myipaddr == ipaddr)
2066                         ksocknal_close_conn_locked (conn, 0);
2067         }
2068 }
2069
2070 static int
2071 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2072 {
2073         struct ksock_net *net = ni->ni_data;
2074         int rc = -ENOENT;
2075         struct list_head *tmp;
2076         struct list_head *nxt;
2077         struct ksock_peer_ni *peer_ni;
2078         u32 this_ip;
2079         int i;
2080         int j;
2081
2082         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2083
2084         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2085                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2086
2087                 if (!(ipaddress == 0 ||
2088                       ipaddress == this_ip))
2089                         continue;
2090
2091                 rc = 0;
2092
2093                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2094                         net->ksnn_interfaces[j-1] =
2095                                 net->ksnn_interfaces[j];
2096
2097                 net->ksnn_ninterfaces--;
2098
2099                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2100                         list_for_each_safe(tmp, nxt,
2101                                            &ksocknal_data.ksnd_peers[j]) {
2102                                 peer_ni = list_entry(tmp, struct ksock_peer_ni,
2103                                                      ksnp_list);
2104
2105                                 if (peer_ni->ksnp_ni != ni)
2106                                         continue;
2107
2108                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2109                         }
2110                 }
2111         }
2112
2113         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2114
2115         return (rc);
2116 }
2117
/* LND ioctl dispatcher: handles the socklnd-specific IOC_LIBCFS_* commands
 * on behalf of LNet.  @arg is a struct libcfs_ioctl_data already copied in
 * from userspace.  Returns 0 or a negative errno. */
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
	struct lnet_process_id id = {0};
	struct libcfs_ioctl_data *data = arg;
	int rc;

	switch(cmd) {
	case IOC_LIBCFS_GET_INTERFACE: {
		/* report the ioc_count'th local interface of this net */
		struct ksock_net *net = ni->ni_data;
		struct ksock_interface *iface;

		read_lock(&ksocknal_data.ksnd_global_lock);

		if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
			rc = -ENOENT;
		} else {
			rc = 0;
			iface = &net->ksnn_interfaces[data->ioc_count];

			data->ioc_u32[0] = iface->ksni_ipaddr;
			data->ioc_u32[1] = iface->ksni_netmask;
			data->ioc_u32[2] = iface->ksni_npeers;
			data->ioc_u32[3] = iface->ksni_nroutes;
		}

		read_unlock(&ksocknal_data.ksnd_global_lock);
		return rc;
	}

	case IOC_LIBCFS_ADD_INTERFACE:
		return ksocknal_add_interface(ni,
					      data->ioc_u32[0], /* IP address */
					      data->ioc_u32[1]); /* net mask */

	case IOC_LIBCFS_DEL_INTERFACE:
		return ksocknal_del_interface(ni,
					      data->ioc_u32[0]); /* IP address */

	case IOC_LIBCFS_GET_PEER: {
		/* enumerate peers by index (ioc_count in / share count out) */
		__u32            myip = 0;
		__u32            ip = 0;
		int              port = 0;
		int              conn_count = 0;
		int              share_count = 0;

		rc = ksocknal_get_peer_info(ni, data->ioc_count,
					    &id, &myip, &ip, &port,
					    &conn_count,  &share_count);
		if (rc != 0)
			return rc;

		data->ioc_nid    = id.nid;
		data->ioc_count  = share_count;
		data->ioc_u32[0] = ip;
		data->ioc_u32[1] = port;
		data->ioc_u32[2] = myip;
		data->ioc_u32[3] = conn_count;
		data->ioc_u32[4] = id.pid;
		return 0;
	}

	case IOC_LIBCFS_ADD_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_LUSTRE;
		return ksocknal_add_peer (ni, id,
					  data->ioc_u32[0], /* IP */
					  data->ioc_u32[1]); /* port */

	case IOC_LIBCFS_DEL_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_del_peer (ni, id,
					  data->ioc_u32[0]); /* IP */

	case IOC_LIBCFS_GET_CONN: {
		/* enumerate connections by index and report their tunables */
		int           txmem;
		int           rxmem;
		int           nagle;
		struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);

		if (conn == NULL)
			return -ENOENT;

		ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);

		data->ioc_count  = txmem;
		data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
		data->ioc_flags  = nagle;
		data->ioc_u32[0] = conn->ksnc_ipaddr;
		data->ioc_u32[1] = conn->ksnc_port;
		data->ioc_u32[2] = conn->ksnc_myipaddr;
		data->ioc_u32[3] = conn->ksnc_type;
		data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
		data->ioc_u32[5] = rxmem;
		data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
		/* drop the ref ksocknal_get_conn_by_idx() took */
		ksocknal_conn_decref(conn);
		return 0;
	}

	case IOC_LIBCFS_CLOSE_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_close_matching_conns (id,
						      data->ioc_u32[0]);

	case IOC_LIBCFS_REGISTER_MYNID:
		/* Ignore if this is a noop */
		if (data->ioc_nid == ni->ni_nid)
			return 0;

		CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
		       libcfs_nid2str(data->ioc_nid),
		       libcfs_nid2str(ni->ni_nid));
		return -EINVAL;

	case IOC_LIBCFS_PUSH_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_push(ni, id);

	default:
		return -EINVAL;
	}
	/* not reached */
}
2244
/* Free all remaining module-level allocations: per-CPT scheduler arrays,
 * the peer hash table and any idle noop txs.  Called once from shutdown
 * after every tx has been accounted for. */
static void
ksocknal_free_buffers (void)
{
	LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);

	if (ksocknal_data.ksnd_sched_info != NULL) {
		struct ksock_sched_info *info;
		int                     i;

		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
			if (info->ksi_scheds != NULL) {
				LIBCFS_FREE(info->ksi_scheds,
					    info->ksi_nthreads_max *
					    sizeof(info->ksi_scheds[0]));
			}
		}
		cfs_percpt_free(ksocknal_data.ksnd_sched_info);
	}

	LIBCFS_FREE (ksocknal_data.ksnd_peers,
		     sizeof(struct list_head) *
		     ksocknal_data.ksnd_peer_hash_size);

	spin_lock(&ksocknal_data.ksnd_tx_lock);

	if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
		struct list_head zlist;
		struct ksock_tx *tx;

		/* head-swap idiom: insert zlist into the ring then delete
		 * the global head, so the whole idle list is now reachable
		 * only from the local zlist and can be freed unlocked */
		list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
		list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
		spin_unlock(&ksocknal_data.ksnd_tx_lock);

		while (!list_empty(&zlist)) {
			tx = list_entry(zlist.next, struct ksock_tx, tx_list);
			list_del(&tx->tx_list);
			LIBCFS_FREE(tx, tx->tx_desc_size);
		}
	} else {
		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}
}
2287
/* Tear down all module-level state: verify every peer/conn list is empty,
 * flag and wake all threads, wait for them to exit, then free buffers.
 * Runs only when no nets remain (ksnd_nnets == 0). */
static void
ksocknal_base_shutdown(void)
{
	struct ksock_sched_info *info;
	struct ksock_sched *sched;
	int i;
	int j;

	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read (&libcfs_kmemory));
	LASSERT (ksocknal_data.ksnd_nnets == 0);

	switch (ksocknal_data.ksnd_init) {
	default:
		LASSERT (0);
		/* fallthrough */

	case SOCKNAL_INIT_ALL:
	case SOCKNAL_INIT_DATA:
		/* sanity: everything must already be idle and empty */
		LASSERT (ksocknal_data.ksnd_peers != NULL);
		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
		}

		LASSERT(list_empty(&ksocknal_data.ksnd_nets));
		LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {

					sched = &info->ksi_scheds[j];
					LASSERT(list_empty(&sched->\
							       kss_tx_conns));
					LASSERT(list_empty(&sched->\
							       kss_rx_conns));
					LASSERT(list_empty(&sched-> \
						  kss_zombie_noop_txs));
					LASSERT(sched->kss_nconns == 0);
				}
			}
		}

		/* flag threads to terminate; wake and wait for them to die */
		ksocknal_data.ksnd_shuttingdown = 1;
		wake_up_all(&ksocknal_data.ksnd_connd_waitq);
		wake_up_all(&ksocknal_data.ksnd_reaper_waitq);

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {
					sched = &info->ksi_scheds[j];
					wake_up_all(&sched->kss_waitq);
				}
			}
		}

		/* poll ksnd_nthreads once a second until every thread has
		 * exited; log progressively rarer (power-of-2) warnings */
		i = 4;
		read_lock(&ksocknal_data.ksnd_global_lock);
		while (ksocknal_data.ksnd_nthreads != 0) {
			i++;
			/* power of 2? */
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
				"waiting for %d threads to terminate\n",
				ksocknal_data.ksnd_nthreads);
			read_unlock(&ksocknal_data.ksnd_global_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
			read_lock(&ksocknal_data.ksnd_global_lock);
		}
		read_unlock(&ksocknal_data.ksnd_global_lock);

		ksocknal_free_buffers();

		ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
		break;
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read (&libcfs_kmemory));

	/* drop the ref taken in ksocknal_base_startup() */
	module_put(THIS_MODULE);
}
2381
/* One-time initialisation of socklnd global state: the peer_ni hash
 * table, per-CPT scheduler info, connection daemon (connd) threads and
 * the reaper thread.  Returns 0 on success, -ENOMEM if the peer hash
 * cannot be allocated, or -ENETDOWN after undoing any partial setup
 * via ksocknal_base_shutdown(). */
static int
ksocknal_base_startup(void)
{
	struct ksock_sched_info *info;
	int                     rc;
	int                     i;

	LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
	LASSERT (ksocknal_data.ksnd_nnets == 0);

	memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */

	ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
	LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
		     sizeof(struct list_head) *
		     ksocknal_data.ksnd_peer_hash_size);
	if (ksocknal_data.ksnd_peers == NULL)
		return -ENOMEM;

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
		INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);

	rwlock_init(&ksocknal_data.ksnd_global_lock);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);

	spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
	init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);

	spin_lock_init(&ksocknal_data.ksnd_connd_lock);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
	init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);

	spin_lock_init(&ksocknal_data.ksnd_tx_lock);
	INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);

	/* NB memset above zeros whole of ksocknal_data */

	/* flag lists/ptrs/locks initialised */
	ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
	try_module_get(THIS_MODULE);

	/* one scheduler-info block per CPT */
	ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
							 sizeof(*info));
	if (ksocknal_data.ksnd_sched_info == NULL)
		goto failed;

	cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
		struct ksock_sched *sched;
		int nthrs;

		/* cap scheduler threads by the number of CPUs in this CPT */
		nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
		} else {
			/* max to half of CPUs, assume another half should be
			 * reserved for upper layer modules */
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
		}

		info->ksi_nthreads_max = nthrs;
		info->ksi_cpt = i;

		if (nthrs != 0) {
			LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
					 info->ksi_nthreads_max *
						sizeof(*sched));
			if (info->ksi_scheds == NULL)
				goto failed;

			/* threads themselves start lazily; just set up the
			 * per-scheduler queues and locks here */
			for (; nthrs > 0; nthrs--) {
				sched = &info->ksi_scheds[nthrs - 1];

				sched->kss_info = info;
				spin_lock_init(&sched->kss_lock);
				INIT_LIST_HEAD(&sched->kss_rx_conns);
				INIT_LIST_HEAD(&sched->kss_tx_conns);
				INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
				init_waitqueue_head(&sched->kss_waitq);
			}
		}
	}

	ksocknal_data.ksnd_connd_starting         = 0;
	ksocknal_data.ksnd_connd_failed_stamp     = 0;
	ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
	/* must have at least 2 connds to remain responsive to accepts while
	 * connecting */
	if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
		*ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;

	if (*ksocknal_tunables.ksnd_nconnds_max <
	    *ksocknal_tunables.ksnd_nconnds) {
		/* NB: this aliases the POINTER, not the value, so from here
		 * on ksnd_nconnds_max reads the same variable as
		 * ksnd_nconnds and will track it */
		ksocknal_tunables.ksnd_nconnds_max =
			ksocknal_tunables.ksnd_nconnds;
	}

	for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
		char name[16];
		/* account the thread as starting before it runs so a
		 * failed spawn can be undone symmetrically below */
		spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
		ksocknal_data.ksnd_connd_starting++;
		spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);


		snprintf(name, sizeof(name), "socknal_cd%02d", i);
		rc = ksocknal_thread_start(ksocknal_connd,
					   (void *)((uintptr_t)i), name);
		if (rc != 0) {
			spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
			ksocknal_data.ksnd_connd_starting--;
			spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
			CERROR("Can't spawn socknal connd: %d\n", rc);
			goto failed;
		}
	}

	rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
	if (rc != 0) {
		CERROR ("Can't spawn socknal reaper: %d\n", rc);
		goto failed;
	}

	/* flag everything initialised */
	ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;

	return 0;

 failed:
	ksocknal_base_shutdown();
	return -ENETDOWN;
}
2516
2517 static void
2518 ksocknal_debug_peerhash(struct lnet_ni *ni)
2519 {
2520         struct ksock_peer_ni *peer_ni = NULL;
2521         struct list_head *tmp;
2522         int i;
2523
2524         read_lock(&ksocknal_data.ksnd_global_lock);
2525
2526         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2527                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2528                         peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
2529
2530                         if (peer_ni->ksnp_ni == ni) break;
2531
2532                         peer_ni = NULL;
2533                 }
2534         }
2535
2536         if (peer_ni != NULL) {
2537                 struct ksock_route *route;
2538                 struct ksock_conn  *conn;
2539
2540                 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2541                        "closing %d, accepting %d, err %d, zcookie %llu, "
2542                        "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2543                        atomic_read(&peer_ni->ksnp_refcount),
2544                        peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2545                        peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2546                        peer_ni->ksnp_zc_next_cookie,
2547                        !list_empty(&peer_ni->ksnp_tx_queue),
2548                        !list_empty(&peer_ni->ksnp_zc_req_list));
2549
2550                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2551                         route = list_entry(tmp, struct ksock_route, ksnr_list);
2552                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2553                                "del %d\n", atomic_read(&route->ksnr_refcount),
2554                                route->ksnr_scheduled, route->ksnr_connecting,
2555                                route->ksnr_connected, route->ksnr_deleted);
2556                 }
2557
2558                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2559                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2560                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2561                                atomic_read(&conn->ksnc_conn_refcount),
2562                                atomic_read(&conn->ksnc_sock_refcount),
2563                                conn->ksnc_type, conn->ksnc_closing);
2564                 }
2565         }
2566
2567         read_unlock(&ksocknal_data.ksnd_global_lock);
2568         return;
2569 }
2570
/* Per-network shutdown: mark @ni's net as shutting down, delete all of
 * its peers, wait for peer state to drain, then free the ksock_net.
 * The global socklnd state is torn down as well when the last network
 * goes away. */
void
ksocknal_shutdown(struct lnet_ni *ni)
{
	struct ksock_net *net = ni->ni_data;
	struct lnet_process_id anyid = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};
	int i;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);

	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_shutdown = 1;                 /* prevent new peers */
	spin_unlock_bh(&net->ksnn_lock);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);

	/* Wait for all peer_ni state to clean up */
	i = 2;
	spin_lock_bh(&net->ksnn_lock);
	while (net->ksnn_npeers != 0) {
		/* drop the lock while sleeping so peers can go away */
		spin_unlock_bh(&net->ksnn_lock);

		i++;
		/* escalate to D_WARNING only on power-of-2 iterations to
		 * avoid flooding the log while waiting */
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "waiting for %d peers to disconnect\n",
		       net->ksnn_npeers);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));

		/* dump whichever peer is still holding references */
		ksocknal_debug_peerhash(ni);

		spin_lock_bh(&net->ksnn_lock);
	}
	spin_unlock_bh(&net->ksnn_lock);

	/* no peers left, so no interface may still be in use */
	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
	}

	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
2622
2623 static int
2624 ksocknal_enumerate_interfaces(struct ksock_net *net)
2625 {
2626         char **names;
2627         int i;
2628         int j;
2629         int rc;
2630         int n;
2631
2632         n = lnet_ipif_enumerate(&names);
2633         if (n <= 0) {
2634                 CERROR("Can't enumerate interfaces: %d\n", n);
2635                 return n;
2636         }
2637
2638         for (i = j = 0; i < n; i++) {
2639                 int        up;
2640                 __u32      ip;
2641                 __u32      mask;
2642
2643                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2644                         continue;
2645
2646                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2647                 if (rc != 0) {
2648                         CWARN("Can't get interface %s info: %d\n",
2649                               names[i], rc);
2650                         continue;
2651                 }
2652
2653                 if (!up) {
2654                         CWARN("Ignoring interface %s (down)\n",
2655                               names[i]);
2656                         continue;
2657                 }
2658
2659                 if (j == LNET_INTERFACES_NUM) {
2660                         CWARN("Ignoring interface %s (too many interfaces)\n",
2661                               names[i]);
2662                         continue;
2663                 }
2664
2665                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2666                 net->ksnn_interfaces[j].ksni_netmask = mask;
2667                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2668                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2669                 j++;
2670         }
2671
2672         lnet_ipif_free_enumeration(names, n);
2673
2674         if (j == 0)
2675                 CERROR("Can't find any usable interfaces\n");
2676
2677         return j;
2678 }
2679
2680 static int
2681 ksocknal_search_new_ipif(struct ksock_net *net)
2682 {
2683         int new_ipif = 0;
2684         int i;
2685
2686         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2687                 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2688                 char *colon = strchr(ifnam, ':');
2689                 int found  = 0;
2690                 struct ksock_net *tmp;
2691                 int j;
2692
2693                 if (colon != NULL) /* ignore alias device */
2694                         *colon = 0;
2695
2696                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2697                                         ksnn_list) {
2698                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2699                                 char *ifnam2 = &tmp->ksnn_interfaces[j].\
2700                                              ksni_name[0];
2701                                 char *colon2 = strchr(ifnam2, ':');
2702
2703                                 if (colon2 != NULL)
2704                                         *colon2 = 0;
2705
2706                                 found = strcmp(ifnam, ifnam2) == 0;
2707                                 if (colon2 != NULL)
2708                                         *colon2 = ':';
2709                         }
2710                         if (found)
2711                                 break;
2712                 }
2713
2714                 new_ipif += !found;
2715                 if (colon != NULL)
2716                         *colon = ':';
2717         }
2718
2719         return new_ipif;
2720 }
2721
2722 static int
2723 ksocknal_start_schedulers(struct ksock_sched_info *info)
2724 {
2725         int     nthrs;
2726         int     rc = 0;
2727         int     i;
2728
2729         if (info->ksi_nthreads == 0) {
2730                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2731                         nthrs = info->ksi_nthreads_max;
2732                 } else {
2733                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2734                                                info->ksi_cpt);
2735                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2736                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2737                 }
2738                 nthrs = min(nthrs, info->ksi_nthreads_max);
2739         } else {
2740                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2741                 /* increase two threads if there is new interface */
2742                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2743         }
2744
2745         for (i = 0; i < nthrs; i++) {
2746                 long id;
2747                 char name[20];
2748                 struct ksock_sched *sched;
2749
2750                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2751                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2752                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2753                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2754
2755                 rc = ksocknal_thread_start(ksocknal_scheduler,
2756                                            (void *)id, name);
2757                 if (rc == 0)
2758                         continue;
2759
2760                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2761                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2762                 break;
2763         }
2764
2765         info->ksi_nthreads += i;
2766         return rc;
2767 }
2768
2769 static int
2770 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2771 {
2772         int newif = ksocknal_search_new_ipif(net);
2773         int rc;
2774         int i;
2775
2776         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2777                 return -EINVAL;
2778
2779         for (i = 0; i < ncpts; i++) {
2780                 struct ksock_sched_info *info;
2781                 int cpt = (cpts == NULL) ? i : cpts[i];
2782
2783                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2784                 info = ksocknal_data.ksnd_sched_info[cpt];
2785
2786                 if (!newif && info->ksi_nthreads > 0)
2787                         continue;
2788
2789                 rc = ksocknal_start_schedulers(info);
2790                 if (rc != 0)
2791                         return rc;
2792         }
2793         return 0;
2794 }
2795
/* Bring up a socklnd network instance for @ni: allocate the ksock_net,
 * apply module-parameter defaults to unset tunables, resolve the IP
 * interfaces to use (either those named in the config or an
 * enumeration of what is up), start scheduler threads and derive the
 * NID from the first interface's address.  Returns 0 on success or
 * -ENETDOWN on failure (after tearing down global state if this was
 * the only network). */
int
ksocknal_startup(struct lnet_ni *ni)
{
	struct ksock_net *net;
	struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
	int rc;
	int i;
	struct net_device *net_dev;
	int node_id;

	LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);

	/* the first network brings up the global socklnd state */
	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	spin_lock_init(&net->ksnn_lock);
	net->ksnn_incarnation = ktime_get_real_ns();
	ni->ni_data = net;
	net_tunables = &ni->ni_net->net_tunables;

	/* tunables left at -1 by the configuration fall back to the
	 * corresponding module parameters */
	if (net_tunables->lct_peer_timeout == -1)
		net_tunables->lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;

	if (net_tunables->lct_max_tx_credits == -1)
		net_tunables->lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;

	if (net_tunables->lct_peer_tx_credits == -1)
		net_tunables->lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;

	/* per-peer credits can never exceed the global total */
	if (net_tunables->lct_peer_tx_credits >
	    net_tunables->lct_max_tx_credits)
		net_tunables->lct_peer_tx_credits =
			net_tunables->lct_max_tx_credits;

	if (net_tunables->lct_peer_rtr_credits == -1)
		net_tunables->lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;

	if (ni->ni_interfaces[0] == NULL) {
		/* no interfaces configured: probe the node.  NB only the
		 * first usable interface is used even if more were found */
		rc = ksocknal_enumerate_interfaces(net);
		if (rc <= 0)
			goto fail_1;

		net->ksnn_ninterfaces = 1;
	} else {
		/* use exactly the configured interfaces; each must be
		 * queryable and up or startup fails */
		for (i = 0; i < LNET_INTERFACES_NUM; i++) {
			int up;

			if (ni->ni_interfaces[i] == NULL)
				break;

			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
				&net->ksnn_interfaces[i].ksni_ipaddr,
				&net->ksnn_interfaces[i].ksni_netmask);

			if (rc != 0) {
				CERROR("Can't get interface %s info: %d\n",
				       ni->ni_interfaces[i], rc);
				goto fail_1;
			}

			if (!up) {
				CERROR("Interface %s is down\n",
				       ni->ni_interfaces[i]);
				goto fail_1;
			}

			strlcpy(net->ksnn_interfaces[i].ksni_name,
				ni->ni_interfaces[i],
				sizeof(net->ksnn_interfaces[i].ksni_name));

		}
		net->ksnn_ninterfaces = i;
	}

	/* bind the NI to the NUMA node of the first interface's device
	 * when the device can be resolved */
	net_dev = dev_get_by_name(&init_net,
				  net->ksnn_interfaces[0].ksni_name);
	if (net_dev != NULL) {
		node_id = dev_to_node(&net_dev->dev);
		ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
		dev_put(net_dev);
	} else {
		ni->ni_dev_cpt = CFS_CPT_ANY;
	}

	/* call it before add it to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;

	/* NID host part is the first interface's address */
	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
				net->ksnn_interfaces[0].ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

	ksocknal_data.ksnd_nnets++;

	return 0;

 fail_1:
	LIBCFS_FREE(net, sizeof(*net));
 fail_0:
	/* undo the base startup if this was the only network */
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
2912
2913
2914 static void __exit ksocklnd_exit(void)
2915 {
2916         lnet_unregister_lnd(&the_ksocklnd);
2917 }
2918
2919 static int __init ksocklnd_init(void)
2920 {
2921         int rc;
2922
2923         /* check ksnr_connected/connecting field large enough */
2924         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2925         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2926
2927         /* initialize the_ksocklnd */
2928         the_ksocklnd.lnd_type     = SOCKLND;
2929         the_ksocklnd.lnd_startup  = ksocknal_startup;
2930         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2931         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2932         the_ksocklnd.lnd_send     = ksocknal_send;
2933         the_ksocklnd.lnd_recv     = ksocknal_recv;
2934         the_ksocklnd.lnd_notify   = ksocknal_notify;
2935         the_ksocklnd.lnd_query    = ksocknal_query;
2936         the_ksocklnd.lnd_accept   = ksocknal_accept;
2937
2938         rc = ksocknal_tunables_init();
2939         if (rc != 0)
2940                 return rc;
2941
2942         lnet_register_lnd(&the_ksocklnd);
2943
2944         return 0;
2945 }
2946
/* Module metadata and load/unload entry points */
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);