1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include <linux/pci.h>
41 #include "socklnd.h"
42
43 static lnd_t                   the_ksocklnd;
44 ksock_nal_data_t        ksocknal_data;
45
46 static ksock_interface_t *
47 ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
48 {
49         ksock_net_t       *net = ni->ni_data;
50         int                i;
51         ksock_interface_t *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_MAX_INTERFACES);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return (iface);
59         }
60
61         return (NULL);
62 }
63
64 static ksock_route_t *
65 ksocknal_create_route (__u32 ipaddr, int port)
66 {
67         ksock_route_t *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route (ksock_route_t *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
99 static int
100 ksocknal_create_peer(ksock_peer_ni_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
101 {
102         int             cpt = lnet_cpt_of_nid(id.nid, ni);
103         ksock_net_t     *net = ni->ni_data;
104         ksock_peer_ni_t *peer_ni;
105
106         LASSERT(id.nid != LNET_NID_ANY);
107         LASSERT(id.pid != LNET_PID_ANY);
108         LASSERT(!in_interrupt());
109
110         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
111         if (peer_ni == NULL)
112                 return -ENOMEM;
113
114         peer_ni->ksnp_ni = ni;
115         peer_ni->ksnp_id = id;
116         atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
117         peer_ni->ksnp_closing = 0;
118         peer_ni->ksnp_accepting = 0;
119         peer_ni->ksnp_proto = NULL;
120         peer_ni->ksnp_last_alive = 0;
121         peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
122
123         INIT_LIST_HEAD(&peer_ni->ksnp_conns);
124         INIT_LIST_HEAD(&peer_ni->ksnp_routes);
125         INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
126         INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
127         spin_lock_init(&peer_ni->ksnp_lock);
128
129         spin_lock_bh(&net->ksnn_lock);
130
131         if (net->ksnn_shutdown) {
132                 spin_unlock_bh(&net->ksnn_lock);
133
134                 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
135                 CERROR("Can't create peer_ni: network shutdown\n");
136                 return -ESHUTDOWN;
137         }
138
139         net->ksnn_npeers++;
140
141         spin_unlock_bh(&net->ksnn_lock);
142
143         *peerp = peer_ni;
144         return 0;
145 }
146
147 void
148 ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
149 {
150         ksock_net_t    *net = peer_ni->ksnp_ni->ni_data;
151
152         CDEBUG (D_NET, "peer_ni %s %p deleted\n",
153                 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
154
155         LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
156         LASSERT(peer_ni->ksnp_accepting == 0);
157         LASSERT(list_empty(&peer_ni->ksnp_conns));
158         LASSERT(list_empty(&peer_ni->ksnp_routes));
159         LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
160         LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
161
162         LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
163
164         /* NB a peer_ni's connections and routes keep a reference on their peer_ni
165          * until they are destroyed, so we can be assured that _all_ state to
166          * do with this peer_ni has been cleaned up when its refcount drops to
167          * zero. */
168         spin_lock_bh(&net->ksnn_lock);
169         net->ksnn_npeers--;
170         spin_unlock_bh(&net->ksnn_lock);
171 }
172
173 ksock_peer_ni_t *
174 ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
175 {
176         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
177         struct list_head *tmp;
178         ksock_peer_ni_t  *peer_ni;
179
180         list_for_each(tmp, peer_list) {
181
182                 peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
183
184                 LASSERT(!peer_ni->ksnp_closing);
185
186                 if (peer_ni->ksnp_ni != ni)
187                         continue;
188
189                 if (peer_ni->ksnp_id.nid != id.nid ||
190                     peer_ni->ksnp_id.pid != id.pid)
191                         continue;
192
193                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
194                        peer_ni, libcfs_id2str(id),
195                        atomic_read(&peer_ni->ksnp_refcount));
196                 return peer_ni;
197         }
198         return NULL;
199 }
200
201 ksock_peer_ni_t *
202 ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
203 {
204         ksock_peer_ni_t     *peer_ni;
205
206         read_lock(&ksocknal_data.ksnd_global_lock);
207         peer_ni = ksocknal_find_peer_locked(ni, id);
208         if (peer_ni != NULL)                    /* +1 ref for caller? */
209                 ksocknal_peer_addref(peer_ni);
210         read_unlock(&ksocknal_data.ksnd_global_lock);
211
212         return (peer_ni);
213 }
214
215 static void
216 ksocknal_unlink_peer_locked (ksock_peer_ni_t *peer_ni)
217 {
218         int                i;
219         __u32              ip;
220         ksock_interface_t *iface;
221
222         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
223                 LASSERT (i < LNET_MAX_INTERFACES);
224                 ip = peer_ni->ksnp_passive_ips[i];
225
226                 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
227                 /* All IPs in peer_ni->ksnp_passive_ips[] come from the
228                  * interface list, therefore the call must succeed. */
229                 LASSERT (iface != NULL);
230
231                 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
232                        peer_ni, iface, iface->ksni_nroutes);
233                 iface->ksni_npeers--;
234         }
235
236         LASSERT(list_empty(&peer_ni->ksnp_conns));
237         LASSERT(list_empty(&peer_ni->ksnp_routes));
238         LASSERT(!peer_ni->ksnp_closing);
239         peer_ni->ksnp_closing = 1;
240         list_del(&peer_ni->ksnp_list);
241         /* lose peerlist's ref */
242         ksocknal_peer_decref(peer_ni);
243 }
244
245 static int
246 ksocknal_get_peer_info (lnet_ni_t *ni, int index,
247                         lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
248                         int *port, int *conn_count, int *share_count)
249 {
250         ksock_peer_ni_t   *peer_ni;
251         struct list_head  *ptmp;
252         ksock_route_t     *route;
253         struct list_head  *rtmp;
254         int                i;
255         int                j;
256         int                rc = -ENOENT;
257
258         read_lock(&ksocknal_data.ksnd_global_lock);
259
260         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
261                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
262                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
263
264                         if (peer_ni->ksnp_ni != ni)
265                                 continue;
266
267                         if (peer_ni->ksnp_n_passive_ips == 0 &&
268                             list_empty(&peer_ni->ksnp_routes)) {
269                                 if (index-- > 0)
270                                         continue;
271
272                                 *id = peer_ni->ksnp_id;
273                                 *myip = 0;
274                                 *peer_ip = 0;
275                                 *port = 0;
276                                 *conn_count = 0;
277                                 *share_count = 0;
278                                 rc = 0;
279                                 goto out;
280                         }
281
282                         for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
283                                 if (index-- > 0)
284                                         continue;
285
286                                 *id = peer_ni->ksnp_id;
287                                 *myip = peer_ni->ksnp_passive_ips[j];
288                                 *peer_ip = 0;
289                                 *port = 0;
290                                 *conn_count = 0;
291                                 *share_count = 0;
292                                 rc = 0;
293                                 goto out;
294                         }
295
296                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
297                                 if (index-- > 0)
298                                         continue;
299
300                                 route = list_entry(rtmp, ksock_route_t,
301                                                    ksnr_list);
302
303                                 *id = peer_ni->ksnp_id;
304                                 *myip = route->ksnr_myipaddr;
305                                 *peer_ip = route->ksnr_ipaddr;
306                                 *port = route->ksnr_port;
307                                 *conn_count = route->ksnr_conn_count;
308                                 *share_count = route->ksnr_share_count;
309                                 rc = 0;
310                                 goto out;
311                         }
312                 }
313         }
314 out:
315         read_unlock(&ksocknal_data.ksnd_global_lock);
316         return rc;
317 }
318
319 static void
320 ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
321 {
322         ksock_peer_ni_t   *peer_ni = route->ksnr_peer;
323         int                type = conn->ksnc_type;
324         ksock_interface_t *iface;
325
326         conn->ksnc_route = route;
327         ksocknal_route_addref(route);
328
329         if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
330                 if (route->ksnr_myipaddr == 0) {
331                         /* route wasn't bound locally yet (the initial route) */
332                         CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
333                                libcfs_id2str(peer_ni->ksnp_id),
334                                &route->ksnr_ipaddr,
335                                &conn->ksnc_myipaddr);
336                 } else {
337                         CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
338                                "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
339                                &route->ksnr_ipaddr,
340                                &route->ksnr_myipaddr,
341                                &conn->ksnc_myipaddr);
342
343                         iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
344                                                   route->ksnr_myipaddr);
345                         if (iface != NULL)
346                                 iface->ksni_nroutes--;
347                 }
348                 route->ksnr_myipaddr = conn->ksnc_myipaddr;
349                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
350                                           route->ksnr_myipaddr);
351                 if (iface != NULL)
352                         iface->ksni_nroutes++;
353         }
354
355         route->ksnr_connected |= (1<<type);
356         route->ksnr_conn_count++;
357
358         /* Successful connection => further attempts can
359          * proceed immediately */
360         route->ksnr_retry_interval = 0;
361 }
362
363 static void
364 ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
365 {
366         struct list_head *tmp;
367         ksock_conn_t     *conn;
368         ksock_route_t    *route2;
369
370         LASSERT(!peer_ni->ksnp_closing);
371         LASSERT(route->ksnr_peer == NULL);
372         LASSERT(!route->ksnr_scheduled);
373         LASSERT(!route->ksnr_connecting);
374         LASSERT(route->ksnr_connected == 0);
375
376         /* LASSERT(unique) */
377         list_for_each(tmp, &peer_ni->ksnp_routes) {
378                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
379
380                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
381                         CERROR("Duplicate route %s %pI4h\n",
382                                libcfs_id2str(peer_ni->ksnp_id),
383                                &route->ksnr_ipaddr);
384                         LBUG();
385                 }
386         }
387
388         route->ksnr_peer = peer_ni;
389         ksocknal_peer_addref(peer_ni);
390         /* peer_ni's routelist takes over my ref on 'route' */
391         list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
392
393         list_for_each(tmp, &peer_ni->ksnp_conns) {
394                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
395
396                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
397                         continue;
398
399                 ksocknal_associate_route_conn_locked(route, conn);
400                 /* keep going (typed routes) */
401         }
402 }
403
404 static void
405 ksocknal_del_route_locked (ksock_route_t *route)
406 {
407         ksock_peer_ni_t   *peer_ni = route->ksnr_peer;
408         ksock_interface_t *iface;
409         ksock_conn_t      *conn;
410         struct list_head  *ctmp;
411         struct list_head  *cnxt;
412
413         LASSERT(!route->ksnr_deleted);
414
415         /* Close associated conns */
416         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
417                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
418
419                 if (conn->ksnc_route != route)
420                         continue;
421
422                 ksocknal_close_conn_locked(conn, 0);
423         }
424
425         if (route->ksnr_myipaddr != 0) {
426                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
427                                           route->ksnr_myipaddr);
428                 if (iface != NULL)
429                         iface->ksni_nroutes--;
430         }
431
432         route->ksnr_deleted = 1;
433         list_del(&route->ksnr_list);
434         ksocknal_route_decref(route);           /* drop peer_ni's ref */
435
436         if (list_empty(&peer_ni->ksnp_routes) &&
437             list_empty(&peer_ni->ksnp_conns)) {
438                 /* I've just removed the last route to a peer_ni with no active
439                  * connections */
440                 ksocknal_unlink_peer_locked(peer_ni);
441         }
442 }
443
444 int
445 ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
446 {
447         struct list_head *tmp;
448         ksock_peer_ni_t  *peer_ni;
449         ksock_peer_ni_t  *peer2;
450         ksock_route_t    *route;
451         ksock_route_t    *route2;
452         int               rc;
453
454         if (id.nid == LNET_NID_ANY ||
455             id.pid == LNET_PID_ANY)
456                 return (-EINVAL);
457
458         /* Have a brand new peer_ni ready... */
459         rc = ksocknal_create_peer(&peer_ni, ni, id);
460         if (rc != 0)
461                 return rc;
462
463         route = ksocknal_create_route (ipaddr, port);
464         if (route == NULL) {
465                 ksocknal_peer_decref(peer_ni);
466                 return (-ENOMEM);
467         }
468
469         write_lock_bh(&ksocknal_data.ksnd_global_lock);
470
471         /* always called with a ref on ni, so shutdown can't have started */
472         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
473
474         peer2 = ksocknal_find_peer_locked(ni, id);
475         if (peer2 != NULL) {
476                 ksocknal_peer_decref(peer_ni);
477                 peer_ni = peer2;
478         } else {
479                 /* peer_ni table takes my ref on peer_ni */
480                 list_add_tail(&peer_ni->ksnp_list,
481                               ksocknal_nid2peerlist(id.nid));
482         }
483
484         route2 = NULL;
485         list_for_each(tmp, &peer_ni->ksnp_routes) {
486                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
487
488                 if (route2->ksnr_ipaddr == ipaddr)
489                         break;
490
491                 route2 = NULL;
492         }
493         if (route2 == NULL) {
494                 ksocknal_add_route_locked(peer_ni, route);
495                 route->ksnr_share_count++;
496         } else {
497                 ksocknal_route_decref(route);
498                 route2->ksnr_share_count++;
499         }
500
501         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
502
503         return 0;
504 }
505
506 static void
507 ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
508 {
509         ksock_conn_t     *conn;
510         ksock_route_t    *route;
511         struct list_head *tmp;
512         struct list_head *nxt;
513         int               nshared;
514
515         LASSERT(!peer_ni->ksnp_closing);
516
517         /* Extra ref prevents peer_ni disappearing until I'm done with it */
518         ksocknal_peer_addref(peer_ni);
519
520         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
521                 route = list_entry(tmp, ksock_route_t, ksnr_list);
522
523                 /* no match */
524                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
525                         continue;
526
527                 route->ksnr_share_count = 0;
528                 /* This deletes associated conns too */
529                 ksocknal_del_route_locked(route);
530         }
531
532         nshared = 0;
533         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
534                 route = list_entry(tmp, ksock_route_t, ksnr_list);
535                 nshared += route->ksnr_share_count;
536         }
537
538         if (nshared == 0) {
539                 /* remove everything else if there are no explicit entries
540                  * left */
541
542                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
543                         route = list_entry(tmp, ksock_route_t, ksnr_list);
544
545                         /* we should only be removing auto-entries */
546                         LASSERT(route->ksnr_share_count == 0);
547                         ksocknal_del_route_locked(route);
548                 }
549
550                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
551                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
552
553                         ksocknal_close_conn_locked(conn, 0);
554                 }
555         }
556
557         ksocknal_peer_decref(peer_ni);
558         /* NB peer_ni unlinks itself when last conn/route is removed */
559 }
560
561 static int
562 ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
563 {
564         struct list_head  zombies = LIST_HEAD_INIT(zombies);
565         struct list_head *ptmp;
566         struct list_head *pnxt;
567         ksock_peer_ni_t     *peer_ni;
568         int               lo;
569         int               hi;
570         int               i;
571         int               rc = -ENOENT;
572
573         write_lock_bh(&ksocknal_data.ksnd_global_lock);
574
575         if (id.nid != LNET_NID_ANY) {
576                 hi = (int)(ksocknal_nid2peerlist(id.nid) -
577                            ksocknal_data.ksnd_peers);
578                 lo = hi;
579         } else {
580                 lo = 0;
581                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
582         }
583
584         for (i = lo; i <= hi; i++) {
585                 list_for_each_safe(ptmp, pnxt,
586                                    &ksocknal_data.ksnd_peers[i]) {
587                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
588
589                         if (peer_ni->ksnp_ni != ni)
590                                 continue;
591
592                         if (!((id.nid == LNET_NID_ANY ||
593                                peer_ni->ksnp_id.nid == id.nid) &&
594                               (id.pid == LNET_PID_ANY ||
595                                peer_ni->ksnp_id.pid == id.pid)))
596                                 continue;
597
598                         ksocknal_peer_addref(peer_ni);  /* a ref for me... */
599
600                         ksocknal_del_peer_locked(peer_ni, ip);
601
602                         if (peer_ni->ksnp_closing &&
603                             !list_empty(&peer_ni->ksnp_tx_queue)) {
604                                 LASSERT(list_empty(&peer_ni->ksnp_conns));
605                                 LASSERT(list_empty(&peer_ni->ksnp_routes));
606
607                                 list_splice_init(&peer_ni->ksnp_tx_queue,
608                                                  &zombies);
609                         }
610
611                         ksocknal_peer_decref(peer_ni);  /* ...till here */
612
613                         rc = 0;                         /* matched! */
614                 }
615         }
616
617         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
618
619         ksocknal_txlist_done(ni, &zombies, 1);
620
621         return rc;
622 }
623
624 static ksock_conn_t *
625 ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
626 {
627         ksock_peer_ni_t  *peer_ni;
628         struct list_head *ptmp;
629         ksock_conn_t     *conn;
630         struct list_head *ctmp;
631         int               i;
632
633         read_lock(&ksocknal_data.ksnd_global_lock);
634
635         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
636                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
637                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
638
639                         LASSERT(!peer_ni->ksnp_closing);
640
641                         if (peer_ni->ksnp_ni != ni)
642                                 continue;
643
644                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
645                                 if (index-- > 0)
646                                         continue;
647
648                                 conn = list_entry(ctmp, ksock_conn_t,
649                                                   ksnc_list);
650                                 ksocknal_conn_addref(conn);
651                                 read_unlock(&ksocknal_data.
652                                             ksnd_global_lock);
653                                 return conn;
654                         }
655                 }
656         }
657
658         read_unlock(&ksocknal_data.ksnd_global_lock);
659         return NULL;
660 }
661
662 static ksock_sched_t *
663 ksocknal_choose_scheduler_locked(unsigned int cpt)
664 {
665         struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
666         ksock_sched_t           *sched;
667         int                     i;
668
669         LASSERT(info->ksi_nthreads > 0);
670
671         sched = &info->ksi_scheds[0];
672         /*
673          * NB: it's safe so far, but info->ksi_nthreads could be changed
674          * at runtime when we have dynamic LNet configuration, then we
675          * need to take care of this.
676          */
677         for (i = 1; i < info->ksi_nthreads; i++) {
678                 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
679                         sched = &info->ksi_scheds[i];
680         }
681
682         return sched;
683 }
684
685 static int
686 ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
687 {
688         ksock_net_t       *net = ni->ni_data;
689         int                i;
690         int                nip;
691
692         read_lock(&ksocknal_data.ksnd_global_lock);
693
694         nip = net->ksnn_ninterfaces;
695         LASSERT (nip <= LNET_MAX_INTERFACES);
696
697         /* Only offer interfaces for additional connections if I have
698          * more than one. */
699         if (nip < 2) {
700                 read_unlock(&ksocknal_data.ksnd_global_lock);
701                 return 0;
702         }
703
704         for (i = 0; i < nip; i++) {
705                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
706                 LASSERT (ipaddrs[i] != 0);
707         }
708
709         read_unlock(&ksocknal_data.ksnd_global_lock);
710         return (nip);
711 }
712
713 static int
714 ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
715 {
716         int   best_netmatch = 0;
717         int   best_xor      = 0;
718         int   best          = -1;
719         int   this_xor;
720         int   this_netmatch;
721         int   i;
722
723         for (i = 0; i < nips; i++) {
724                 if (ips[i] == 0)
725                         continue;
726
727                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
728                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
729
730                 if (!(best < 0 ||
731                       best_netmatch < this_netmatch ||
732                       (best_netmatch == this_netmatch &&
733                        best_xor > this_xor)))
734                         continue;
735
736                 best = i;
737                 best_netmatch = this_netmatch;
738                 best_xor = this_xor;
739         }
740
741         LASSERT (best >= 0);
742         return (best);
743 }
744
745 static int
746 ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
747 {
748         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
749         ksock_net_t        *net = peer_ni->ksnp_ni->ni_data;
750         ksock_interface_t  *iface;
751         ksock_interface_t  *best_iface;
752         int                 n_ips;
753         int                 i;
754         int                 j;
755         int                 k;
756         __u32               ip;
757         __u32               xor;
758         int                 this_netmatch;
759         int                 best_netmatch;
760         int                 best_npeers;
761
762         /* CAVEAT EMPTOR: We do all our interface matching with an
763          * exclusive hold of global lock at IRQ priority.  We're only
764          * expecting to be dealing with small numbers of interfaces, so the
765          * O(n**3)-ness shouldn't matter */
766
767         /* Also note that I'm not going to return more than n_peerips
768          * interfaces, even if I have more myself */
769
770         write_lock_bh(global_lock);
771
772         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
773         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
774
775         /* Only match interfaces for additional connections
776          * if I have > 1 interface */
777         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
778                 MIN(n_peerips, net->ksnn_ninterfaces);
779
780         for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
781                 /*              ^ yes really... */
782
783                 /* If we have any new interfaces, first tick off all the
784                  * peer_ni IPs that match old interfaces, then choose new
785                  * interfaces to match the remaining peer_ni IPs.
786                  * We don't forget interfaces we've stopped using; we might
787                  * start using them again... */
788
789                 if (i < peer_ni->ksnp_n_passive_ips) {
790                         /* Old interface. */
791                         ip = peer_ni->ksnp_passive_ips[i];
792                         best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
793
794                         /* peer_ni passive ips are kept up to date */
795                         LASSERT(best_iface != NULL);
796                 } else {
797                         /* choose a new interface */
798                         LASSERT (i == peer_ni->ksnp_n_passive_ips);
799
800                         best_iface = NULL;
801                         best_netmatch = 0;
802                         best_npeers = 0;
803
804                         for (j = 0; j < net->ksnn_ninterfaces; j++) {
805                                 iface = &net->ksnn_interfaces[j];
806                                 ip = iface->ksni_ipaddr;
807
808                                 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
809                                         if (peer_ni->ksnp_passive_ips[k] == ip)
810                                                 break;
811
812                                 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
813                                         continue;
814
815                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
816                                 xor = (ip ^ peerips[k]);
817                                 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
818
819                                 if (!(best_iface == NULL ||
820                                       best_netmatch < this_netmatch ||
821                                       (best_netmatch == this_netmatch &&
822                                        best_npeers > iface->ksni_npeers)))
823                                         continue;
824
825                                 best_iface = iface;
826                                 best_netmatch = this_netmatch;
827                                 best_npeers = iface->ksni_npeers;
828                         }
829
830                         LASSERT(best_iface != NULL);
831
832                         best_iface->ksni_npeers++;
833                         ip = best_iface->ksni_ipaddr;
834                         peer_ni->ksnp_passive_ips[i] = ip;
835                         peer_ni->ksnp_n_passive_ips = i+1;
836                 }
837
838                 /* mark the best matching peer_ni IP used */
839                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
840                 peerips[j] = 0;
841         }
842
843         /* Overwrite input peer_ni IP addresses */
844         memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
845
846         write_unlock_bh(global_lock);
847
848         return (n_ips);
849 }
850
851 static void
852 ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
853                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
854 {
855         ksock_route_t           *newroute = NULL;
856         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
857         lnet_ni_t               *ni = peer_ni->ksnp_ni;
858         ksock_net_t             *net = ni->ni_data;
859         struct list_head        *rtmp;
860         ksock_route_t           *route;
861         ksock_interface_t       *iface;
862         ksock_interface_t       *best_iface;
863         int                     best_netmatch;
864         int                     this_netmatch;
865         int                     best_nroutes;
866         int                     i;
867         int                     j;
868
869         /* CAVEAT EMPTOR: We do all our interface matching with an
870          * exclusive hold of global lock at IRQ priority.  We're only
871          * expecting to be dealing with small numbers of interfaces, so the
872          * O(n**3)-ness here shouldn't matter */
873
874         write_lock_bh(global_lock);
875
876         if (net->ksnn_ninterfaces < 2) {
877                 /* Only create additional connections
878                  * if I have > 1 interface */
879                 write_unlock_bh(global_lock);
880                 return;
881         }
882
883         LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
884
885         for (i = 0; i < npeer_ipaddrs; i++) {
886                 if (newroute != NULL) {
887                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
888                 } else {
889                         write_unlock_bh(global_lock);
890
891                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
892                         if (newroute == NULL)
893                                 return;
894
895                         write_lock_bh(global_lock);
896                 }
897
898                 if (peer_ni->ksnp_closing) {
899                         /* peer_ni got closed under me */
900                         break;
901                 }
902
903                 /* Already got a route? */
904                 route = NULL;
905                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
906                         route = list_entry(rtmp, ksock_route_t, ksnr_list);
907
908                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
909                                 break;
910
911                         route = NULL;
912                 }
913                 if (route != NULL)
914                         continue;
915
916                 best_iface = NULL;
917                 best_nroutes = 0;
918                 best_netmatch = 0;
919
920                 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
921
922                 /* Select interface to connect from */
923                 for (j = 0; j < net->ksnn_ninterfaces; j++) {
924                         iface = &net->ksnn_interfaces[j];
925
926                         /* Using this interface already? */
927                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
928                                 route = list_entry(rtmp, ksock_route_t,
929                                                    ksnr_list);
930
931                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
932                                         break;
933
934                                 route = NULL;
935                         }
936                         if (route != NULL)
937                                 continue;
938
939                         this_netmatch = (((iface->ksni_ipaddr ^
940                                            newroute->ksnr_ipaddr) &
941                                            iface->ksni_netmask) == 0) ? 1 : 0;
942
943                         if (!(best_iface == NULL ||
944                               best_netmatch < this_netmatch ||
945                               (best_netmatch == this_netmatch &&
946                                best_nroutes > iface->ksni_nroutes)))
947                                 continue;
948
949                         best_iface = iface;
950                         best_netmatch = this_netmatch;
951                         best_nroutes = iface->ksni_nroutes;
952                 }
953
954                 if (best_iface == NULL)
955                         continue;
956
957                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
958                 best_iface->ksni_nroutes++;
959
960                 ksocknal_add_route_locked(peer_ni, newroute);
961                 newroute = NULL;
962         }
963
964         write_unlock_bh(global_lock);
965         if (newroute != NULL)
966                 ksocknal_route_decref(newroute);
967 }
968
969 int
970 ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
971 {
972         ksock_connreq_t *cr;
973         int              rc;
974         __u32            peer_ip;
975         int              peer_port;
976
977         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
978         LASSERT(rc == 0);               /* we succeeded before */
979
980         LIBCFS_ALLOC(cr, sizeof(*cr));
981         if (cr == NULL) {
982                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
983                                    "%pI4h: memory exhausted\n", &peer_ip);
984                 return -ENOMEM;
985         }
986
987         lnet_ni_addref(ni);
988         cr->ksncr_ni   = ni;
989         cr->ksncr_sock = sock;
990
991         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
992
993         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
994         wake_up(&ksocknal_data.ksnd_connd_waitq);
995
996         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
997         return 0;
998 }
999
1000 static int
1001 ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
1002 {
1003         ksock_route_t *route;
1004
1005         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1006                 if (route->ksnr_ipaddr == ipaddr)
1007                         return route->ksnr_connecting;
1008         }
1009         return 0;
1010 }
1011
1012 int
1013 ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
1014                      struct socket *sock, int type)
1015 {
1016         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
1017         struct list_head        zombies = LIST_HEAD_INIT(zombies);
1018         lnet_process_id_t       peerid;
1019         struct list_head        *tmp;
1020         __u64              incarnation;
1021         ksock_conn_t      *conn;
1022         ksock_conn_t      *conn2;
1023         ksock_peer_ni_t      *peer_ni = NULL;
1024         ksock_peer_ni_t      *peer2;
1025         ksock_sched_t     *sched;
1026         struct ksock_hello_msg *hello;
1027         int                cpt;
1028         ksock_tx_t        *tx;
1029         ksock_tx_t        *txtmp;
1030         int                rc;
1031         int                active;
1032         char              *warn = NULL;
1033
1034         active = (route != NULL);
1035
1036         LASSERT (active == (type != SOCKLND_CONN_NONE));
1037
1038         LIBCFS_ALLOC(conn, sizeof(*conn));
1039         if (conn == NULL) {
1040                 rc = -ENOMEM;
1041                 goto failed_0;
1042         }
1043
1044         conn->ksnc_peer = NULL;
1045         conn->ksnc_route = NULL;
1046         conn->ksnc_sock = sock;
1047         /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
1048          * being closed before the connection is established */
1049         atomic_set (&conn->ksnc_sock_refcount, 2);
1050         conn->ksnc_type = type;
1051         ksocknal_lib_save_callback(sock, conn);
1052         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1053
1054         conn->ksnc_rx_ready = 0;
1055         conn->ksnc_rx_scheduled = 0;
1056
1057         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1058         conn->ksnc_tx_ready = 0;
1059         conn->ksnc_tx_scheduled = 0;
1060         conn->ksnc_tx_carrier = NULL;
1061         atomic_set (&conn->ksnc_tx_nob, 0);
1062
1063         LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1064                                      kshm_ips[LNET_MAX_INTERFACES]));
1065         if (hello == NULL) {
1066                 rc = -ENOMEM;
1067                 goto failed_1;
1068         }
1069
1070         /* stash conn's local and remote addrs */
1071         rc = ksocknal_lib_get_conn_addrs (conn);
1072         if (rc != 0)
1073                 goto failed_1;
1074
1075         /* Find out/confirm peer_ni's NID and connection type and get the
1076          * vector of interfaces she's willing to let me connect to.
1077          * Passive connections use the listener timeout since the peer_ni sends
1078          * eagerly */
1079
1080         if (active) {
1081                 peer_ni = route->ksnr_peer;
1082                 LASSERT(ni == peer_ni->ksnp_ni);
1083
1084                 /* Active connection sends HELLO eagerly */
1085                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1086                 peerid = peer_ni->ksnp_id;
1087
1088                 write_lock_bh(global_lock);
1089                 conn->ksnc_proto = peer_ni->ksnp_proto;
1090                 write_unlock_bh(global_lock);
1091
1092                 if (conn->ksnc_proto == NULL) {
1093                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1094 #if SOCKNAL_VERSION_DEBUG
1095                          if (*ksocknal_tunables.ksnd_protocol == 2)
1096                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1097                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1098                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1099 #endif
1100                 }
1101
1102                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1103                 if (rc != 0)
1104                         goto failed_1;
1105         } else {
1106                 peerid.nid = LNET_NID_ANY;
1107                 peerid.pid = LNET_PID_ANY;
1108
1109                 /* Passive, get protocol from peer_ni */
1110                 conn->ksnc_proto = NULL;
1111         }
1112
1113         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1114         if (rc < 0)
1115                 goto failed_1;
1116
1117         LASSERT (rc == 0 || active);
1118         LASSERT (conn->ksnc_proto != NULL);
1119         LASSERT (peerid.nid != LNET_NID_ANY);
1120
1121         cpt = lnet_cpt_of_nid(peerid.nid, ni);
1122
1123         if (active) {
1124                 ksocknal_peer_addref(peer_ni);
1125                 write_lock_bh(global_lock);
1126         } else {
1127                 rc = ksocknal_create_peer(&peer_ni, ni, peerid);
1128                 if (rc != 0)
1129                         goto failed_1;
1130
1131                 write_lock_bh(global_lock);
1132
1133                 /* called with a ref on ni, so shutdown can't have started */
1134                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1135
1136                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1137                 if (peer2 == NULL) {
1138                         /* NB this puts an "empty" peer_ni in the peer_ni
1139                          * table (which takes my ref) */
1140                         list_add_tail(&peer_ni->ksnp_list,
1141                                       ksocknal_nid2peerlist(peerid.nid));
1142                 } else {
1143                         ksocknal_peer_decref(peer_ni);
1144                         peer_ni = peer2;
1145                 }
1146
1147                 /* +1 ref for me */
1148                 ksocknal_peer_addref(peer_ni);
1149                 peer_ni->ksnp_accepting++;
1150
1151                 /* Am I already connecting to this guy?  Resolve in
1152                  * favour of higher NID... */
1153                 if (peerid.nid < ni->ni_nid &&
1154                     ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1155                         rc = EALREADY;
1156                         warn = "connection race resolution";
1157                         goto failed_2;
1158                 }
1159         }
1160
1161         if (peer_ni->ksnp_closing ||
1162             (active && route->ksnr_deleted)) {
1163                 /* peer_ni/route got closed under me */
1164                 rc = -ESTALE;
1165                 warn = "peer_ni/route removed";
1166                 goto failed_2;
1167         }
1168
1169         if (peer_ni->ksnp_proto == NULL) {
1170                 /* Never connected before.
1171                  * NB recv_hello may have returned EPROTO to signal my peer_ni
1172                  * wants a different protocol than the one I asked for.
1173                  */
1174                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1175
1176                 peer_ni->ksnp_proto = conn->ksnc_proto;
1177                 peer_ni->ksnp_incarnation = incarnation;
1178         }
1179
1180         if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1181             peer_ni->ksnp_incarnation != incarnation) {
1182                 /* peer_ni rebooted or I've got the wrong protocol version */
1183                 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1184
1185                 peer_ni->ksnp_proto = NULL;
1186                 rc = ESTALE;
1187                 warn = peer_ni->ksnp_incarnation != incarnation ?
1188                        "peer_ni rebooted" :
1189                        "wrong proto version";
1190                 goto failed_2;
1191         }
1192
1193         switch (rc) {
1194         default:
1195                 LBUG();
1196         case 0:
1197                 break;
1198         case EALREADY:
1199                 warn = "lost conn race";
1200                 goto failed_2;
1201         case EPROTO:
1202                 warn = "retry with different protocol version";
1203                 goto failed_2;
1204         }
1205
1206         /* Refuse to duplicate an existing connection, unless this is a
1207          * loopback connection */
1208         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1209                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1210                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1211
1212                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1213                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1214                             conn2->ksnc_type != conn->ksnc_type)
1215                                 continue;
1216
1217                         /* Reply on a passive connection attempt so the peer_ni
1218                          * realises we're connected. */
1219                         LASSERT (rc == 0);
1220                         if (!active)
1221                                 rc = EALREADY;
1222
1223                         warn = "duplicate";
1224                         goto failed_2;
1225                 }
1226         }
1227
1228         /* If the connection created by this route didn't bind to the IP
1229          * address the route connected to, the connection/route matching
1230          * code below probably isn't going to work. */
1231         if (active &&
1232             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1233                 CERROR("Route %s %pI4h connected to %pI4h\n",
1234                        libcfs_id2str(peer_ni->ksnp_id),
1235                        &route->ksnr_ipaddr,
1236                        &conn->ksnc_ipaddr);
1237         }
1238
1239         /* Search for a route corresponding to the new connection and
1240          * create an association.  This allows incoming connections created
1241          * by routes in my peer_ni to match my own route entries so I don't
1242          * continually create duplicate routes. */
1243         list_for_each(tmp, &peer_ni->ksnp_routes) {
1244                 route = list_entry(tmp, ksock_route_t, ksnr_list);
1245
1246                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1247                         continue;
1248
1249                 ksocknal_associate_route_conn_locked(route, conn);
1250                 break;
1251         }
1252
1253         conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
1254         peer_ni->ksnp_last_alive = ktime_get_real_seconds();
1255         peer_ni->ksnp_send_keepalive = 0;
1256         peer_ni->ksnp_error = 0;
1257
1258         sched = ksocknal_choose_scheduler_locked(cpt);
1259         sched->kss_nconns++;
1260         conn->ksnc_scheduler = sched;
1261
1262         conn->ksnc_tx_last_post = ktime_get_real_seconds();
1263         /* Set the deadline for the outgoing HELLO to drain */
1264         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1265         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1266         smp_mb();   /* order with adding to peer_ni's conn list */
1267
1268         list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1269         ksocknal_conn_addref(conn);
1270
1271         ksocknal_new_packet(conn, 0);
1272
1273         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1274
1275         /* Take packets blocking for this connection. */
1276         list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1277                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1278                     SOCKNAL_MATCH_NO)
1279                         continue;
1280
1281                 list_del(&tx->tx_list);
1282                 ksocknal_queue_tx_locked(tx, conn);
1283         }
1284
1285         write_unlock_bh(global_lock);
1286
1287         /* We've now got a new connection.  Any errors from here on are just
1288          * like "normal" comms errors and we close the connection normally.
1289          * NB (a) we still have to send the reply HELLO for passive
1290          *        connections,
1291          *    (b) normal I/O on the conn is blocked until I setup and call the
1292          *        socket callbacks.
1293          */
1294
1295         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1296                " incarnation:%lld sched[%d:%d]\n",
1297                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1298                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1299                conn->ksnc_port, incarnation, cpt,
1300                (int)(sched - &sched->kss_info->ksi_scheds[0]));
1301
1302         if (active) {
1303                 /* additional routes after interface exchange? */
1304                 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1305                                        hello->kshm_ips, hello->kshm_nips);
1306         } else {
1307                 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1308                                                        hello->kshm_nips);
1309                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1310         }
1311
1312         LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1313                                     kshm_ips[LNET_MAX_INTERFACES]));
1314
1315         /* setup the socket AFTER I've received hello (it disables
1316          * SO_LINGER).  I might call back to the acceptor who may want
1317          * to send a protocol version response and then close the
1318          * socket; this ensures the socket only tears down after the
1319          * response has been sent. */
1320         if (rc == 0)
1321                 rc = ksocknal_lib_setup_sock(sock);
1322
1323         write_lock_bh(global_lock);
1324
1325         /* NB my callbacks block while I hold ksnd_global_lock */
1326         ksocknal_lib_set_callback(sock, conn);
1327
1328         if (!active)
1329                 peer_ni->ksnp_accepting--;
1330
1331         write_unlock_bh(global_lock);
1332
1333         if (rc != 0) {
1334                 write_lock_bh(global_lock);
1335                 if (!conn->ksnc_closing) {
1336                         /* could be closed by another thread */
1337                         ksocknal_close_conn_locked(conn, rc);
1338                 }
1339                 write_unlock_bh(global_lock);
1340         } else if (ksocknal_connsock_addref(conn) == 0) {
1341                 /* Allow I/O to proceed. */
1342                 ksocknal_read_callback(conn);
1343                 ksocknal_write_callback(conn);
1344                 ksocknal_connsock_decref(conn);
1345         }
1346
1347         ksocknal_connsock_decref(conn);
1348         ksocknal_conn_decref(conn);
1349         return rc;
1350
1351 failed_2:
1352         if (!peer_ni->ksnp_closing &&
1353             list_empty(&peer_ni->ksnp_conns) &&
1354             list_empty(&peer_ni->ksnp_routes)) {
1355                 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1356                 list_del_init(&peer_ni->ksnp_tx_queue);
1357                 ksocknal_unlink_peer_locked(peer_ni);
1358         }
1359
1360         write_unlock_bh(global_lock);
1361
1362         if (warn != NULL) {
1363                 if (rc < 0)
1364                         CERROR("Not creating conn %s type %d: %s\n",
1365                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1366                 else
1367                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1368                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1369         }
1370
1371         if (!active) {
1372                 if (rc > 0) {
1373                         /* Request retry by replying with CONN_NONE
1374                          * ksnc_proto has been set already */
1375                         conn->ksnc_type = SOCKLND_CONN_NONE;
1376                         hello->kshm_nips = 0;
1377                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1378                 }
1379
1380                 write_lock_bh(global_lock);
1381                 peer_ni->ksnp_accepting--;
1382                 write_unlock_bh(global_lock);
1383         }
1384
1385         ksocknal_txlist_done(ni, &zombies, 1);
1386         ksocknal_peer_decref(peer_ni);
1387
1388 failed_1:
1389         if (hello != NULL)
1390                 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1391                                             kshm_ips[LNET_MAX_INTERFACES]));
1392
1393         LIBCFS_FREE(conn, sizeof(*conn));
1394
1395 failed_0:
1396         sock_release(sock);
1397         return rc;
1398 }
1399
1400 void
1401 ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
1402 {
1403         /* This just does the immediate housekeeping, and queues the
1404          * connection for the reaper to terminate.
1405          * Caller holds ksnd_global_lock exclusively in irq context */
1406         ksock_peer_ni_t      *peer_ni = conn->ksnc_peer;
1407         ksock_route_t     *route;
1408         ksock_conn_t      *conn2;
1409         struct list_head  *tmp;
1410
1411         LASSERT(peer_ni->ksnp_error == 0);
1412         LASSERT(!conn->ksnc_closing);
1413         conn->ksnc_closing = 1;
1414
1415         /* ksnd_deathrow_conns takes over peer_ni's ref */
1416         list_del(&conn->ksnc_list);
1417
1418         route = conn->ksnc_route;
1419         if (route != NULL) {
1420                 /* dissociate conn from route... */
1421                 LASSERT(!route->ksnr_deleted);
1422                 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1423
1424                 conn2 = NULL;
1425                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1426                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1427
1428                         if (conn2->ksnc_route == route &&
1429                             conn2->ksnc_type == conn->ksnc_type)
1430                                 break;
1431
1432                         conn2 = NULL;
1433                 }
1434                 if (conn2 == NULL)
1435                         route->ksnr_connected &= ~(1 << conn->ksnc_type);
1436
1437                 conn->ksnc_route = NULL;
1438
1439                 ksocknal_route_decref(route);   /* drop conn's ref on route */
1440         }
1441
1442         if (list_empty(&peer_ni->ksnp_conns)) {
1443                 /* No more connections to this peer_ni */
1444
1445                 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1446                         ksock_tx_t *tx;
1447
1448                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1449
1450                         /* throw them to the last connection...,
1451                          * these TXs will be sent to /dev/null by the scheduler */
1452                         list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1453                                             tx_list)
1454                                 ksocknal_tx_prep(conn, tx);
1455
1456                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1457                         list_splice_init(&peer_ni->ksnp_tx_queue,
1458                                          &conn->ksnc_tx_queue);
1459                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1460                 }
1461
1462                 /* renegotiate protocol version */
1463                 peer_ni->ksnp_proto = NULL;
1464                 /* stash last conn close reason */
1465                 peer_ni->ksnp_error = error;
1466
1467                 if (list_empty(&peer_ni->ksnp_routes)) {
1468                         /* I've just closed last conn belonging to a
1469                          * peer_ni with no routes to it */
1470                         ksocknal_unlink_peer_locked(peer_ni);
1471                 }
1472         }
1473
1474         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1475
1476         list_add_tail(&conn->ksnc_list,
1477                       &ksocknal_data.ksnd_deathrow_conns);
1478         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1479
1480         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1481 }
1482
1483 void
1484 ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
1485 {
1486         int        notify = 0;
1487         cfs_time_t last_alive = 0;
1488
1489         /* There has been a connection failure or comms error; but I'll only
1490          * tell LNET I think the peer_ni is dead if it's to another kernel and
1491          * there are no connections or connection attempts in existence. */
1492
1493         read_lock(&ksocknal_data.ksnd_global_lock);
1494
1495         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1496              list_empty(&peer_ni->ksnp_conns) &&
1497              peer_ni->ksnp_accepting == 0 &&
1498              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1499                 notify = 1;
1500                 last_alive = peer_ni->ksnp_last_alive;
1501         }
1502
1503         read_unlock(&ksocknal_data.ksnd_global_lock);
1504
1505         if (notify)
1506                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1507                             last_alive);
1508 }
1509
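/* Abort any zero-copy requests still outstanding on this conn: mark each one
 * not-acked, remove it from the peer_ni's ZC request list and drop its
 * reference so the tx can be finalized. */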
1510 void
1511 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1512 {
1513         ksock_peer_ni_t  *peer_ni = conn->ksnc_peer;
1514         ksock_tx_t       *tx;
1515         ksock_tx_t       *tmp;
1516         struct list_head  zlist = LIST_HEAD_INIT(zlist);
1517
1518         /* NB safe to finalize TXs because closing of socket will
1519          * abort all buffered data */
1520         LASSERT(conn->ksnc_sock == NULL);
1521
1522         spin_lock(&peer_ni->ksnp_lock);
1523
1524         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1525                 if (tx->tx_conn != conn)
1526                         continue;
1527
1528                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1529
1530                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1531                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1532                 list_del(&tx->tx_zc_list);
1533                 list_add(&tx->tx_zc_list, &zlist);
1534         }
1535
1536         spin_unlock(&peer_ni->ksnp_lock);
1537
1538         while (!list_empty(&zlist)) {
1539                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1540
1541                 list_del(&tx->tx_zc_list);
1542                 ksocknal_tx_decref(tx);
1543         }
1544 }
1545
1546 void
1547 ksocknal_terminate_conn(ksock_conn_t *conn)
1548 {
1549         /* This gets called by the reaper (guaranteed thread context) to
1550          * disengage the socket from its callbacks and close it.
1551          * ksnc_refcount will eventually hit zero, and then the reaper will
1552          * destroy it. */
1553         ksock_peer_ni_t     *peer_ni = conn->ksnc_peer;
1554         ksock_sched_t    *sched = conn->ksnc_scheduler;
1555         int               failed = 0;
1556
1557         LASSERT(conn->ksnc_closing);
1558
1559         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1560         spin_lock_bh(&sched->kss_lock);
1561
1562         /* a closing conn is always ready to tx */
1563         conn->ksnc_tx_ready = 1;
1564
1565         if (!conn->ksnc_tx_scheduled &&
1566             !list_empty(&conn->ksnc_tx_queue)) {
1567                 list_add_tail(&conn->ksnc_tx_list,
1568                                &sched->kss_tx_conns);
1569                 conn->ksnc_tx_scheduled = 1;
1570                 /* extra ref for scheduler */
1571                 ksocknal_conn_addref(conn);
1572
1573                 wake_up (&sched->kss_waitq);
1574         }
1575
1576         spin_unlock_bh(&sched->kss_lock);
1577
1578         /* serialise with callbacks */
1579         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1580
1581         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1582
1583         /* OK, so this conn may not be completely disengaged from its
1584          * scheduler yet, but it _has_ committed to terminate... */
1585         conn->ksnc_scheduler->kss_nconns--;
1586
1587         if (peer_ni->ksnp_error != 0) {
1588                 /* peer_ni's last conn closed in error */
1589                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1590                 failed = 1;
1591                 peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
1592         }
1593
1594         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1595
1596         if (failed)
1597                 ksocknal_peer_failed(peer_ni);
1598
1599         /* The socket is closed on the final put; either here, or in
1600          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1601          * when the connection was established, this will close the socket
1602          * immediately, aborting anything buffered in it. Any hung
1603          * zero-copy transmits will therefore complete in finite time. */
1604         ksocknal_connsock_decref(conn);
1605 }
1606
1607 void
1608 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1609 {
1610         /* Queue the conn for the reaper to destroy */
1611
1612         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1613         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1614
1615         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1616         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1617
1618         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1619 }
1620
1621 void
1622 ksocknal_destroy_conn (ksock_conn_t *conn)
1623 {
1624         cfs_time_t      last_rcv;
1625
1626         /* Final coup-de-grace of the reaper */
1627         CDEBUG (D_NET, "connection %p\n", conn);
1628
1629         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1630         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1631         LASSERT (conn->ksnc_sock == NULL);
1632         LASSERT (conn->ksnc_route == NULL);
1633         LASSERT (!conn->ksnc_tx_scheduled);
1634         LASSERT (!conn->ksnc_rx_scheduled);
1635         LASSERT(list_empty(&conn->ksnc_tx_queue));
1636
1637         /* complete current receive if any */
1638         switch (conn->ksnc_rx_state) {
1639         case SOCKNAL_RX_LNET_PAYLOAD:
1640                 last_rcv = conn->ksnc_rx_deadline -
1641                            cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1642                 CERROR("Completing partial receive from %s[%d], "
1643                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1644                        "last alive is %ld secs ago\n",
1645                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1646                        &conn->ksnc_ipaddr, conn->ksnc_port,
1647                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1648                        cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
1649                                         last_rcv)));
1650                 lnet_finalize (conn->ksnc_peer->ksnp_ni,
1651                                conn->ksnc_cookie, -EIO);
1652                 break;
1653         case SOCKNAL_RX_LNET_HEADER:
1654                 if (conn->ksnc_rx_started)
1655                         CERROR("Incomplete receive of lnet header from %s, "
1656                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1657                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1658                                &conn->ksnc_ipaddr, conn->ksnc_port,
1659                                conn->ksnc_proto->pro_version);
1660                 break;
1661         case SOCKNAL_RX_KSM_HEADER:
1662                 if (conn->ksnc_rx_started)
1663                         CERROR("Incomplete receive of ksock message from %s, "
1664                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1665                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1666                                &conn->ksnc_ipaddr, conn->ksnc_port,
1667                                conn->ksnc_proto->pro_version);
1668                 break;
1669         case SOCKNAL_RX_SLOP:
1670                 if (conn->ksnc_rx_started)
1671                         CERROR("Incomplete receive of slops from %s, "
1672                                "ip %pI4h:%d, with error\n",
1673                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1674                                &conn->ksnc_ipaddr, conn->ksnc_port);
1675                 break;
1676         default:
1677                 LBUG ();
1678                 break;
1679         }
1680
1681         ksocknal_peer_decref(conn->ksnc_peer);
1682
1683         LIBCFS_FREE (conn, sizeof (*conn));
1684 }
1685
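/* Close all of peer_ni's connections to ipaddr (any address if ipaddr == 0).
 * Caller must hold ksnd_global_lock in write mode; returns the number of
 * connections queued for the reaper. */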
1686 int
1687 ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
1688 {
1689         ksock_conn_t       *conn;
1690         struct list_head         *ctmp;
1691         struct list_head         *cnxt;
1692         int                 count = 0;
1693
1694         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1695                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1696
1697                 if (ipaddr == 0 ||
1698                     conn->ksnc_ipaddr == ipaddr) {
1699                         count++;
1700                         ksocknal_close_conn_locked (conn, why);
1701                 }
1702         }
1703
1704         return (count);
1705 }
1706
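/* Close conn along with every other connection to the same peer_ni that
 * shares its remote IP address; returns the number closed. */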
1707 int
1708 ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1709 {
1710         ksock_peer_ni_t     *peer_ni = conn->ksnc_peer;
1711         __u32             ipaddr = conn->ksnc_ipaddr;
1712         int               count;
1713
1714         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1715
1716         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1717
1718         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1719
1720         return (count);
1721 }
1722
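/* Close all connections matching id and ipaddr, either of which may be a
 * wildcard (LNET_NID_ANY / LNET_PID_ANY / 0). */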
1723 int
1724 ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
1725 {
1726         ksock_peer_ni_t       *peer_ni;
1727         struct list_head         *ptmp;
1728         struct list_head         *pnxt;
1729         int                 lo;
1730         int                 hi;
1731         int                 i;
1732         int                 count = 0;
1733
1734         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1735
1736         if (id.nid != LNET_NID_ANY)
1737                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1738         else {
1739                 lo = 0;
1740                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1741         }
1742
1743         for (i = lo; i <= hi; i++) {
1744                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1745
1746                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
1747
1748                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1749                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1750                                 continue;
1751
1752                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1753                 }
1754         }
1755
1756         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1757
1758         /* wildcards always succeed */
1759         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1760                 return (0);
1761
1762         return (count == 0 ? -ENOENT : 0);
1763 }
1764
1765 void
1766 ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1767 {
1768         /* The router is telling me she's been notified of a change in
1769          * gateway state.... */
1770         lnet_process_id_t  id = {0};
1771
1772         id.nid = gw_nid;
1773         id.pid = LNET_PID_ANY;
1774
1775         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1776                 alive ? "up" : "down");
1777
1778         if (!alive) {
1779                 /* If the gateway crashed, close all open connections... */
1780                 ksocknal_close_matching_conns (id, 0);
1781                 return;
1782         }
1783
1784         /* ...otherwise do nothing.  We can only establish new connections
1785          * if we have autoroutes, and these connect on demand. */
1786 }
1787
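/* lnd_query handler: report when this peer_ni was last known alive and, if a
 * connection attempt is worthwhile, (re)create the peer_ni and launch
 * connections to it. */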
1788 void
1789 ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1790 {
1791         int connect = 1;
1792         time64_t last_alive = 0;
1793         time64_t now = ktime_get_real_seconds();
1794         ksock_peer_ni_t *peer_ni = NULL;
1795         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1796         lnet_process_id_t id = {
1797                 .nid = nid,
1798                 .pid = LNET_PID_LUSTRE,
1799         };
1800
1801         read_lock(glock);
1802
1803         peer_ni = ksocknal_find_peer_locked(ni, id);
1804         if (peer_ni != NULL) {
1805                 struct list_head       *tmp;
1806                 ksock_conn_t     *conn;
1807                 int               bufnob;
1808
1809                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1810                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1811                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1812
1813                         if (bufnob < conn->ksnc_tx_bufnob) {
1814                                 /* something got ACKed */
1815                                 conn->ksnc_tx_deadline =
1816                                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1817                                 peer_ni->ksnp_last_alive = now;
1818                                 conn->ksnc_tx_bufnob = bufnob;
1819                         }
1820                 }
1821
1822                 last_alive = peer_ni->ksnp_last_alive;
1823                 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1824                         connect = 0;
1825         }
1826
1827         read_unlock(glock);
1828
1829         if (last_alive != 0)
1830                 *when = last_alive;
1831
1832         CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
1833                libcfs_nid2str(nid), peer_ni,
1834                last_alive ? cfs_duration_sec(now - last_alive) : -1,
1835                connect);
1836
1837         if (!connect)
1838                 return;
1839
1840         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1841
1842         write_lock_bh(glock);
1843
1844         peer_ni = ksocknal_find_peer_locked(ni, id);
1845         if (peer_ni != NULL)
1846                 ksocknal_launch_all_connections_locked(peer_ni);
1847
1848         write_unlock_bh(glock);
1849         return;
1850 }
1851
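/* Push each of peer_ni's connections in turn, holding a conn ref across the
 * global-lock drop so the connection can't vanish while it is pushed. */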
1852 static void
1853 ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
1854 {
1855         int               index;
1856         int               i;
1857         struct list_head       *tmp;
1858         ksock_conn_t     *conn;
1859
1860         for (index = 0; ; index++) {
1861                 read_lock(&ksocknal_data.ksnd_global_lock);
1862
1863                 i = 0;
1864                 conn = NULL;
1865
1866                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1867                         if (i++ == index) {
1868                                 conn = list_entry(tmp, ksock_conn_t,
1869                                                        ksnc_list);
1870                                 ksocknal_conn_addref(conn);
1871                                 break;
1872                         }
1873                 }
1874
1875                 read_unlock(&ksocknal_data.ksnd_global_lock);
1876
1877                 if (conn == NULL)
1878                         break;
1879
1880                 ksocknal_lib_push_conn (conn);
1881                 ksocknal_conn_decref(conn);
1882         }
1883 }
1884
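/* Push every peer_ni matching id (wildcards allowed); returns -ENOENT if
 * nothing matched. */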
1885 static int
1886 ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
1887 {
1888         struct list_head *start;
1889         struct list_head *end;
1890         struct list_head *tmp;
1891         int               rc = -ENOENT;
1892         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1893
1894         if (id.nid == LNET_NID_ANY) {
1895                 start = &ksocknal_data.ksnd_peers[0];
1896                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1897         } else {
1898                 start = end = ksocknal_nid2peerlist(id.nid);
1899         }
1900
1901         for (tmp = start; tmp <= end; tmp++) {
1902                 int     peer_off; /* searching offset in peer_ni hash table */
1903
1904                 for (peer_off = 0; ; peer_off++) {
1905                         ksock_peer_ni_t *peer_ni;
1906                         int           i = 0;
1907
1908                         read_lock(&ksocknal_data.ksnd_global_lock);
1909                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1910                                 if (!((id.nid == LNET_NID_ANY ||
1911                                        id.nid == peer_ni->ksnp_id.nid) &&
1912                                       (id.pid == LNET_PID_ANY ||
1913                                        id.pid == peer_ni->ksnp_id.pid)))
1914                                         continue;
1915
1916                                 if (i++ == peer_off) {
1917                                         ksocknal_peer_addref(peer_ni);
1918                                         break;
1919                                 }
1920                         }
1921                         read_unlock(&ksocknal_data.ksnd_global_lock);
1922
1923                         if (i == 0) /* no match */
1924                                 break;
1925
1926                         rc = 0;
1927                         ksocknal_push_peer(peer_ni);
1928                         ksocknal_peer_decref(peer_ni);
1929                 }
1930         }
1931         return rc;
1932 }
1933
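/* Register a new local interface (IP + netmask) with this net and count the
 * peers and routes that already use its address.  Duplicates are ignored. */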
1934 static int
1935 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1936 {
1937         ksock_net_t       *net = ni->ni_data;
1938         ksock_interface_t *iface;
1939         int                rc;
1940         int                i;
1941         int                j;
1942         struct list_head        *ptmp;
1943         ksock_peer_ni_t      *peer_ni;
1944         struct list_head        *rtmp;
1945         ksock_route_t     *route;
1946
1947         if (ipaddress == 0 ||
1948             netmask == 0)
1949                 return (-EINVAL);
1950
1951         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1952
1953         iface = ksocknal_ip2iface(ni, ipaddress);
1954         if (iface != NULL) {
1955                 /* silently ignore dups */
1956                 rc = 0;
1957         } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1958                 rc = -ENOSPC;
1959         } else {
1960                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1961
1962                 iface->ksni_ipaddr = ipaddress;
1963                 iface->ksni_netmask = netmask;
1964                 iface->ksni_nroutes = 0;
1965                 iface->ksni_npeers = 0;
1966
1967                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1968                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1969                                 peer_ni = list_entry(ptmp, ksock_peer_ni_t,
1970                                                       ksnp_list);
1971
1972                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1973                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1974                                                 iface->ksni_npeers++;
1975
1976                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1977                                         route = list_entry(rtmp,
1978                                                                ksock_route_t,
1979                                                                ksnr_list);
1980
1981                                         if (route->ksnr_myipaddr == ipaddress)
1982                                                 iface->ksni_nroutes++;
1983                                 }
1984                         }
1985                 }
1986
1987                 rc = 0;
1988                 /* NB only new connections will pay attention to the new interface! */
1989         }
1990
1991         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1992
1993         return (rc);
1994 }
1995
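/* Forget local address ipaddr on peer_ni: drop it from the passive IP list,
 * unbind (or delete) routes using it and close any connections bound to it. */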
1996 static void
1997 ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
1998 {
1999         struct list_head         *tmp;
2000         struct list_head         *nxt;
2001         ksock_route_t      *route;
2002         ksock_conn_t       *conn;
2003         int                 i;
2004         int                 j;
2005
2006         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2007                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2008                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2009                                 peer_ni->ksnp_passive_ips[j-1] =
2010                                         peer_ni->ksnp_passive_ips[j];
2011                         peer_ni->ksnp_n_passive_ips--;
2012                         break;
2013                 }
2014
2015         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2016                 route = list_entry(tmp, ksock_route_t, ksnr_list);
2017
2018                 if (route->ksnr_myipaddr != ipaddr)
2019                         continue;
2020
2021                 if (route->ksnr_share_count != 0) {
2022                         /* Manually created; keep, but unbind */
2023                         route->ksnr_myipaddr = 0;
2024                 } else {
2025                         ksocknal_del_route_locked(route);
2026                 }
2027         }
2028
2029         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2030                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2031
2032                 if (conn->ksnc_myipaddr == ipaddr)
2033                         ksocknal_close_conn_locked (conn, 0);
2034         }
2035 }
2036
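/* Remove a local interface (all of them if ipaddress == 0) from this net and
 * strip every peer_ni reference to it; returns -ENOENT if nothing matched. */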
2037 static int
2038 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2039 {
2040         ksock_net_t       *net = ni->ni_data;
2041         int                rc = -ENOENT;
2042         struct list_head        *tmp;
2043         struct list_head        *nxt;
2044         ksock_peer_ni_t      *peer_ni;
2045         __u32              this_ip;
2046         int                i;
2047         int                j;
2048
2049         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2050
2051         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2052                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2053
2054                 if (!(ipaddress == 0 ||
2055                       ipaddress == this_ip))
2056                         continue;
2057
2058                 rc = 0;
2059
2060                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2061                         net->ksnn_interfaces[j-1] =
2062                                 net->ksnn_interfaces[j];
2063
2064                 net->ksnn_ninterfaces--;
2065
2066                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2067                         list_for_each_safe(tmp, nxt,
2068                                                &ksocknal_data.ksnd_peers[j]) {
2069                                 peer_ni = list_entry(tmp, ksock_peer_ni_t,
2070                                                       ksnp_list);
2071
2072                                 if (peer_ni->ksnp_ni != ni)
2073                                         continue;
2074
2075                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2076                         }
2077                 }
2078         }
2079
2080         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2081
2082         return (rc);
2083 }
2084
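/* lnd_ctl handler: dispatch the socklnd-specific ioctls (interface, peer_ni
 * and connection queries/updates) issued through LNet. */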
2085 int
2086 ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2087 {
2088         lnet_process_id_t id = {0};
2089         struct libcfs_ioctl_data *data = arg;
2090         int rc;
2091
2092         switch(cmd) {
2093         case IOC_LIBCFS_GET_INTERFACE: {
2094                 ksock_net_t       *net = ni->ni_data;
2095                 ksock_interface_t *iface;
2096
2097                 read_lock(&ksocknal_data.ksnd_global_lock);
2098
2099                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2100                         rc = -ENOENT;
2101                 } else {
2102                         rc = 0;
2103                         iface = &net->ksnn_interfaces[data->ioc_count];
2104
2105                         data->ioc_u32[0] = iface->ksni_ipaddr;
2106                         data->ioc_u32[1] = iface->ksni_netmask;
2107                         data->ioc_u32[2] = iface->ksni_npeers;
2108                         data->ioc_u32[3] = iface->ksni_nroutes;
2109                 }
2110
2111                 read_unlock(&ksocknal_data.ksnd_global_lock);
2112                 return rc;
2113         }
2114
2115         case IOC_LIBCFS_ADD_INTERFACE:
2116                 return ksocknal_add_interface(ni,
2117                                               data->ioc_u32[0], /* IP address */
2118                                               data->ioc_u32[1]); /* net mask */
2119
2120         case IOC_LIBCFS_DEL_INTERFACE:
2121                 return ksocknal_del_interface(ni,
2122                                               data->ioc_u32[0]); /* IP address */
2123
2124         case IOC_LIBCFS_GET_PEER: {
2125                 __u32            myip = 0;
2126                 __u32            ip = 0;
2127                 int              port = 0;
2128                 int              conn_count = 0;
2129                 int              share_count = 0;
2130
2131                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2132                                             &id, &myip, &ip, &port,
2133                                             &conn_count,  &share_count);
2134                 if (rc != 0)
2135                         return rc;
2136
2137                 data->ioc_nid    = id.nid;
2138                 data->ioc_count  = share_count;
2139                 data->ioc_u32[0] = ip;
2140                 data->ioc_u32[1] = port;
2141                 data->ioc_u32[2] = myip;
2142                 data->ioc_u32[3] = conn_count;
2143                 data->ioc_u32[4] = id.pid;
2144                 return 0;
2145         }
2146
2147         case IOC_LIBCFS_ADD_PEER:
2148                 id.nid = data->ioc_nid;
2149                 id.pid = LNET_PID_LUSTRE;
2150                 return ksocknal_add_peer (ni, id,
2151                                           data->ioc_u32[0], /* IP */
2152                                           data->ioc_u32[1]); /* port */
2153
2154         case IOC_LIBCFS_DEL_PEER:
2155                 id.nid = data->ioc_nid;
2156                 id.pid = LNET_PID_ANY;
2157                 return ksocknal_del_peer (ni, id,
2158                                           data->ioc_u32[0]); /* IP */
2159
2160         case IOC_LIBCFS_GET_CONN: {
2161                 int           txmem;
2162                 int           rxmem;
2163                 int           nagle;
2164                 ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2165
2166                 if (conn == NULL)
2167                         return -ENOENT;
2168
2169                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2170
2171                 data->ioc_count  = txmem;
2172                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2173                 data->ioc_flags  = nagle;
2174                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2175                 data->ioc_u32[1] = conn->ksnc_port;
2176                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2177                 data->ioc_u32[3] = conn->ksnc_type;
2178                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2179                 data->ioc_u32[5] = rxmem;
2180                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2181                 ksocknal_conn_decref(conn);
2182                 return 0;
2183         }
2184
2185         case IOC_LIBCFS_CLOSE_CONNECTION:
2186                 id.nid = data->ioc_nid;
2187                 id.pid = LNET_PID_ANY;
2188                 return ksocknal_close_matching_conns (id,
2189                                                       data->ioc_u32[0]);
2190
2191         case IOC_LIBCFS_REGISTER_MYNID:
2192                 /* Ignore if this is a noop */
2193                 if (data->ioc_nid == ni->ni_nid)
2194                         return 0;
2195
2196                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2197                        libcfs_nid2str(data->ioc_nid),
2198                        libcfs_nid2str(ni->ni_nid));
2199                 return -EINVAL;
2200
2201         case IOC_LIBCFS_PUSH_CONNECTION:
2202                 id.nid = data->ioc_nid;
2203                 id.pid = LNET_PID_ANY;
2204                 return ksocknal_push(ni, id);
2205
2206         default:
2207                 return -EINVAL;
2208         }
2209         /* not reached */
2210 }
2211
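/* Free the global allocations made at startup: the per-CPT scheduler arrays,
 * the peer_ni hash table and any idle noop TX descriptors. */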
2212 static void
2213 ksocknal_free_buffers (void)
2214 {
2215         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2216
2217         if (ksocknal_data.ksnd_sched_info != NULL) {
2218                 struct ksock_sched_info *info;
2219                 int                     i;
2220
2221                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2222                         if (info->ksi_scheds != NULL) {
2223                                 LIBCFS_FREE(info->ksi_scheds,
2224                                             info->ksi_nthreads_max *
2225                                             sizeof(info->ksi_scheds[0]));
2226                         }
2227                 }
2228                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2229         }
2230
2231         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2232                      sizeof(struct list_head) *
2233                      ksocknal_data.ksnd_peer_hash_size);
2234
2235         spin_lock(&ksocknal_data.ksnd_tx_lock);
2236
2237         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2238                 struct list_head        zlist;
2239                 ksock_tx_t      *tx;
2240
2241                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2242                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2243                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2244
2245                 while (!list_empty(&zlist)) {
2246                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2247                         list_del(&tx->tx_list);
2248                         LIBCFS_FREE(tx, tx->tx_desc_size);
2249                 }
2250         } else {
2251                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2252         }
2253 }
2254
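/* Tear down module-wide state: check everything is idle, flag shutdown, wake
 * and wait for all threads to exit, then free the startup allocations. */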
2255 static void
2256 ksocknal_base_shutdown(void)
2257 {
2258         struct ksock_sched_info *info;
2259         ksock_sched_t           *sched;
2260         int                     i;
2261         int                     j;
2262
2263         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2264                atomic_read (&libcfs_kmemory));
2265         LASSERT (ksocknal_data.ksnd_nnets == 0);
2266
2267         switch (ksocknal_data.ksnd_init) {
2268         default:
2269                 LASSERT (0);
2270
2271         case SOCKNAL_INIT_ALL:
2272         case SOCKNAL_INIT_DATA:
2273                 LASSERT (ksocknal_data.ksnd_peers != NULL);
2274                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2275                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2276                 }
2277
2278                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2279                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2280                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2281                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2282                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2283
2284                 if (ksocknal_data.ksnd_sched_info != NULL) {
2285                         cfs_percpt_for_each(info, i,
2286                                             ksocknal_data.ksnd_sched_info) {
2287                                 if (info->ksi_scheds == NULL)
2288                                         continue;
2289
2290                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2291
2292                                         sched = &info->ksi_scheds[j];
2293                                         LASSERT(list_empty(
2294                                                 &sched->kss_tx_conns));
2295                                         LASSERT(list_empty(
2296                                                 &sched->kss_rx_conns));
2297                                         LASSERT(list_empty(
2298                                                 &sched->kss_zombie_noop_txs));
2299                                         LASSERT(sched->kss_nconns == 0);
2300                                 }
2301                         }
2302                 }
2303
2304                 /* flag threads to terminate; wake and wait for them to die */
2305                 ksocknal_data.ksnd_shuttingdown = 1;
2306                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2307                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2308
2309                 if (ksocknal_data.ksnd_sched_info != NULL) {
2310                         cfs_percpt_for_each(info, i,
2311                                             ksocknal_data.ksnd_sched_info) {
2312                                 if (info->ksi_scheds == NULL)
2313                                         continue;
2314
2315                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2316                                         sched = &info->ksi_scheds[j];
2317                                         wake_up_all(&sched->kss_waitq);
2318                                 }
2319                         }
2320                 }
2321
2322                 i = 4;
2323                 read_lock(&ksocknal_data.ksnd_global_lock);
2324                 while (ksocknal_data.ksnd_nthreads != 0) {
2325                         i++;
2326                         /* power of 2? */
2327                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2328                                 "waiting for %d threads to terminate\n",
2329                                 ksocknal_data.ksnd_nthreads);
2330                         read_unlock(&ksocknal_data.ksnd_global_lock);
2331                         set_current_state(TASK_UNINTERRUPTIBLE);
2332                         schedule_timeout(cfs_time_seconds(1));
2333                         read_lock(&ksocknal_data.ksnd_global_lock);
2334                 }
2335                 read_unlock(&ksocknal_data.ksnd_global_lock);
2336
2337                 ksocknal_free_buffers();
2338
2339                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2340                 break;
2341         }
2342
2343         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2344                atomic_read (&libcfs_kmemory));
2345
2346         module_put(THIS_MODULE);
2347 }
2348
2349 static __u64 ksocknal_new_incarnation(void)
2350 {
2351         struct timeval tv;
2352
2353         /* The incarnation number is the time this module loaded and it
2354          * identifies this particular instance of the socknal.  Hopefully
2355          * we won't be able to reboot more frequently than 1MHz for the
2356          * foreseeable future :) */
2357
2358         do_gettimeofday(&tv);
2359
2360         return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2361 }
2362
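/* One-time module-wide initialisation: allocate the peer_ni hash table and
 * per-CPT scheduler info, then start the connection daemons and the reaper. */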
2363 static int
2364 ksocknal_base_startup(void)
2365 {
2366         struct ksock_sched_info *info;
2367         int                     rc;
2368         int                     i;
2369
2370         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2371         LASSERT (ksocknal_data.ksnd_nnets == 0);
2372
2373         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2374
2375         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2376         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2377                      sizeof(struct list_head) *
2378                      ksocknal_data.ksnd_peer_hash_size);
2379         if (ksocknal_data.ksnd_peers == NULL)
2380                 return -ENOMEM;
2381
2382         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2383                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2384
2385         rwlock_init(&ksocknal_data.ksnd_global_lock);
2386         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2387
2388         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2389         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2390         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2391         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2392         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2393
2394         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2395         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2396         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2397         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2398
2399         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2400         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2401
2402         /* NB memset above zeros whole of ksocknal_data */
2403
2404         /* flag lists/ptrs/locks initialised */
2405         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2406         try_module_get(THIS_MODULE);
2407
2408         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2409                                                          sizeof(*info));
2410         if (ksocknal_data.ksnd_sched_info == NULL)
2411                 goto failed;
2412
2413         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2414                 ksock_sched_t   *sched;
2415                 int             nthrs;
2416
2417                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2418                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2419                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2420                 } else {
2421                         /* cap at half of the CPUs; assume the other half is
2422                          * reserved for upper-layer modules */
2423                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2424                 }
2425
2426                 info->ksi_nthreads_max = nthrs;
2427                 info->ksi_cpt = i;
2428
2429                 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2430                                  info->ksi_nthreads_max * sizeof(*sched));
2431                 if (info->ksi_scheds == NULL)
2432                         goto failed;
2433
2434                 for (; nthrs > 0; nthrs--) {
2435                         sched = &info->ksi_scheds[nthrs - 1];
2436
2437                         sched->kss_info = info;
2438                         spin_lock_init(&sched->kss_lock);
2439                         INIT_LIST_HEAD(&sched->kss_rx_conns);
2440                         INIT_LIST_HEAD(&sched->kss_tx_conns);
2441                         INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2442                         init_waitqueue_head(&sched->kss_waitq);
2443                 }
2444         }
2445
2446         ksocknal_data.ksnd_connd_starting         = 0;
2447         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2448         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2449         /* must have at least 2 connds to remain responsive to accepts while
2450          * connecting */
2451         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2452                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2453
2454         if (*ksocknal_tunables.ksnd_nconnds_max <
2455             *ksocknal_tunables.ksnd_nconnds) {
2456                 ksocknal_tunables.ksnd_nconnds_max =
2457                         ksocknal_tunables.ksnd_nconnds;
2458         }
2459
2460         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2461                 char name[16];
2462                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2463                 ksocknal_data.ksnd_connd_starting++;
2464                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2465
2466
2467                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2468                 rc = ksocknal_thread_start(ksocknal_connd,
2469                                            (void *)((uintptr_t)i), name);
2470                 if (rc != 0) {
2471                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2472                         ksocknal_data.ksnd_connd_starting--;
2473                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2474                         CERROR("Can't spawn socknal connd: %d\n", rc);
2475                         goto failed;
2476                 }
2477         }
2478
2479         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2480         if (rc != 0) {
2481                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2482                 goto failed;
2483         }
2484
2485         /* flag everything initialised */
2486         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2487
2488         return 0;
2489
2490  failed:
2491         ksocknal_base_shutdown();
2492         return -ENETDOWN;
2493 }
2494
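/* Shutdown debugging aid: if a peer_ni is still hashed for this NI, dump its
 * state together with that of its routes and connections. */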
2495 static void
2496 ksocknal_debug_peerhash (lnet_ni_t *ni)
2497 {
2498         ksock_peer_ni_t *peer_ni = NULL;
2499         struct list_head        *tmp;
2500         int             i;
2501
2502         read_lock(&ksocknal_data.ksnd_global_lock);
2503
2504         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2505                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2506                         peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
2507
2508                         if (peer_ni->ksnp_ni == ni)
2509                                 break;
2510                         peer_ni = NULL;
2511                 }
2512         }
2513
2514         if (peer_ni != NULL) {
2515                 ksock_route_t *route;
2516                 ksock_conn_t  *conn;
2517
2518                 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2519                        "closing %d, accepting %d, err %d, zcookie %llu, "
2520                        "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2521                        atomic_read(&peer_ni->ksnp_refcount),
2522                        peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2523                        peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2524                        peer_ni->ksnp_zc_next_cookie,
2525                        !list_empty(&peer_ni->ksnp_tx_queue),
2526                        !list_empty(&peer_ni->ksnp_zc_req_list));
2527
2528                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2529                         route = list_entry(tmp, ksock_route_t, ksnr_list);
2530                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2531                                "del %d\n", atomic_read(&route->ksnr_refcount),
2532                                route->ksnr_scheduled, route->ksnr_connecting,
2533                                route->ksnr_connected, route->ksnr_deleted);
2534                 }
2535
2536                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2537                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2538                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2539                                atomic_read(&conn->ksnc_conn_refcount),
2540                                atomic_read(&conn->ksnc_sock_refcount),
2541                                conn->ksnc_type, conn->ksnc_closing);
2542                 }
2543         }
2544
2545         read_unlock(&ksocknal_data.ksnd_global_lock);
2546         return;
2547 }
2548
2549 void
2550 ksocknal_shutdown (lnet_ni_t *ni)
2551 {
2552         ksock_net_t      *net = ni->ni_data;
2553         int               i;
2554         lnet_process_id_t anyid = {0};
2555
2556         anyid.nid = LNET_NID_ANY;
2557         anyid.pid = LNET_PID_ANY;
2558
2559         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2560         LASSERT(ksocknal_data.ksnd_nnets > 0);
2561
2562         spin_lock_bh(&net->ksnn_lock);
2563         net->ksnn_shutdown = 1;                 /* prevent new peers */
2564         spin_unlock_bh(&net->ksnn_lock);
2565
2566         /* Delete all peers */
2567         ksocknal_del_peer(ni, anyid, 0);
2568
2569         /* Wait for all peer_ni state to clean up */
2570         i = 2;
2571         spin_lock_bh(&net->ksnn_lock);
2572         while (net->ksnn_npeers != 0) {
2573                 spin_unlock_bh(&net->ksnn_lock);
2574
2575                 i++;
2576                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2577                        "waiting for %d peers to disconnect\n",
2578                        net->ksnn_npeers);
2579                 set_current_state(TASK_UNINTERRUPTIBLE);
2580                 schedule_timeout(cfs_time_seconds(1));
2581
2582                 ksocknal_debug_peerhash(ni);
2583
2584                 spin_lock_bh(&net->ksnn_lock);
2585         }
2586         spin_unlock_bh(&net->ksnn_lock);
2587
2588         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2589                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2590                 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2591         }
2592
2593         list_del(&net->ksnn_list);
2594         LIBCFS_FREE(net, sizeof(*net));
2595
2596         ksocknal_data.ksnd_nnets--;
2597         if (ksocknal_data.ksnd_nnets == 0)
2598                 ksocknal_base_shutdown();
2599 }
2600
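/* Autoconfiguration: probe the node's interfaces, skip loopback and anything
 * down, and record the rest in the net's interface table; returns how many
 * were found. */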
2601 static int
2602 ksocknal_enumerate_interfaces(ksock_net_t *net)
2603 {
2604         char      **names;
2605         int         i;
2606         int         j;
2607         int         rc;
2608         int         n;
2609
2610         n = lnet_ipif_enumerate(&names);
2611         if (n <= 0) {
2612                 CERROR("Can't enumerate interfaces: %d\n", n);
2613                 return n;
2614         }
2615
2616         for (i = j = 0; i < n; i++) {
2617                 int        up;
2618                 __u32      ip;
2619                 __u32      mask;
2620
2621                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2622                         continue;
2623
2624                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2625                 if (rc != 0) {
2626                         CWARN("Can't get interface %s info: %d\n",
2627                               names[i], rc);
2628                         continue;
2629                 }
2630
2631                 if (!up) {
2632                         CWARN("Ignoring interface %s (down)\n",
2633                               names[i]);
2634                         continue;
2635                 }
2636
2637                 if (j == LNET_MAX_INTERFACES) {
2638                         CWARN("Ignoring interface %s (too many interfaces)\n",
2639                               names[i]);
2640                         continue;
2641                 }
2642
2643                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2644                 net->ksnn_interfaces[j].ksni_netmask = mask;
2645                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2646                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2647                 j++;
2648         }
2649
2650         lnet_ipif_free_enumeration(names, n);
2651
2652         if (j == 0)
2653                 CERROR("Can't find any usable interfaces\n");
2654
2655         return j;
2656 }
2657
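/* Count how many of net's interfaces are not already used by a configured
 * net, ignoring any ":alias" suffix on the device name. */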
2658 static int
2659 ksocknal_search_new_ipif(ksock_net_t *net)
2660 {
2661         int     new_ipif = 0;
2662         int     i;
2663
2664         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2665                 char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2666                 char            *colon = strchr(ifnam, ':');
2667                 int             found  = 0;
2668                 ksock_net_t     *tmp;
2669                 int             j;
2670
2671                 if (colon != NULL) /* ignore alias device */
2672                         *colon = 0;
2673
2674                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2675                                         ksnn_list) {
2676                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2677                                 char *ifnam2 =
2678                                         &tmp->ksnn_interfaces[j].ksni_name[0];
2679                                 char *colon2 = strchr(ifnam2, ':');
2680
2681                                 if (colon2 != NULL)
2682                                         *colon2 = 0;
2683
2684                                 found = strcmp(ifnam, ifnam2) == 0;
2685                                 if (colon2 != NULL)
2686                                         *colon2 = ':';
2687                         }
2688                         if (found)
2689                                 break;
2690                 }
2691
2692                 new_ipif += !found;
2693                 if (colon != NULL)
2694                         *colon = ':';
2695         }
2696
2697         return new_ipif;
2698 }
2699
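/* Start scheduler threads for one CPT: the first call sizes the pool from
 * the tunables and CPT weight; later calls (for a new interface) add at most
 * two more threads, never exceeding ksi_nthreads_max. */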
2700 static int
2701 ksocknal_start_schedulers(struct ksock_sched_info *info)
2702 {
2703         int     nthrs;
2704         int     rc = 0;
2705         int     i;
2706
2707         if (info->ksi_nthreads == 0) {
2708                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2709                         nthrs = info->ksi_nthreads_max;
2710                 } else {
2711                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2712                                                info->ksi_cpt);
2713                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2714                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2715                 }
2716                 nthrs = min(nthrs, info->ksi_nthreads_max);
2717         } else {
2718                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2719                 /* add at most two threads if a new interface appeared */
2720                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2721         }
2722
2723         for (i = 0; i < nthrs; i++) {
2724                 long            id;
2725                 char            name[20];
2726                 ksock_sched_t   *sched;
2727                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2728                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2729                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2730                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2731
2732                 rc = ksocknal_thread_start(ksocknal_scheduler,
2733                                            (void *)id, name);
2734                 if (rc == 0)
2735                         continue;
2736
2737                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2738                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2739                 break;
2740         }
2741
2742         info->ksi_nthreads += i;
2743         return rc;
2744 }
2745
2746 static int
2747 ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2748 {
2749         int     newif = ksocknal_search_new_ipif(net);
2750         int     rc;
2751         int     i;
2752
2753         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2754                 return -EINVAL;
2755
2756         for (i = 0; i < ncpts; i++) {
2757                 struct ksock_sched_info *info;
2758                 int cpt = (cpts == NULL) ? i : cpts[i];
2759
2760                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2761                 info = ksocknal_data.ksnd_sched_info[cpt];
2762
2763                 if (!newif && info->ksi_nthreads > 0)
2764                         continue;
2765
2766                 rc = ksocknal_start_schedulers(info);
2767                 if (rc != 0)
2768                         return rc;
2769         }
2770         return 0;
2771 }
2772
2773 int
2774 ksocknal_startup (lnet_ni_t *ni)
2775 {
2776         ksock_net_t  *net;
2777         int           rc;
2778         int           i;
2779         struct net_device *net_dev;
2780         int node_id;
2781
2782         LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
2783
2784         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2785                 rc = ksocknal_base_startup();
2786                 if (rc != 0)
2787                         return rc;
2788         }
2789
2790         LIBCFS_ALLOC(net, sizeof(*net));
2791         if (net == NULL)
2792                 goto fail_0;
2793
2794         spin_lock_init(&net->ksnn_lock);
2795         net->ksnn_incarnation = ksocknal_new_incarnation();
2796         ni->ni_data = net;
2797         if (!ni->ni_net->net_tunables_set) {
2798                 ni->ni_net->net_tunables.lct_peer_timeout =
2799                         *ksocknal_tunables.ksnd_peertimeout;
2800                 ni->ni_net->net_tunables.lct_max_tx_credits =
2801                         *ksocknal_tunables.ksnd_credits;
2802                 ni->ni_net->net_tunables.lct_peer_tx_credits =
2803                         *ksocknal_tunables.ksnd_peertxcredits;
2804                 ni->ni_net->net_tunables.lct_peer_rtr_credits =
2805                         *ksocknal_tunables.ksnd_peerrtrcredits;
2806                 ni->ni_net->net_tunables_set = true;
2807         }
2808
2809
2810         if (ni->ni_interfaces[0] == NULL) {
2811                 rc = ksocknal_enumerate_interfaces(net);
2812                 if (rc <= 0)
2813                         goto fail_1;
2814
2815                 net->ksnn_ninterfaces = 1;
2816         } else {
2817                 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2818                         int    up;
2819
2820                         if (ni->ni_interfaces[i] == NULL)
2821                                 break;
2822
2823                         rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
2824                                 &net->ksnn_interfaces[i].ksni_ipaddr,
2825                                 &net->ksnn_interfaces[i].ksni_netmask);
2826
2827                         if (rc != 0) {
2828                                 CERROR("Can't get interface %s info: %d\n",
2829                                        ni->ni_interfaces[i], rc);
2830                                 goto fail_1;
2831                         }
2832
2833                         if (!up) {
2834                                 CERROR("Interface %s is down\n",
2835                                        ni->ni_interfaces[i]);
2836                                 goto fail_1;
2837                         }
2838
2839                         strlcpy(net->ksnn_interfaces[i].ksni_name,
2840                                 ni->ni_interfaces[i],
2841                                 sizeof(net->ksnn_interfaces[i].ksni_name));
2842
2843                 }
2844                 net->ksnn_ninterfaces = i;
2845         }
2846
2847         net_dev = dev_get_by_name(&init_net,
2848                                   net->ksnn_interfaces[0].ksni_name);
2849         if (net_dev != NULL) {
2850                 node_id = dev_to_node(&net_dev->dev);
2851                 ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
2852                 dev_put(net_dev);
2853         } else {
2854                 ni->ni_dev_cpt = CFS_CPT_ANY;
2855         }
2856
2857         /* call this before adding the net to ksocknal_data.ksnd_nets */
2858         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2859         if (rc != 0)
2860                 goto fail_1;
2861
2862         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2863                                 net->ksnn_interfaces[0].ksni_ipaddr);
2864         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2865
2866         ksocknal_data.ksnd_nnets++;
2867
2868         return 0;
2869
2870  fail_1:
2871         LIBCFS_FREE(net, sizeof(*net));
2872  fail_0:
2873         if (ksocknal_data.ksnd_nnets == 0)
2874                 ksocknal_base_shutdown();
2875
2876         return -ENETDOWN;
2877 }
2878
2879
2880 static void __exit ksocklnd_exit(void)
2881 {
2882         lnet_unregister_lnd(&the_ksocklnd);
2883 }
2884
2885 static int __init ksocklnd_init(void)
2886 {
2887         int rc;
2888
2889         /* check that the ksnr_connected/connecting fields are large enough */
2890         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2891         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2892
2893         /* initialize the_ksocklnd */
2894         the_ksocklnd.lnd_type     = SOCKLND;
2895         the_ksocklnd.lnd_startup  = ksocknal_startup;
2896         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2897         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2898         the_ksocklnd.lnd_send     = ksocknal_send;
2899         the_ksocklnd.lnd_recv     = ksocknal_recv;
2900         the_ksocklnd.lnd_notify   = ksocknal_notify;
2901         the_ksocklnd.lnd_query    = ksocknal_query;
2902         the_ksocklnd.lnd_accept   = ksocknal_accept;
2903
2904         rc = ksocknal_tunables_init();
2905         if (rc != 0)
2906                 return rc;
2907
2908         lnet_register_lnd(&the_ksocklnd);
2909
2910         return 0;
2911 }
2912
2913 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2914 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2915 MODULE_VERSION("2.8.0");
2916 MODULE_LICENSE("GPL");
2917
2918 module_init(ksocklnd_init);
2919 module_exit(ksocklnd_exit);