1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include "socklnd.h"
41 #include <linux/inetdevice.h>
42
43 static struct lnet_lnd the_ksocklnd;
44 struct ksock_nal_data ksocknal_data;
45
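/* Return the interface in this NI's ksock_net that is bound to @ip, or
 * NULL if no configured interface matches. */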
46 static struct ksock_interface *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
48 {
49         struct ksock_net *net = ni->ni_data;
50         int i;
51         struct ksock_interface *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_INTERFACES_NUM);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return iface;
59         }
60
61         return NULL;
62 }
63
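/* Allocate a route to @ipaddr:@port.  The route starts with a single
 * reference owned by the caller and may be connected immediately
 * (ksnr_retry_interval == 0). */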
64 static struct ksock_route *
65 ksocknal_create_route(__u32 ipaddr, int port)
66 {
67         struct ksock_route *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route(struct ksock_route *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
99 static struct ksock_peer_ni *
100 ksocknal_create_peer(struct lnet_ni *ni, struct lnet_process_id id)
101 {
102         int cpt = lnet_cpt_of_nid(id.nid, ni);
103         struct ksock_net *net = ni->ni_data;
104         struct ksock_peer_ni *peer_ni;
105
106         LASSERT(id.nid != LNET_NID_ANY);
107         LASSERT(id.pid != LNET_PID_ANY);
108         LASSERT(!in_interrupt());
109
110         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
111         if (peer_ni == NULL)
112                 return ERR_PTR(-ENOMEM);
113
114         peer_ni->ksnp_ni = ni;
115         peer_ni->ksnp_id = id;
116         atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
117         peer_ni->ksnp_closing = 0;
118         peer_ni->ksnp_accepting = 0;
119         peer_ni->ksnp_proto = NULL;
120         peer_ni->ksnp_last_alive = 0;
121         peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
122
123         INIT_LIST_HEAD(&peer_ni->ksnp_conns);
124         INIT_LIST_HEAD(&peer_ni->ksnp_routes);
125         INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
126         INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
127         spin_lock_init(&peer_ni->ksnp_lock);
128
129         spin_lock_bh(&net->ksnn_lock);
130
131         if (net->ksnn_shutdown) {
132                 spin_unlock_bh(&net->ksnn_lock);
133
134                 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
135                 CERROR("Can't create peer_ni: network shutdown\n");
136                 return ERR_PTR(-ESHUTDOWN);
137         }
138
139         net->ksnn_npeers++;
140
141         spin_unlock_bh(&net->ksnn_lock);
142
143         return peer_ni;
144 }
145
146 void
147 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
148 {
149         struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
150
151         CDEBUG (D_NET, "peer_ni %s %p deleted\n",
152                 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
153
154         LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
155         LASSERT(peer_ni->ksnp_accepting == 0);
156         LASSERT(list_empty(&peer_ni->ksnp_conns));
157         LASSERT(list_empty(&peer_ni->ksnp_routes));
158         LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
159         LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
160
161         LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
162
163         /* NB a peer_ni's connections and routes keep a reference on their peer_ni
164          * until they are destroyed, so we can be assured that _all_ state to
165          * do with this peer_ni has been cleaned up when its refcount drops to
166          * zero. */
167         spin_lock_bh(&net->ksnn_lock);
168         net->ksnn_npeers--;
169         spin_unlock_bh(&net->ksnn_lock);
170 }
171
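/* Look up the peer_ni for @id on @ni in the global peer_ni hash table.
 * Caller must hold ksnd_global_lock; no reference is taken. */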
172 struct ksock_peer_ni *
173 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
174 {
175         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
176         struct list_head *tmp;
177         struct ksock_peer_ni *peer_ni;
178
179         list_for_each(tmp, peer_list) {
180                 peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
181
182                 LASSERT(!peer_ni->ksnp_closing);
183
184                 if (peer_ni->ksnp_ni != ni)
185                         continue;
186
187                 if (peer_ni->ksnp_id.nid != id.nid ||
188                     peer_ni->ksnp_id.pid != id.pid)
189                         continue;
190
191                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
192                        peer_ni, libcfs_id2str(id),
193                        atomic_read(&peer_ni->ksnp_refcount));
194                 return peer_ni;
195         }
196         return NULL;
197 }
198
199 struct ksock_peer_ni *
200 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
201 {
202         struct ksock_peer_ni *peer_ni;
203
204         read_lock(&ksocknal_data.ksnd_global_lock);
205         peer_ni = ksocknal_find_peer_locked(ni, id);
206         if (peer_ni != NULL)                    /* +1 ref for caller */
207                 ksocknal_peer_addref(peer_ni);
208         read_unlock(&ksocknal_data.ksnd_global_lock);
209
210         return (peer_ni);
211 }
212
213 static void
214 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
215 {
216         int i;
217         __u32 ip;
218         struct ksock_interface *iface;
219
220         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
221                 LASSERT(i < LNET_INTERFACES_NUM);
222                 ip = peer_ni->ksnp_passive_ips[i];
223
224                 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
225                 /*
226                  * All IPs in peer_ni->ksnp_passive_ips[] come from the
227                  * interface list, therefore the call must succeed.
228                  */
229                 LASSERT(iface != NULL);
230
231                 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
232                        peer_ni, iface, iface->ksni_nroutes);
233                 iface->ksni_npeers--;
234         }
235
236         LASSERT(list_empty(&peer_ni->ksnp_conns));
237         LASSERT(list_empty(&peer_ni->ksnp_routes));
238         LASSERT(!peer_ni->ksnp_closing);
239         peer_ni->ksnp_closing = 1;
240         list_del(&peer_ni->ksnp_list);
241         /* lose peerlist's ref */
242         ksocknal_peer_decref(peer_ni);
243 }
244
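/* Report the @index'th record for @ni: each peer_ni contributes one entry
 * if it has no passive IPs or routes, then one entry per passive IP, then
 * one per route.  Returns -ENOENT when @index runs past the table. */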
245 static int
246 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
247                        struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
248                        int *port, int *conn_count, int *share_count)
249 {
250         struct ksock_peer_ni *peer_ni;
251         struct list_head *ptmp;
252         struct ksock_route *route;
253         struct list_head *rtmp;
254         int i;
255         int j;
256         int rc = -ENOENT;
257
258         read_lock(&ksocknal_data.ksnd_global_lock);
259
260         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
261                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
262                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
263
264                         if (peer_ni->ksnp_ni != ni)
265                                 continue;
266
267                         if (peer_ni->ksnp_n_passive_ips == 0 &&
268                             list_empty(&peer_ni->ksnp_routes)) {
269                                 if (index-- > 0)
270                                         continue;
271
272                                 *id = peer_ni->ksnp_id;
273                                 *myip = 0;
274                                 *peer_ip = 0;
275                                 *port = 0;
276                                 *conn_count = 0;
277                                 *share_count = 0;
278                                 rc = 0;
279                                 goto out;
280                         }
281
282                         for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
283                                 if (index-- > 0)
284                                         continue;
285
286                                 *id = peer_ni->ksnp_id;
287                                 *myip = peer_ni->ksnp_passive_ips[j];
288                                 *peer_ip = 0;
289                                 *port = 0;
290                                 *conn_count = 0;
291                                 *share_count = 0;
292                                 rc = 0;
293                                 goto out;
294                         }
295
296                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
297                                 if (index-- > 0)
298                                         continue;
299
300                                 route = list_entry(rtmp, struct ksock_route,
301                                                    ksnr_list);
302
303                                 *id = peer_ni->ksnp_id;
304                                 *myip = route->ksnr_myipaddr;
305                                 *peer_ip = route->ksnr_ipaddr;
306                                 *port = route->ksnr_port;
307                                 *conn_count = route->ksnr_conn_count;
308                                 *share_count = route->ksnr_share_count;
309                                 rc = 0;
310                                 goto out;
311                         }
312                 }
313         }
314 out:
315         read_unlock(&ksocknal_data.ksnd_global_lock);
316         return rc;
317 }
318
319 static void
320 ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
321 {
322         struct ksock_peer_ni *peer_ni = route->ksnr_peer;
323         int type = conn->ksnc_type;
324         struct ksock_interface *iface;
325
326         conn->ksnc_route = route;
327         ksocknal_route_addref(route);
328
329         if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
330                 if (route->ksnr_myipaddr == 0) {
331                         /* route wasn't bound locally yet (the initial route) */
332                         CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
333                                libcfs_id2str(peer_ni->ksnp_id),
334                                &route->ksnr_ipaddr,
335                                &conn->ksnc_myipaddr);
336                 } else {
337                         CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
338                                "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
339                                &route->ksnr_ipaddr,
340                                &route->ksnr_myipaddr,
341                                &conn->ksnc_myipaddr);
342
343                         iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
344                                                   route->ksnr_myipaddr);
345                         if (iface != NULL)
346                                 iface->ksni_nroutes--;
347                 }
348                 route->ksnr_myipaddr = conn->ksnc_myipaddr;
349                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
350                                           route->ksnr_myipaddr);
351                 if (iface != NULL)
352                         iface->ksni_nroutes++;
353         }
354
355         route->ksnr_connected |= (1<<type);
356         route->ksnr_conn_count++;
357
358         /* Successful connection => further attempts can
359          * proceed immediately */
360         route->ksnr_retry_interval = 0;
361 }
362
363 static void
364 ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
365 {
366         struct list_head *tmp;
367         struct ksock_conn *conn;
368         struct ksock_route *route2;
369
370         LASSERT(!peer_ni->ksnp_closing);
371         LASSERT(route->ksnr_peer == NULL);
372         LASSERT(!route->ksnr_scheduled);
373         LASSERT(!route->ksnr_connecting);
374         LASSERT(route->ksnr_connected == 0);
375
376         /* LASSERT(unique) */
377         list_for_each(tmp, &peer_ni->ksnp_routes) {
378                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
379
380                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
381                         CERROR("Duplicate route %s %pI4h\n",
382                                libcfs_id2str(peer_ni->ksnp_id),
383                                &route->ksnr_ipaddr);
384                         LBUG();
385                 }
386         }
387
388         route->ksnr_peer = peer_ni;
389         ksocknal_peer_addref(peer_ni);
390         /* peer_ni's routelist takes over my ref on 'route' */
391         list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
392
393         list_for_each(tmp, &peer_ni->ksnp_conns) {
394                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
395
396                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
397                         continue;
398
399                 ksocknal_associate_route_conn_locked(route, conn);
400                 /* keep going (typed routes) */
401         }
402 }
403
404 static void
405 ksocknal_del_route_locked(struct ksock_route *route)
406 {
407         struct ksock_peer_ni *peer_ni = route->ksnr_peer;
408         struct ksock_interface *iface;
409         struct ksock_conn *conn;
410         struct list_head *ctmp;
411         struct list_head *cnxt;
412
413         LASSERT(!route->ksnr_deleted);
414
415         /* Close associated conns */
416         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
417                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
418
419                 if (conn->ksnc_route != route)
420                         continue;
421
422                 ksocknal_close_conn_locked(conn, 0);
423         }
424
425         if (route->ksnr_myipaddr != 0) {
426                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
427                                           route->ksnr_myipaddr);
428                 if (iface != NULL)
429                         iface->ksni_nroutes--;
430         }
431
432         route->ksnr_deleted = 1;
433         list_del(&route->ksnr_list);
434         ksocknal_route_decref(route);           /* drop peer_ni's ref */
435
436         if (list_empty(&peer_ni->ksnp_routes) &&
437             list_empty(&peer_ni->ksnp_conns)) {
438                 /* I've just removed the last route to a peer_ni with no active
439                  * connections */
440                 ksocknal_unlink_peer_locked(peer_ni);
441         }
442 }
443
444 int
445 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
446                   int port)
447 {
448         struct list_head *tmp;
449         struct ksock_peer_ni *peer_ni;
450         struct ksock_peer_ni *peer2;
451         struct ksock_route *route;
452         struct ksock_route *route2;
453
454         if (id.nid == LNET_NID_ANY ||
455             id.pid == LNET_PID_ANY)
456                 return (-EINVAL);
457
458         /* Have a brand new peer_ni ready... */
459         peer_ni = ksocknal_create_peer(ni, id);
460         if (IS_ERR(peer_ni))
461                 return PTR_ERR(peer_ni);
462
463         route = ksocknal_create_route (ipaddr, port);
464         if (route == NULL) {
465                 ksocknal_peer_decref(peer_ni);
466                 return (-ENOMEM);
467         }
468
469         write_lock_bh(&ksocknal_data.ksnd_global_lock);
470
471         /* always called with a ref on ni, so shutdown can't have started */
472         LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
473
474         peer2 = ksocknal_find_peer_locked(ni, id);
475         if (peer2 != NULL) {
476                 ksocknal_peer_decref(peer_ni);
477                 peer_ni = peer2;
478         } else {
479                 /* peer_ni table takes my ref on peer_ni */
480                 list_add_tail(&peer_ni->ksnp_list,
481                               ksocknal_nid2peerlist(id.nid));
482         }
483
484         route2 = NULL;
485         list_for_each(tmp, &peer_ni->ksnp_routes) {
486                 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
487
488                 if (route2->ksnr_ipaddr == ipaddr)
489                         break;
490
491                 route2 = NULL;
492         }
493         if (route2 == NULL) {
494                 ksocknal_add_route_locked(peer_ni, route);
495                 route->ksnr_share_count++;
496         } else {
497                 ksocknal_route_decref(route);
498                 route2->ksnr_share_count++;
499         }
500
501         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
502
503         return 0;
504 }
505
506 static void
507 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
508 {
509         struct ksock_conn *conn;
510         struct ksock_route *route;
511         struct list_head *tmp;
512         struct list_head *nxt;
513         int nshared;
514
515         LASSERT(!peer_ni->ksnp_closing);
516
517         /* Extra ref prevents peer_ni disappearing until I'm done with it */
518         ksocknal_peer_addref(peer_ni);
519
520         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
521                 route = list_entry(tmp, struct ksock_route, ksnr_list);
522
523                 /* no match */
524                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
525                         continue;
526
527                 route->ksnr_share_count = 0;
528                 /* This deletes associated conns too */
529                 ksocknal_del_route_locked(route);
530         }
531
532         nshared = 0;
533         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
534                 route = list_entry(tmp, struct ksock_route, ksnr_list);
535                 nshared += route->ksnr_share_count;
536         }
537
538         if (nshared == 0) {
539                 /* remove everything else if there are no explicit entries
540                  * left */
541
542                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
543                         route = list_entry(tmp, struct ksock_route, ksnr_list);
544
545                         /* we should only be removing auto-entries */
546                         LASSERT(route->ksnr_share_count == 0);
547                         ksocknal_del_route_locked(route);
548                 }
549
550                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
551                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
552
553                         ksocknal_close_conn_locked(conn, 0);
554                 }
555         }
556
557         ksocknal_peer_decref(peer_ni);
558         /* NB peer_ni unlinks itself when last conn/route is removed */
559 }
560
561 static int
562 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
563 {
564         LIST_HEAD(zombies);
565         struct list_head *ptmp;
566         struct list_head *pnxt;
567         struct ksock_peer_ni *peer_ni;
568         int lo;
569         int hi;
570         int i;
571         int rc = -ENOENT;
572
573         write_lock_bh(&ksocknal_data.ksnd_global_lock);
574
575         if (id.nid != LNET_NID_ANY) {
576                 hi = (int)(ksocknal_nid2peerlist(id.nid) -
577                            ksocknal_data.ksnd_peers);
578                 lo = hi;
579         } else {
580                 lo = 0;
581                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
582         }
583
584         for (i = lo; i <= hi; i++) {
585                 list_for_each_safe(ptmp, pnxt,
586                                    &ksocknal_data.ksnd_peers[i]) {
587                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
588
589                         if (peer_ni->ksnp_ni != ni)
590                                 continue;
591
592                         if (!((id.nid == LNET_NID_ANY ||
593                                peer_ni->ksnp_id.nid == id.nid) &&
594                               (id.pid == LNET_PID_ANY ||
595                                peer_ni->ksnp_id.pid == id.pid)))
596                                 continue;
597
598                         ksocknal_peer_addref(peer_ni);  /* a ref for me... */
599
600                         ksocknal_del_peer_locked(peer_ni, ip);
601
602                         if (peer_ni->ksnp_closing &&
603                             !list_empty(&peer_ni->ksnp_tx_queue)) {
604                                 LASSERT(list_empty(&peer_ni->ksnp_conns));
605                                 LASSERT(list_empty(&peer_ni->ksnp_routes));
606
607                                 list_splice_init(&peer_ni->ksnp_tx_queue,
608                                                  &zombies);
609                         }
610
611                         ksocknal_peer_decref(peer_ni);  /* ...till here */
612
613                         rc = 0;                         /* matched! */
614                 }
615         }
616
617         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
618
619         ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
620
621         return rc;
622 }
623
624 static struct ksock_conn *
625 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
626 {
627         struct ksock_peer_ni *peer_ni;
628         struct list_head *ptmp;
629         struct ksock_conn *conn;
630         struct list_head *ctmp;
631         int i;
632
633         read_lock(&ksocknal_data.ksnd_global_lock);
634
635         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
636                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
637                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
638
639                         LASSERT(!peer_ni->ksnp_closing);
640
641                         if (peer_ni->ksnp_ni != ni)
642                                 continue;
643
644                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
645                                 if (index-- > 0)
646                                         continue;
647
648                                 conn = list_entry(ctmp, struct ksock_conn,
649                                                   ksnc_list);
650                                 ksocknal_conn_addref(conn);
651                                 read_unlock(
652                                         &ksocknal_data.ksnd_global_lock);
653                                 return conn;
654                         }
655                 }
656         }
657
658         read_unlock(&ksocknal_data.ksnd_global_lock);
659         return NULL;
660 }
661
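/* Return the scheduler for @cpt.  If that scheduler started no threads,
 * fall back to any scheduler that did; return NULL if none is usable. */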
662 static struct ksock_sched *
663 ksocknal_choose_scheduler_locked(unsigned int cpt)
664 {
665         struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
666         int i;
667
668         if (sched->kss_nthreads == 0) {
669                 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
670                         if (sched->kss_nthreads > 0) {
671                                 CDEBUG(D_NET, "scheduler[%d] has no threads; selected scheduler[%d]\n",
672                                        cpt, sched->kss_cpt);
673                                 return sched;
674                         }
675                 }
676                 return NULL;
677         }
678
679         return sched;
680 }
681
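/* Copy this NI's interface addresses into @ipaddrs and return how many were
 * copied.  Returns 0 when there is only one interface, since interfaces are
 * only offered for additional connections on multi-homed nodes. */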
682 static int
683 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
684 {
685         struct ksock_net *net = ni->ni_data;
686         int i;
687         int nip;
688
689         read_lock(&ksocknal_data.ksnd_global_lock);
690
691         nip = net->ksnn_ninterfaces;
692         LASSERT(nip <= LNET_INTERFACES_NUM);
693
694         /*
695          * Only offer interfaces for additional connections if I have
696          * more than one.
697          */
698         if (nip < 2) {
699                 read_unlock(&ksocknal_data.ksnd_global_lock);
700                 return 0;
701         }
702
703         for (i = 0; i < nip; i++) {
704                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
705                 LASSERT(ipaddrs[i] != 0);
706         }
707
708         read_unlock(&ksocknal_data.ksnd_global_lock);
709         return nip;
710 }
711
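/* Return the index of the entry in @ips that best matches @iface: prefer an
 * address on the same subnet (netmask match), then the smallest XOR distance. */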
712 static int
713 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
714 {
715         int best_netmatch = 0;
716         int best_xor = 0;
717         int best = -1;
718         int this_xor;
719         int this_netmatch;
720         int i;
721
722         for (i = 0; i < nips; i++) {
723                 if (ips[i] == 0)
724                         continue;
725
726                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
727                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
728
729                 if (!(best < 0 ||
730                       best_netmatch < this_netmatch ||
731                       (best_netmatch == this_netmatch &&
732                        best_xor > this_xor)))
733                         continue;
734
735                 best = i;
736                 best_netmatch = this_netmatch;
737                 best_xor = this_xor;
738         }
739
740         LASSERT (best >= 0);
741         return (best);
742 }
743
744 static int
745 ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
746 {
747         rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
748         struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
749         struct ksock_interface *iface;
750         struct ksock_interface *best_iface;
751         int n_ips;
752         int i;
753         int j;
754         int k;
755         u32 ip;
756         u32 xor;
757         int this_netmatch;
758         int best_netmatch;
759         int best_npeers;
760
761         /* CAVEAT EMPTOR: We do all our interface matching with an
762          * exclusive hold of global lock at IRQ priority.  We're only
763          * expecting to be dealing with small numbers of interfaces, so the
764          * O(n**3)-ness shouldn't matter */
765
766         /* Also note that I'm not going to return more than n_peerips
767          * interfaces, even if I have more myself */
768
769         write_lock_bh(global_lock);
770
771         LASSERT(n_peerips <= LNET_INTERFACES_NUM);
772         LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
773
774         /* Only match interfaces for additional connections
775          * if I have > 1 interface */
776         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
777                 MIN(n_peerips, net->ksnn_ninterfaces);
778
779         for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
780                 /*              ^ yes really... */
781
782                 /* If we have any new interfaces, first tick off all the
783                  * peer_ni IPs that match old interfaces, then choose new
784                  * interfaces to match the remaining peer_ni IPs.
785                  * We don't forget interfaces we've stopped using; we might
786                  * start using them again... */
787
788                 if (i < peer_ni->ksnp_n_passive_ips) {
789                         /* Old interface. */
790                         ip = peer_ni->ksnp_passive_ips[i];
791                         best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
792
793                         /* peer_ni passive ips are kept up to date */
794                         LASSERT(best_iface != NULL);
795                 } else {
796                         /* choose a new interface */
797                         LASSERT (i == peer_ni->ksnp_n_passive_ips);
798
799                         best_iface = NULL;
800                         best_netmatch = 0;
801                         best_npeers = 0;
802
803                         for (j = 0; j < net->ksnn_ninterfaces; j++) {
804                                 iface = &net->ksnn_interfaces[j];
805                                 ip = iface->ksni_ipaddr;
806
807                                 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
808                                         if (peer_ni->ksnp_passive_ips[k] == ip)
809                                                 break;
810
811                                 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
812                                         continue;
813
814                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
815                                 xor = (ip ^ peerips[k]);
816                                 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
817
818                                 if (!(best_iface == NULL ||
819                                       best_netmatch < this_netmatch ||
820                                       (best_netmatch == this_netmatch &&
821                                        best_npeers > iface->ksni_npeers)))
822                                         continue;
823
824                                 best_iface = iface;
825                                 best_netmatch = this_netmatch;
826                                 best_npeers = iface->ksni_npeers;
827                         }
828
829                         LASSERT(best_iface != NULL);
830
831                         best_iface->ksni_npeers++;
832                         ip = best_iface->ksni_ipaddr;
833                         peer_ni->ksnp_passive_ips[i] = ip;
834                         peer_ni->ksnp_n_passive_ips = i+1;
835                 }
836
837                 /* mark the best matching peer_ni IP used */
838                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
839                 peerips[j] = 0;
840         }
841
842         /* Overwrite input peer_ni IP addresses */
843         memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
844
845         write_unlock_bh(global_lock);
846
847         return (n_ips);
848 }
849
850 static void
851 ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
852                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
853 {
854         struct ksock_route              *newroute = NULL;
855         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
856         struct lnet_ni *ni = peer_ni->ksnp_ni;
857         struct ksock_net                *net = ni->ni_data;
858         struct list_head        *rtmp;
859         struct ksock_route              *route;
860         struct ksock_interface  *iface;
861         struct ksock_interface  *best_iface;
862         int                     best_netmatch;
863         int                     this_netmatch;
864         int                     best_nroutes;
865         int                     i;
866         int                     j;
867
868         /* CAVEAT EMPTOR: We do all our interface matching with an
869          * exclusive hold of global lock at IRQ priority.  We're only
870          * expecting to be dealing with small numbers of interfaces, so the
871          * O(n**3)-ness here shouldn't matter */
872
873         write_lock_bh(global_lock);
874
875         if (net->ksnn_ninterfaces < 2) {
876                 /* Only create additional connections
877                  * if I have > 1 interface */
878                 write_unlock_bh(global_lock);
879                 return;
880         }
881
882         LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);
883
884         for (i = 0; i < npeer_ipaddrs; i++) {
885                 if (newroute != NULL) {
886                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
887                 } else {
888                         write_unlock_bh(global_lock);
889
890                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
891                         if (newroute == NULL)
892                                 return;
893
894                         write_lock_bh(global_lock);
895                 }
896
897                 if (peer_ni->ksnp_closing) {
898                         /* peer_ni got closed under me */
899                         break;
900                 }
901
902                 /* Already got a route? */
903                 route = NULL;
904                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
905                         route = list_entry(rtmp, struct ksock_route, ksnr_list);
906
907                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
908                                 break;
909
910                         route = NULL;
911                 }
912                 if (route != NULL)
913                         continue;
914
915                 best_iface = NULL;
916                 best_nroutes = 0;
917                 best_netmatch = 0;
918
919                 LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
920
921                 /* Select interface to connect from */
922                 for (j = 0; j < net->ksnn_ninterfaces; j++) {
923                         iface = &net->ksnn_interfaces[j];
924
925                         /* Using this interface already? */
926                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
927                                 route = list_entry(rtmp, struct ksock_route,
928                                                    ksnr_list);
929
930                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
931                                         break;
932
933                                 route = NULL;
934                         }
935                         if (route != NULL)
936                                 continue;
937
938                         this_netmatch = (((iface->ksni_ipaddr ^
939                                            newroute->ksnr_ipaddr) &
940                                            iface->ksni_netmask) == 0) ? 1 : 0;
941
942                         if (!(best_iface == NULL ||
943                               best_netmatch < this_netmatch ||
944                               (best_netmatch == this_netmatch &&
945                                best_nroutes > iface->ksni_nroutes)))
946                                 continue;
947
948                         best_iface = iface;
949                         best_netmatch = this_netmatch;
950                         best_nroutes = iface->ksni_nroutes;
951                 }
952
953                 if (best_iface == NULL)
954                         continue;
955
956                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
957                 best_iface->ksni_nroutes++;
958
959                 ksocknal_add_route_locked(peer_ni, newroute);
960                 newroute = NULL;
961         }
962
963         write_unlock_bh(global_lock);
964         if (newroute != NULL)
965                 ksocknal_route_decref(newroute);
966 }
967
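/* Queue an incoming connection request on @sock for the connd threads to
 * handle, and wake them. */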
968 int
969 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
970 {
971         struct ksock_connreq *cr;
972         int rc;
973         u32 peer_ip;
974         int peer_port;
975
976         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
977         LASSERT(rc == 0);               /* we succeeded before */
978
979         LIBCFS_ALLOC(cr, sizeof(*cr));
980         if (cr == NULL) {
981                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
982                                    "%pI4h: memory exhausted\n", &peer_ip);
983                 return -ENOMEM;
984         }
985
986         lnet_ni_addref(ni);
987         cr->ksncr_ni   = ni;
988         cr->ksncr_sock = sock;
989
990         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
991
992         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
993         wake_up(&ksocknal_data.ksnd_connd_waitq);
994
995         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
996         return 0;
997 }
998
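/* Return non-zero if a connection attempt to @ipaddr is already in progress
 * on one of @peer_ni's routes. */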
999 static int
1000 ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
1001 {
1002         struct ksock_route *route;
1003
1004         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1005                 if (route->ksnr_ipaddr == ipaddr)
1006                         return route->ksnr_connecting;
1007         }
1008         return 0;
1009 }
1010
1011 int
1012 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
1013                      struct socket *sock, int type)
1014 {
1015         rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1016         LIST_HEAD(zombies);
1017         struct lnet_process_id peerid;
1018         struct list_head *tmp;
1019         u64 incarnation;
1020         struct ksock_conn *conn;
1021         struct ksock_conn *conn2;
1022         struct ksock_peer_ni *peer_ni = NULL;
1023         struct ksock_peer_ni *peer2;
1024         struct ksock_sched *sched;
1025         struct ksock_hello_msg *hello;
1026         int cpt;
1027         struct ksock_tx *tx;
1028         struct ksock_tx *txtmp;
1029         int rc;
1030         int rc2;
1031         int active;
1032         char *warn = NULL;
1033
1034         active = (route != NULL);
1035
1036         LASSERT (active == (type != SOCKLND_CONN_NONE));
1037
1038         LIBCFS_ALLOC(conn, sizeof(*conn));
1039         if (conn == NULL) {
1040                 rc = -ENOMEM;
1041                 goto failed_0;
1042         }
1043
1044         conn->ksnc_peer = NULL;
1045         conn->ksnc_route = NULL;
1046         conn->ksnc_sock = sock;
1047         /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
1048          * being closed before the connection is established */
1049         atomic_set (&conn->ksnc_sock_refcount, 2);
1050         conn->ksnc_type = type;
1051         ksocknal_lib_save_callback(sock, conn);
1052         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1053
1054         conn->ksnc_rx_ready = 0;
1055         conn->ksnc_rx_scheduled = 0;
1056
1057         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1058         conn->ksnc_tx_ready = 0;
1059         conn->ksnc_tx_scheduled = 0;
1060         conn->ksnc_tx_carrier = NULL;
1061         atomic_set (&conn->ksnc_tx_nob, 0);
1062
1063         LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1064                                      kshm_ips[LNET_INTERFACES_NUM]));
1065         if (hello == NULL) {
1066                 rc = -ENOMEM;
1067                 goto failed_1;
1068         }
1069
1070         /* stash conn's local and remote addrs */
1071         rc = ksocknal_lib_get_conn_addrs (conn);
1072         if (rc != 0)
1073                 goto failed_1;
1074
1075         /* Find out/confirm peer_ni's NID and connection type and get the
1076          * vector of interfaces she's willing to let me connect to.
1077          * Passive connections use the listener timeout since the peer_ni sends
1078          * eagerly */
1079
1080         if (active) {
1081                 peer_ni = route->ksnr_peer;
1082                 LASSERT(ni == peer_ni->ksnp_ni);
1083
1084                 /* Active connection sends HELLO eagerly */
1085                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1086                 peerid = peer_ni->ksnp_id;
1087
1088                 write_lock_bh(global_lock);
1089                 conn->ksnc_proto = peer_ni->ksnp_proto;
1090                 write_unlock_bh(global_lock);
1091
1092                 if (conn->ksnc_proto == NULL) {
1093                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1094 #if SOCKNAL_VERSION_DEBUG
1095                          if (*ksocknal_tunables.ksnd_protocol == 2)
1096                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1097                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1098                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1099 #endif
1100                 }
1101
1102                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1103                 if (rc != 0)
1104                         goto failed_1;
1105         } else {
1106                 peerid.nid = LNET_NID_ANY;
1107                 peerid.pid = LNET_PID_ANY;
1108
1109                 /* Passive, get protocol from peer_ni */
1110                 conn->ksnc_proto = NULL;
1111         }
1112
1113         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1114         if (rc < 0)
1115                 goto failed_1;
1116
1117         LASSERT (rc == 0 || active);
1118         LASSERT (conn->ksnc_proto != NULL);
1119         LASSERT (peerid.nid != LNET_NID_ANY);
1120
1121         cpt = lnet_cpt_of_nid(peerid.nid, ni);
1122
1123         if (active) {
1124                 ksocknal_peer_addref(peer_ni);
1125                 write_lock_bh(global_lock);
1126         } else {
1127                 peer_ni = ksocknal_create_peer(ni, peerid);
1128                 if (IS_ERR(peer_ni)) {
1129                         rc = PTR_ERR(peer_ni);
1130                         goto failed_1;
1131                 }
1132
1133                 write_lock_bh(global_lock);
1134
1135                 /* called with a ref on ni, so shutdown can't have started */
1136                 LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
1137
1138                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1139                 if (peer2 == NULL) {
1140                         /* NB this puts an "empty" peer_ni in the peer_ni
1141                          * table (which takes my ref) */
1142                         list_add_tail(&peer_ni->ksnp_list,
1143                                       ksocknal_nid2peerlist(peerid.nid));
1144                 } else {
1145                         ksocknal_peer_decref(peer_ni);
1146                         peer_ni = peer2;
1147                 }
1148
1149                 /* +1 ref for me */
1150                 ksocknal_peer_addref(peer_ni);
1151                 peer_ni->ksnp_accepting++;
1152
1153                 /* Am I already connecting to this guy?  Resolve in
1154                  * favour of higher NID... */
1155                 if (peerid.nid < ni->ni_nid &&
1156                     ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1157                         rc = EALREADY;
1158                         warn = "connection race resolution";
1159                         goto failed_2;
1160                 }
1161         }
1162
1163         if (peer_ni->ksnp_closing ||
1164             (active && route->ksnr_deleted)) {
1165                 /* peer_ni/route got closed under me */
1166                 rc = -ESTALE;
1167                 warn = "peer_ni/route removed";
1168                 goto failed_2;
1169         }
1170
1171         if (peer_ni->ksnp_proto == NULL) {
1172                 /* Never connected before.
1173                  * NB recv_hello may have returned EPROTO to signal my peer_ni
1174                  * wants a different protocol than the one I asked for.
1175                  */
1176                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1177
1178                 peer_ni->ksnp_proto = conn->ksnc_proto;
1179                 peer_ni->ksnp_incarnation = incarnation;
1180         }
1181
1182         if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1183             peer_ni->ksnp_incarnation != incarnation) {
1184                 /* peer_ni rebooted or I've got the wrong protocol version */
1185                 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1186
1187                 peer_ni->ksnp_proto = NULL;
1188                 rc = ESTALE;
1189                 warn = peer_ni->ksnp_incarnation != incarnation ?
1190                        "peer_ni rebooted" :
1191                        "wrong proto version";
1192                 goto failed_2;
1193         }
1194
1195         switch (rc) {
1196         default:
1197                 LBUG();
1198         case 0:
1199                 break;
1200         case EALREADY:
1201                 warn = "lost conn race";
1202                 goto failed_2;
1203         case EPROTO:
1204                 warn = "retry with different protocol version";
1205                 goto failed_2;
1206         }
1207
1208         /* Refuse to duplicate an existing connection, unless this is a
1209          * loopback connection */
1210         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1211                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1212                         conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1213
1214                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1215                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1216                             conn2->ksnc_type != conn->ksnc_type)
1217                                 continue;
1218
1219                         /* Reply on a passive connection attempt so the peer_ni
1220                          * realises we're connected. */
1221                         LASSERT (rc == 0);
1222                         if (!active)
1223                                 rc = EALREADY;
1224
1225                         warn = "duplicate";
1226                         goto failed_2;
1227                 }
1228         }
1229
1230         /* If the connection created by this route didn't bind to the IP
1231          * address the route connected to, the connection/route matching
1232          * code below probably isn't going to work. */
1233         if (active &&
1234             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1235                 CERROR("Route %s %pI4h connected to %pI4h\n",
1236                        libcfs_id2str(peer_ni->ksnp_id),
1237                        &route->ksnr_ipaddr,
1238                        &conn->ksnc_ipaddr);
1239         }
1240
1241         /* Search for a route corresponding to the new connection and
1242          * create an association.  This allows incoming connections created
1243          * by routes in my peer_ni to match my own route entries so I don't
1244          * continually create duplicate routes. */
1245         list_for_each(tmp, &peer_ni->ksnp_routes) {
1246                 route = list_entry(tmp, struct ksock_route, ksnr_list);
1247
1248                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1249                         continue;
1250
1251                 ksocknal_associate_route_conn_locked(route, conn);
1252                 break;
1253         }
1254
1255         conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
1256         peer_ni->ksnp_last_alive = ktime_get_seconds();
1257         peer_ni->ksnp_send_keepalive = 0;
1258         peer_ni->ksnp_error = 0;
1259
1260         sched = ksocknal_choose_scheduler_locked(cpt);
1261         if (!sched) {
1262                 CERROR("no schedulers available. node is unhealthy\n");
1263                 goto failed_2;
1264         }
1265         /*
1266          * The cpt might have changed if we ended up selecting a scheduler
1267          * that is not native to this cpt, so use the scheduler's cpt instead.
1268          */
1269         cpt = sched->kss_cpt;
1270         sched->kss_nconns++;
1271         conn->ksnc_scheduler = sched;
1272
1273         conn->ksnc_tx_last_post = ktime_get_seconds();
1274         /* Set the deadline for the outgoing HELLO to drain */
1275         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1276         conn->ksnc_tx_deadline = ktime_get_seconds() +
1277                                  lnet_get_lnd_timeout();
1278         smp_mb();   /* order with adding to peer_ni's conn list */
1279
1280         list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1281         ksocknal_conn_addref(conn);
1282
1283         ksocknal_new_packet(conn, 0);
1284
1285         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1286
1287         /* Take packets blocking for this connection. */
1288         list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1289                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1290                     SOCKNAL_MATCH_NO)
1291                         continue;
1292
1293                 list_del(&tx->tx_list);
1294                 ksocknal_queue_tx_locked(tx, conn);
1295         }
1296
1297         write_unlock_bh(global_lock);
1298
1299         /* We've now got a new connection.  Any errors from here on are just
1300          * like "normal" comms errors and we close the connection normally.
1301          * NB (a) we still have to send the reply HELLO for passive
1302          *        connections,
1303          *    (b) normal I/O on the conn is blocked until I setup and call the
1304          *        socket callbacks.
1305          */
1306
1307         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1308                " incarnation:%lld sched[%d]\n",
1309                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1310                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1311                conn->ksnc_port, incarnation, cpt);
1312
1313         if (active) {
1314                 /* additional routes after interface exchange? */
1315                 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1316                                        hello->kshm_ips, hello->kshm_nips);
1317         } else {
1318                 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1319                                                        hello->kshm_nips);
1320                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1321         }
1322
1323         LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1324                                     kshm_ips[LNET_INTERFACES_NUM]));
1325
1326         /* setup the socket AFTER I've received hello (it disables
1327          * SO_LINGER).  I might call back to the acceptor who may want
1328          * to send a protocol version response and then close the
1329          * socket; this ensures the socket only tears down after the
1330          * response has been sent. */
1331         if (rc == 0)
1332                 rc = ksocknal_lib_setup_sock(sock);
1333
1334         write_lock_bh(global_lock);
1335
1336         /* NB my callbacks block while I hold ksnd_global_lock */
1337         ksocknal_lib_set_callback(sock, conn);
1338
1339         if (!active)
1340                 peer_ni->ksnp_accepting--;
1341
1342         write_unlock_bh(global_lock);
1343
1344         if (rc != 0) {
1345                 write_lock_bh(global_lock);
1346                 if (!conn->ksnc_closing) {
1347                         /* could be closed by another thread */
1348                         ksocknal_close_conn_locked(conn, rc);
1349                 }
1350                 write_unlock_bh(global_lock);
1351         } else if (ksocknal_connsock_addref(conn) == 0) {
1352                 /* Allow I/O to proceed. */
1353                 ksocknal_read_callback(conn);
1354                 ksocknal_write_callback(conn);
1355                 ksocknal_connsock_decref(conn);
1356         }
1357
1358         ksocknal_connsock_decref(conn);
1359         ksocknal_conn_decref(conn);
1360         return rc;
1361
1362 failed_2:
1363         if (!peer_ni->ksnp_closing &&
1364             list_empty(&peer_ni->ksnp_conns) &&
1365             list_empty(&peer_ni->ksnp_routes)) {
1366                 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1367                 list_del_init(&peer_ni->ksnp_tx_queue);
1368                 ksocknal_unlink_peer_locked(peer_ni);
1369         }
1370
1371         write_unlock_bh(global_lock);
1372
1373         if (warn != NULL) {
1374                 if (rc < 0)
1375                         CERROR("Not creating conn %s type %d: %s\n",
1376                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1377                 else
1378                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1379                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1380         }
1381
1382         if (!active) {
1383                 if (rc > 0) {
1384                         /* Request retry by replying with CONN_NONE;
1385                          * ksnc_proto has been set already */
1386                         conn->ksnc_type = SOCKLND_CONN_NONE;
1387                         hello->kshm_nips = 0;
1388                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1389                 }
1390
1391                 write_lock_bh(global_lock);
1392                 peer_ni->ksnp_accepting--;
1393                 write_unlock_bh(global_lock);
1394         }
1395
1396         /*
1397          * If we get here without an error code, just use -EALREADY.
1398          * Depending on how we got here, the error may be positive
1399          * or negative. Normalize the value for ksocknal_txlist_done().
1400          */
1401         rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1402         ksocknal_txlist_done(ni, &zombies, rc2);
1403         ksocknal_peer_decref(peer_ni);
1404
1405 failed_1:
1406         if (hello != NULL)
1407                 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1408                                             kshm_ips[LNET_INTERFACES_NUM]));
1409
1410         LIBCFS_FREE(conn, sizeof(*conn));
1411
1412 failed_0:
1413         sock_release(sock);
1414         return rc;
1415 }
1416
1417 void
1418 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1419 {
1420         /* This just does the immediate housekeeping, and queues the
1421          * connection for the reaper to terminate.
1422          * Caller holds ksnd_global_lock exclusively in irq context */
1423         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1424         struct ksock_route *route;
1425         struct ksock_conn *conn2;
1426         struct list_head *tmp;
1427
1428         LASSERT(peer_ni->ksnp_error == 0);
1429         LASSERT(!conn->ksnc_closing);
1430         conn->ksnc_closing = 1;
1431
1432         /* ksnd_deathrow_conns takes over peer_ni's ref */
1433         list_del(&conn->ksnc_list);
1434
1435         route = conn->ksnc_route;
1436         if (route != NULL) {
1437                 /* dissociate conn from route... */
1438                 LASSERT(!route->ksnr_deleted);
1439                 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1440
1441                 conn2 = NULL;
1442                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1443                         conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1444
1445                         if (conn2->ksnc_route == route &&
1446                             conn2->ksnc_type == conn->ksnc_type)
1447                                 break;
1448
1449                         conn2 = NULL;
1450                 }
1451                 if (conn2 == NULL)
1452                         route->ksnr_connected &= ~(1 << conn->ksnc_type);
1453
1454                 conn->ksnc_route = NULL;
1455
1456                 ksocknal_route_decref(route);   /* drop conn's ref on route */
1457         }
1458
1459         if (list_empty(&peer_ni->ksnp_conns)) {
1460                 /* No more connections to this peer_ni */
1461
1462                 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1463                         struct ksock_tx *tx;
1464
1465                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1466
1467                         /* throw them to the last connection...,
1468                          * these TXs will be sent to /dev/null by the scheduler */
1469                         list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1470                                             tx_list)
1471                                 ksocknal_tx_prep(conn, tx);
1472
1473                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1474                         list_splice_init(&peer_ni->ksnp_tx_queue,
1475                                          &conn->ksnc_tx_queue);
1476                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1477                 }
1478
1479                 /* renegotiate protocol version */
1480                 peer_ni->ksnp_proto = NULL;
1481                 /* stash last conn close reason */
1482                 peer_ni->ksnp_error = error;
1483
1484                 if (list_empty(&peer_ni->ksnp_routes)) {
1485                         /* I've just closed the last conn belonging to a
1486                          * peer_ni with no routes to it */
1487                         ksocknal_unlink_peer_locked(peer_ni);
1488                 }
1489         }
1490
1491         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1492
1493         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
1494         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1495
1496         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1497 }
1498
1499 void
1500 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1501 {
1502         int notify = 0;
1503         time64_t last_alive = 0;
1504
1505         /* There has been a connection failure or comms error; but I'll only
1506          * tell LNET I think the peer_ni is dead if it's to another kernel and
1507          * there are no connections or connection attempts in existence. */
1508
1509         read_lock(&ksocknal_data.ksnd_global_lock);
1510
1511         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1512              list_empty(&peer_ni->ksnp_conns) &&
1513              peer_ni->ksnp_accepting == 0 &&
1514              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1515                 notify = 1;
1516                 last_alive = peer_ni->ksnp_last_alive;
1517         }
1518
1519         read_unlock(&ksocknal_data.ksnd_global_lock);
1520
1521         if (notify)
1522                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
1523                             false, false, last_alive);
1524 }
1525
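/*
 * Abort any zero-copy requests still outstanding on this conn: clear their
 * cookies, mark them as not acked and release each TX so it can complete
 * with an error.
 */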
1526 void
1527 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1528 {
1529         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1530         struct ksock_tx *tx;
1531         struct ksock_tx *tmp;
1532         LIST_HEAD(zlist);
1533
1534         /* NB safe to finalize TXs because closing of socket will
1535          * abort all buffered data */
1536         LASSERT(conn->ksnc_sock == NULL);
1537
1538         spin_lock(&peer_ni->ksnp_lock);
1539
1540         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1541                 if (tx->tx_conn != conn)
1542                         continue;
1543
1544                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1545
1546                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1547                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1548                 list_del(&tx->tx_zc_list);
1549                 list_add(&tx->tx_zc_list, &zlist);
1550         }
1551
1552         spin_unlock(&peer_ni->ksnp_lock);
1553
1554         while (!list_empty(&zlist)) {
1555                 tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
1556
1557                 list_del(&tx->tx_zc_list);
1558                 ksocknal_tx_decref(tx);
1559         }
1560 }
1561
1562 void
1563 ksocknal_terminate_conn(struct ksock_conn *conn)
1564 {
1565         /* This gets called by the reaper (guaranteed thread context) to
1566          * disengage the socket from its callbacks and close it.
1567          * ksnc_refcount will eventually hit zero, and then the reaper will
1568          * destroy it. */
1569         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1570         struct ksock_sched *sched = conn->ksnc_scheduler;
1571         int failed = 0;
1572
1573         LASSERT(conn->ksnc_closing);
1574
1575         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1576         spin_lock_bh(&sched->kss_lock);
1577
1578         /* a closing conn is always ready to tx */
1579         conn->ksnc_tx_ready = 1;
1580
1581         if (!conn->ksnc_tx_scheduled &&
1582             !list_empty(&conn->ksnc_tx_queue)) {
1583                 list_add_tail(&conn->ksnc_tx_list,
1584                                &sched->kss_tx_conns);
1585                 conn->ksnc_tx_scheduled = 1;
1586                 /* extra ref for scheduler */
1587                 ksocknal_conn_addref(conn);
1588
1589                 wake_up (&sched->kss_waitq);
1590         }
1591
1592         spin_unlock_bh(&sched->kss_lock);
1593
1594         /* serialise with callbacks */
1595         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1596
1597         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1598
1599         /* OK, so this conn may not be completely disengaged from its
1600          * scheduler yet, but it _has_ committed to terminate... */
1601         conn->ksnc_scheduler->kss_nconns--;
1602
1603         if (peer_ni->ksnp_error != 0) {
1604                 /* peer_ni's last conn closed in error */
1605                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1606                 failed = 1;
1607                 peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
1608         }
1609
1610         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1611
1612         if (failed)
1613                 ksocknal_peer_failed(peer_ni);
1614
1615         /* The socket is closed on the final put; either here, or in
1616          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1617          * when the connection was established, this will close the socket
1618          * immediately, aborting anything buffered in it. Any hung
1619          * zero-copy transmits will therefore complete in finite time. */
1620         ksocknal_connsock_decref(conn);
1621 }
1622
1623 void
1624 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1625 {
1626         /* Queue the conn for the reaper to destroy */
1627         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1628         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1629
1630         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1631         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1632
1633         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1634 }
1635
1636 void
1637 ksocknal_destroy_conn(struct ksock_conn *conn)
1638 {
1639         time64_t last_rcv;
1640
1641         /* Final coup-de-grace of the reaper */
1642         CDEBUG (D_NET, "connection %p\n", conn);
1643
1644         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1645         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1646         LASSERT (conn->ksnc_sock == NULL);
1647         LASSERT (conn->ksnc_route == NULL);
1648         LASSERT (!conn->ksnc_tx_scheduled);
1649         LASSERT (!conn->ksnc_rx_scheduled);
1650         LASSERT(list_empty(&conn->ksnc_tx_queue));
1651
1652         /* complete current receive if any */
1653         switch (conn->ksnc_rx_state) {
1654         case SOCKNAL_RX_LNET_PAYLOAD:
1655                 last_rcv = conn->ksnc_rx_deadline -
1656                            lnet_get_lnd_timeout();
1657                 CERROR("Completing partial receive from %s[%d], "
1658                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1659                        "last alive is %lld secs ago\n",
1660                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1661                        &conn->ksnc_ipaddr, conn->ksnc_port,
1662                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1663                        ktime_get_seconds() - last_rcv);
1664                 if (conn->ksnc_lnet_msg)
1665                         conn->ksnc_lnet_msg->msg_health_status =
1666                                 LNET_MSG_STATUS_REMOTE_ERROR;
1667                 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1668                 break;
1669         case SOCKNAL_RX_LNET_HEADER:
1670                 if (conn->ksnc_rx_started)
1671                         CERROR("Incomplete receive of lnet header from %s, "
1672                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1673                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1674                                &conn->ksnc_ipaddr, conn->ksnc_port,
1675                                conn->ksnc_proto->pro_version);
1676                 break;
1677         case SOCKNAL_RX_KSM_HEADER:
1678                 if (conn->ksnc_rx_started)
1679                         CERROR("Incomplete receive of ksock message from %s, "
1680                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1681                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1682                                &conn->ksnc_ipaddr, conn->ksnc_port,
1683                                conn->ksnc_proto->pro_version);
1684                 break;
1685         case SOCKNAL_RX_SLOP:
1686                 if (conn->ksnc_rx_started)
1687                         CERROR("Incomplete receive of slops from %s, "
1688                                "ip %pI4h:%d, with error\n",
1689                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1690                                &conn->ksnc_ipaddr, conn->ksnc_port);
1691                 break;
1692         default:
1693                 LBUG ();
1694                 break;
1695         }
1696
1697         ksocknal_peer_decref(conn->ksnc_peer);
1698
1699         LIBCFS_FREE (conn, sizeof (*conn));
1700 }
1701
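/*
 * Close every connection this peer_ni has to @ipaddr (all of its connections
 * if @ipaddr is 0).  Caller holds ksnd_global_lock exclusively.  Returns the
 * number of connections handed to the reaper.
 */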
1702 int
1703 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
1704 {
1705         struct ksock_conn *conn;
1706         struct list_head *ctmp;
1707         struct list_head *cnxt;
1708         int count = 0;
1709
1710         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1711                 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1712
1713                 if (ipaddr == 0 ||
1714                     conn->ksnc_ipaddr == ipaddr) {
1715                         count++;
1716                         ksocknal_close_conn_locked (conn, why);
1717                 }
1718         }
1719
1720         return (count);
1721 }
1722
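/*
 * Close this conn and all of its siblings, i.e. every connection to the same
 * peer_ni bound to the same remote IP, taking the global lock on behalf of
 * the caller.  Returns the number of connections closed.
 */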
1723 int
1724 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1725 {
1726         struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1727         u32 ipaddr = conn->ksnc_ipaddr;
1728         int count;
1729
1730         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1731
1732         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1733
1734         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1735
1736         return (count);
1737 }
1738
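/*
 * Close all connections matching @id and @ipaddr; LNET_NID_ANY, LNET_PID_ANY
 * and an @ipaddr of 0 act as wildcards.  Wildcard requests always succeed;
 * an exact match returns -ENOENT if nothing was closed.
 */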
1739 int
1740 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1741 {
1742         struct ksock_peer_ni *peer_ni;
1743         struct list_head *ptmp;
1744         struct list_head *pnxt;
1745         int lo;
1746         int hi;
1747         int i;
1748         int count = 0;
1749
1750         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1751
1752         if (id.nid != LNET_NID_ANY)
1753                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1754         else {
1755                 lo = 0;
1756                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1757         }
1758
1759         for (i = lo; i <= hi; i++) {
1760                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1761
1762                         peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
1763
1764                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1765                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1766                                 continue;
1767
1768                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1769                 }
1770         }
1771
1772         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1773
1774         /* wildcards always succeed */
1775         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1776                 return (0);
1777
1778         return (count == 0 ? -ENOENT : 0);
1779 }
1780
1781 void
1782 ksocknal_notify_gw_down(lnet_nid_t gw_nid)
1783 {
1784         /* The router is telling me she's been notified of a change in
1785          * gateway state....
1786          */
1787         struct lnet_process_id id = {
1788                 .nid    = gw_nid,
1789                 .pid    = LNET_PID_ANY,
1790         };
1791
1792         CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));
1793
1794         /* If the gateway crashed, close all open connections... */
1795         ksocknal_close_matching_conns(id, 0);
1796         return;
1797
1798         /* We can only establish new connections
1799          * if we have autoroutes, and these connect on demand. */
1800 }
1801
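/*
 * LND query handler: report when this peer_ni was last known alive and,
 * unless an existing peer_ni has no connectable route, add the peer_ni if
 * needed and launch connection attempts to it.
 */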
1802 void
1803 ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
1804 {
1805         int connect = 1;
1806         time64_t last_alive = 0;
1807         time64_t now = ktime_get_seconds();
1808         struct ksock_peer_ni *peer_ni = NULL;
1809         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1810         struct lnet_process_id id = {
1811                 .nid = nid,
1812                 .pid = LNET_PID_LUSTRE,
1813         };
1814
1815         read_lock(glock);
1816
1817         peer_ni = ksocknal_find_peer_locked(ni, id);
1818         if (peer_ni != NULL) {
1819                 struct list_head *tmp;
1820                 struct ksock_conn *conn;
1821                 int bufnob;
1822
1823                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1824                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
1825                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1826
1827                         if (bufnob < conn->ksnc_tx_bufnob) {
1828                                 /* something got ACKed */
1829                                 conn->ksnc_tx_deadline = ktime_get_seconds() +
1830                                                          lnet_get_lnd_timeout();
1831                                 peer_ni->ksnp_last_alive = now;
1832                                 conn->ksnc_tx_bufnob = bufnob;
1833                         }
1834                 }
1835
1836                 last_alive = peer_ni->ksnp_last_alive;
1837                 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1838                         connect = 0;
1839         }
1840
1841         read_unlock(glock);
1842
1843         if (last_alive != 0)
1844                 *when = last_alive;
1845
1846         CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
1847                libcfs_nid2str(nid), peer_ni,
1848                last_alive ? now - last_alive : -1,
1849                connect);
1850
1851         if (!connect)
1852                 return;
1853
1854         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1855
1856         write_lock_bh(glock);
1857
1858         peer_ni = ksocknal_find_peer_locked(ni, id);
1859         if (peer_ni != NULL)
1860                 ksocknal_launch_all_connections_locked(peer_ni);
1861
1862         write_unlock_bh(glock);
1863 }
1864
1865 static void
1866 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1867 {
1868         int index;
1869         int i;
1870         struct list_head *tmp;
1871         struct ksock_conn *conn;
1872
1873         for (index = 0; ; index++) {
1874                 read_lock(&ksocknal_data.ksnd_global_lock);
1875
1876                 i = 0;
1877                 conn = NULL;
1878
1879                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1880                         if (i++ == index) {
1881                                 conn = list_entry(tmp, struct ksock_conn,
1882                                                   ksnc_list);
1883                                 ksocknal_conn_addref(conn);
1884                                 break;
1885                         }
1886                 }
1887
1888                 read_unlock(&ksocknal_data.ksnd_global_lock);
1889
1890                 if (conn == NULL)
1891                         break;
1892
1893                 ksocknal_lib_push_conn (conn);
1894                 ksocknal_conn_decref(conn);
1895         }
1896 }
1897
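/*
 * Apply ksocknal_lib_push_conn() to every connection of every peer_ni
 * matching @id.  Returns -ENOENT if no peer_ni matched.
 */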
1898 static int
1899 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1900 {
1901         struct list_head *start;
1902         struct list_head *end;
1903         struct list_head *tmp;
1904         int               rc = -ENOENT;
1905         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1906
1907         if (id.nid == LNET_NID_ANY) {
1908                 start = &ksocknal_data.ksnd_peers[0];
1909                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1910         } else {
1911                 start = end = ksocknal_nid2peerlist(id.nid);
1912         }
1913
1914         for (tmp = start; tmp <= end; tmp++) {
1915                 int     peer_off; /* searching offset in peer_ni hash table */
1916
1917                 for (peer_off = 0; ; peer_off++) {
1918                         struct ksock_peer_ni *peer_ni;
1919                         int           i = 0;
1920
1921                         read_lock(&ksocknal_data.ksnd_global_lock);
1922                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1923                                 if (!((id.nid == LNET_NID_ANY ||
1924                                        id.nid == peer_ni->ksnp_id.nid) &&
1925                                       (id.pid == LNET_PID_ANY ||
1926                                        id.pid == peer_ni->ksnp_id.pid)))
1927                                         continue;
1928
1929                                 if (i++ == peer_off) {
1930                                         ksocknal_peer_addref(peer_ni);
1931                                         break;
1932                                 }
1933                         }
1934                         read_unlock(&ksocknal_data.ksnd_global_lock);
1935
1936                         if (i <= peer_off) /* no match */
1937                                 break;
1938
1939                         rc = 0;
1940                         ksocknal_push_peer(peer_ni);
1941                         ksocknal_peer_decref(peer_ni);
1942                 }
1943         }
1944         return rc;
1945 }
1946
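/*
 * Register a new local interface with this NI.  Duplicates are silently
 * ignored and -ENOSPC is returned once LNET_INTERFACES_NUM interfaces are
 * configured.  Only connections created after this call will use the new
 * interface.
 */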
1947 static int
1948 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1949 {
1950         struct ksock_net *net = ni->ni_data;
1951         struct ksock_interface *iface;
1952         int rc;
1953         int i;
1954         int j;
1955         struct list_head *ptmp;
1956         struct ksock_peer_ni *peer_ni;
1957         struct list_head *rtmp;
1958         struct ksock_route *route;
1959
1960         if (ipaddress == 0 ||
1961             netmask == 0)
1962                 return -EINVAL;
1963
1964         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1965
1966         iface = ksocknal_ip2iface(ni, ipaddress);
1967         if (iface != NULL) {
1968                 /* silently ignore dups */
1969                 rc = 0;
1970         } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
1971                 rc = -ENOSPC;
1972         } else {
1973                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1974
1975                 iface->ksni_ipaddr = ipaddress;
1976                 iface->ksni_netmask = netmask;
1977                 iface->ksni_nroutes = 0;
1978                 iface->ksni_npeers = 0;
1979
1980                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1981                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1982                                 peer_ni = list_entry(ptmp, struct ksock_peer_ni,
1983                                                      ksnp_list);
1984
1985                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1986                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1987                                                 iface->ksni_npeers++;
1988
1989                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1990                                         route = list_entry(rtmp,
1991                                                            struct ksock_route,
1992                                                            ksnr_list);
1993
1994                                         if (route->ksnr_myipaddr == ipaddress)
1995                                                 iface->ksni_nroutes++;
1996                                 }
1997                         }
1998                 }
1999
2000                 rc = 0;
2001                 /* NB only new connections will pay attention to the new interface! */
2002         }
2003
2004         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2005
2006         return rc;
2007 }
2008
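/*
 * Forget @ipaddr for this peer_ni: drop it from the passive IP list, unbind
 * (or delete) routes using it as the local address and close any connections
 * bound to it.
 */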
2009 static void
2010 ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
2011 {
2012         struct list_head *tmp;
2013         struct list_head *nxt;
2014         struct ksock_route *route;
2015         struct ksock_conn *conn;
2016         int i;
2017         int j;
2018
2019         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2020                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2021                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2022                                 peer_ni->ksnp_passive_ips[j-1] =
2023                                         peer_ni->ksnp_passive_ips[j];
2024                         peer_ni->ksnp_n_passive_ips--;
2025                         break;
2026                 }
2027
2028         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2029                 route = list_entry(tmp, struct ksock_route, ksnr_list);
2030
2031                 if (route->ksnr_myipaddr != ipaddr)
2032                         continue;
2033
2034                 if (route->ksnr_share_count != 0) {
2035                         /* Manually created; keep, but unbind */
2036                         route->ksnr_myipaddr = 0;
2037                 } else {
2038                         ksocknal_del_route_locked(route);
2039                 }
2040         }
2041
2042         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2043                 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2044
2045                 if (conn->ksnc_myipaddr == ipaddr)
2046                         ksocknal_close_conn_locked (conn, 0);
2047         }
2048 }
2049
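/*
 * Remove a local interface (all interfaces if @ipaddress is 0) and clean up
 * any peer_ni state referring to it.  Returns -ENOENT if nothing matched.
 */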
2050 static int
2051 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2052 {
2053         struct ksock_net *net = ni->ni_data;
2054         int rc = -ENOENT;
2055         struct list_head *tmp;
2056         struct list_head *nxt;
2057         struct ksock_peer_ni *peer_ni;
2058         u32 this_ip;
2059         int i;
2060         int j;
2061
2062         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2063
2064         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2065                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2066
2067                 if (!(ipaddress == 0 ||
2068                       ipaddress == this_ip))
2069                         continue;
2070
2071                 rc = 0;
2072
2073                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2074                         net->ksnn_interfaces[j-1] =
2075                                 net->ksnn_interfaces[j];
2076
2077                 net->ksnn_ninterfaces--;
2078
2079                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2080                         list_for_each_safe(tmp, nxt,
2081                                            &ksocknal_data.ksnd_peers[j]) {
2082                                 peer_ni = list_entry(tmp, struct ksock_peer_ni,
2083                                                      ksnp_list);
2084
2085                                 if (peer_ni->ksnp_ni != ni)
2086                                         continue;
2087
2088                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2089                         }
2090                 }
2091         }
2092
2093         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2094
2095         return (rc);
2096 }
2097
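/*
 * ioctl dispatcher for the socklnd: interface, peer_ni and connection
 * queries and updates arrive here as struct libcfs_ioctl_data.
 */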
2098 int
2099 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
2100 {
2101         struct lnet_process_id id = {0};
2102         struct libcfs_ioctl_data *data = arg;
2103         int rc;
2104
2105         switch(cmd) {
2106         case IOC_LIBCFS_GET_INTERFACE: {
2107                 struct ksock_net *net = ni->ni_data;
2108                 struct ksock_interface *iface;
2109
2110                 read_lock(&ksocknal_data.ksnd_global_lock);
2111
2112                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2113                         rc = -ENOENT;
2114                 } else {
2115                         rc = 0;
2116                         iface = &net->ksnn_interfaces[data->ioc_count];
2117
2118                         data->ioc_u32[0] = iface->ksni_ipaddr;
2119                         data->ioc_u32[1] = iface->ksni_netmask;
2120                         data->ioc_u32[2] = iface->ksni_npeers;
2121                         data->ioc_u32[3] = iface->ksni_nroutes;
2122                 }
2123
2124                 read_unlock(&ksocknal_data.ksnd_global_lock);
2125                 return rc;
2126         }
2127
2128         case IOC_LIBCFS_ADD_INTERFACE:
2129                 return ksocknal_add_interface(ni,
2130                                               data->ioc_u32[0], /* IP address */
2131                                               data->ioc_u32[1]); /* net mask */
2132
2133         case IOC_LIBCFS_DEL_INTERFACE:
2134                 return ksocknal_del_interface(ni,
2135                                               data->ioc_u32[0]); /* IP address */
2136
2137         case IOC_LIBCFS_GET_PEER: {
2138                 __u32            myip = 0;
2139                 __u32            ip = 0;
2140                 int              port = 0;
2141                 int              conn_count = 0;
2142                 int              share_count = 0;
2143
2144                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2145                                             &id, &myip, &ip, &port,
2146                                             &conn_count,  &share_count);
2147                 if (rc != 0)
2148                         return rc;
2149
2150                 data->ioc_nid    = id.nid;
2151                 data->ioc_count  = share_count;
2152                 data->ioc_u32[0] = ip;
2153                 data->ioc_u32[1] = port;
2154                 data->ioc_u32[2] = myip;
2155                 data->ioc_u32[3] = conn_count;
2156                 data->ioc_u32[4] = id.pid;
2157                 return 0;
2158         }
2159
2160         case IOC_LIBCFS_ADD_PEER:
2161                 id.nid = data->ioc_nid;
2162                 id.pid = LNET_PID_LUSTRE;
2163                 return ksocknal_add_peer (ni, id,
2164                                           data->ioc_u32[0], /* IP */
2165                                           data->ioc_u32[1]); /* port */
2166
2167         case IOC_LIBCFS_DEL_PEER:
2168                 id.nid = data->ioc_nid;
2169                 id.pid = LNET_PID_ANY;
2170                 return ksocknal_del_peer (ni, id,
2171                                           data->ioc_u32[0]); /* IP */
2172
2173         case IOC_LIBCFS_GET_CONN: {
2174                 int           txmem;
2175                 int           rxmem;
2176                 int           nagle;
2177                 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2178
2179                 if (conn == NULL)
2180                         return -ENOENT;
2181
2182                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2183
2184                 data->ioc_count  = txmem;
2185                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2186                 data->ioc_flags  = nagle;
2187                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2188                 data->ioc_u32[1] = conn->ksnc_port;
2189                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2190                 data->ioc_u32[3] = conn->ksnc_type;
2191                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
2192                 data->ioc_u32[5] = rxmem;
2193                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2194                 ksocknal_conn_decref(conn);
2195                 return 0;
2196         }
2197
2198         case IOC_LIBCFS_CLOSE_CONNECTION:
2199                 id.nid = data->ioc_nid;
2200                 id.pid = LNET_PID_ANY;
2201                 return ksocknal_close_matching_conns (id,
2202                                                       data->ioc_u32[0]);
2203
2204         case IOC_LIBCFS_REGISTER_MYNID:
2205                 /* Ignore if this is a noop */
2206                 if (data->ioc_nid == ni->ni_nid)
2207                         return 0;
2208
2209                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2210                        libcfs_nid2str(data->ioc_nid),
2211                        libcfs_nid2str(ni->ni_nid));
2212                 return -EINVAL;
2213
2214         case IOC_LIBCFS_PUSH_CONNECTION:
2215                 id.nid = data->ioc_nid;
2216                 id.pid = LNET_PID_ANY;
2217                 return ksocknal_push(ni, id);
2218
2219         default:
2220                 return -EINVAL;
2221         }
2222         /* not reached */
2223 }
2224
2225 static void
2226 ksocknal_free_buffers (void)
2227 {
2228         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2229
2230         if (ksocknal_data.ksnd_schedulers != NULL)
2231                 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
2232
2233         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2234                      sizeof(struct list_head) *
2235                      ksocknal_data.ksnd_peer_hash_size);
2236
2237         spin_lock(&ksocknal_data.ksnd_tx_lock);
2238
2239         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2240                 struct list_head zlist;
2241                 struct ksock_tx *tx;
2242
2243                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2244                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2245                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2246
2247                 while (!list_empty(&zlist)) {
2248                         tx = list_entry(zlist.next, struct ksock_tx, tx_list);
2249                         list_del(&tx->tx_list);
2250                         LIBCFS_FREE(tx, tx->tx_desc_size);
2251                 }
2252         } else {
2253                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2254         }
2255 }
2256
2257 static void
2258 ksocknal_base_shutdown(void)
2259 {
2260         struct ksock_sched *sched;
2261         int i;
2262
2263         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2264                atomic_read (&libcfs_kmemory));
2265         LASSERT (ksocknal_data.ksnd_nnets == 0);
2266
2267         switch (ksocknal_data.ksnd_init) {
2268         default:
2269                 LASSERT(0);
2270                 /* fallthrough */
2271
2272         case SOCKNAL_INIT_ALL:
2273         case SOCKNAL_INIT_DATA:
2274                 LASSERT(ksocknal_data.ksnd_peers != NULL);
2275                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2276                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2277
2278                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2279                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2280                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2281                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2282                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2283
2284                 if (ksocknal_data.ksnd_schedulers != NULL) {
2285                         cfs_percpt_for_each(sched, i,
2286                                             ksocknal_data.ksnd_schedulers) {
2287
2288                                 LASSERT(list_empty(&sched->kss_tx_conns));
2289                                 LASSERT(list_empty(&sched->kss_rx_conns));
2290                                 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2291                                 LASSERT(sched->kss_nconns == 0);
2292                         }
2293                 }
2294
2295                 /* flag threads to terminate; wake and wait for them to die */
2296                 ksocknal_data.ksnd_shuttingdown = 1;
2297                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2298                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2299
2300                 if (ksocknal_data.ksnd_schedulers != NULL) {
2301                         cfs_percpt_for_each(sched, i,
2302                                             ksocknal_data.ksnd_schedulers)
2303                                         wake_up_all(&sched->kss_waitq);
2304                 }
2305
2306                 i = 4;
2307                 read_lock(&ksocknal_data.ksnd_global_lock);
2308                 while (ksocknal_data.ksnd_nthreads != 0) {
2309                         i++;
2310                         /* power of 2? */
2311                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2312                                 "waiting for %d threads to terminate\n",
2313                                 ksocknal_data.ksnd_nthreads);
2314                         read_unlock(&ksocknal_data.ksnd_global_lock);
2315                         set_current_state(TASK_UNINTERRUPTIBLE);
2316                         schedule_timeout(cfs_time_seconds(1));
2317                         read_lock(&ksocknal_data.ksnd_global_lock);
2318                 }
2319                 read_unlock(&ksocknal_data.ksnd_global_lock);
2320
2321                 ksocknal_free_buffers();
2322
2323                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2324                 break;
2325         }
2326
2327         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2328                atomic_read (&libcfs_kmemory));
2329
2330         module_put(THIS_MODULE);
2331 }
2332
2333 static int
2334 ksocknal_base_startup(void)
2335 {
2336         struct ksock_sched *sched;
2337         int rc;
2338         int i;
2339
2340         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2341         LASSERT (ksocknal_data.ksnd_nnets == 0);
2342
2343         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2344
2345         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2346         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2347                      sizeof(struct list_head) *
2348                      ksocknal_data.ksnd_peer_hash_size);
2349         if (ksocknal_data.ksnd_peers == NULL)
2350                 return -ENOMEM;
2351
2352         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2353                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2354
2355         rwlock_init(&ksocknal_data.ksnd_global_lock);
2356         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2357
2358         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2359         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2360         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2361         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2362         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2363
2364         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2365         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2366         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2367         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2368
2369         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2370         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2371
2372         /* NB memset above zeros the whole of ksocknal_data */
2373
2374         /* flag lists/ptrs/locks initialised */
2375         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2376         try_module_get(THIS_MODULE);
2377
2378         /* Create a scheduler block per available CPT */
2379         ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2380                                                          sizeof(*sched));
2381         if (ksocknal_data.ksnd_schedulers == NULL)
2382                 goto failed;
2383
2384         cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2385                 int nthrs;
2386
2387                 /*
2388                  * make sure not to allocate more threads than there are
2389                  * cores/CPUs in the CPT
2390                  */
2391                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2392                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2393                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2394                 } else {
2395                         /*
2396                          * max to half of CPUs, assume another half should be
2397                          * reserved for upper layer modules
2398                          */
2399                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2400                 }
2401
2402                 sched->kss_nthreads_max = nthrs;
2403                 sched->kss_cpt = i;
2404
2405                 spin_lock_init(&sched->kss_lock);
2406                 INIT_LIST_HEAD(&sched->kss_rx_conns);
2407                 INIT_LIST_HEAD(&sched->kss_tx_conns);
2408                 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2409                 init_waitqueue_head(&sched->kss_waitq);
2410         }
2411
2412         ksocknal_data.ksnd_connd_starting         = 0;
2413         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2414         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2415         /* must have at least 2 connds to remain responsive to accepts while
2416          * connecting */
2417         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2418                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2419
2420         if (*ksocknal_tunables.ksnd_nconnds_max <
2421             *ksocknal_tunables.ksnd_nconnds) {
2422                 ksocknal_tunables.ksnd_nconnds_max =
2423                         ksocknal_tunables.ksnd_nconnds;
2424         }
2425
2426         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2427                 char name[16];
2428                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2429                 ksocknal_data.ksnd_connd_starting++;
2430                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2431
2432
2433                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2434                 rc = ksocknal_thread_start(ksocknal_connd,
2435                                            (void *)((uintptr_t)i), name);
2436                 if (rc != 0) {
2437                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2438                         ksocknal_data.ksnd_connd_starting--;
2439                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2440                         CERROR("Can't spawn socknal connd: %d\n", rc);
2441                         goto failed;
2442                 }
2443         }
2444
2445         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2446         if (rc != 0) {
2447                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2448                 goto failed;
2449         }
2450
2451         /* flag everything initialised */
2452         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2453
2454         return 0;
2455
2456  failed:
2457         ksocknal_base_shutdown();
2458         return -ENETDOWN;
2459 }
2460
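/*
 * Log a peer_ni still hashed for this NI, along with its routes and conns,
 * to help diagnose a shutdown that is stuck waiting for peers to go away.
 */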
2461 static void
2462 ksocknal_debug_peerhash(struct lnet_ni *ni)
2463 {
2464         struct ksock_peer_ni *peer_ni = NULL;
2465         struct list_head *tmp;
2466         int i;
2467
2468         read_lock(&ksocknal_data.ksnd_global_lock);
2469
2470         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2471                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2472                         peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
2473
2474                         if (peer_ni->ksnp_ni == ni)
2475                                 break;
2476
2477                         peer_ni = NULL;
2478                 }
2479         }
2480
2481         if (peer_ni != NULL) {
2482                 struct ksock_route *route;
2483                 struct ksock_conn  *conn;
2484
2485                 CWARN("Active peer_ni on shutdown: %s, ref %d, "
2486                       "closing %d, accepting %d, err %d, zcookie %llu, "
2487                       "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2488                       atomic_read(&peer_ni->ksnp_refcount),
2489                       peer_ni->ksnp_closing,
2490                       peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2491                       peer_ni->ksnp_zc_next_cookie,
2492                       !list_empty(&peer_ni->ksnp_tx_queue),
2493                       !list_empty(&peer_ni->ksnp_zc_req_list));
2494
2495                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2496                         route = list_entry(tmp, struct ksock_route, ksnr_list);
2497                         CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
2498                               "del %d\n", atomic_read(&route->ksnr_refcount),
2499                               route->ksnr_scheduled, route->ksnr_connecting,
2500                               route->ksnr_connected, route->ksnr_deleted);
2501                 }
2502
2503                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2504                         conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2505                         CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2506                               atomic_read(&conn->ksnc_conn_refcount),
2507                               atomic_read(&conn->ksnc_sock_refcount),
2508                               conn->ksnc_type, conn->ksnc_closing);
2509                 }
2510         }
2511
2512         read_unlock(&ksocknal_data.ksnd_global_lock);
2513 }
2514
2515 void
2516 ksocknal_shutdown(struct lnet_ni *ni)
2517 {
2518         struct ksock_net *net = ni->ni_data;
2519         struct lnet_process_id anyid = {
2520                 .nid = LNET_NID_ANY,
2521                 .pid = LNET_PID_ANY,
2522         };
2523         int i;
2524
2525         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2526         LASSERT(ksocknal_data.ksnd_nnets > 0);
2527
2528         spin_lock_bh(&net->ksnn_lock);
2529         net->ksnn_shutdown = 1;                 /* prevent new peers */
2530         spin_unlock_bh(&net->ksnn_lock);
2531
2532         /* Delete all peers */
2533         ksocknal_del_peer(ni, anyid, 0);
2534
2535         /* Wait for all peer_ni state to clean up */
2536         i = 2;
2537         spin_lock_bh(&net->ksnn_lock);
2538         while (net->ksnn_npeers != 0) {
2539                 spin_unlock_bh(&net->ksnn_lock);
2540
2541                 i++;
2542                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2543                        "waiting for %d peers to disconnect\n",
2544                        net->ksnn_npeers);
2545                 set_current_state(TASK_UNINTERRUPTIBLE);
2546                 schedule_timeout(cfs_time_seconds(1));
2547
2548                 ksocknal_debug_peerhash(ni);
2549
2550                 spin_lock_bh(&net->ksnn_lock);
2551         }
2552         spin_unlock_bh(&net->ksnn_lock);
2553
2554         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2555                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2556                 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2557         }
2558
2559         list_del(&net->ksnn_list);
2560         LIBCFS_FREE(net, sizeof(*net));
2561
2562         ksocknal_data.ksnd_nnets--;
2563         if (ksocknal_data.ksnd_nnets == 0)
2564                 ksocknal_base_shutdown();
2565 }
2566
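/*
 * Count the interfaces of @net whose base device name (any ':' alias suffix
 * stripped) is not already used by a previously registered net.  A non-zero
 * result tells the caller that additional scheduler threads may be needed.
 */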
2567 static int
2568 ksocknal_search_new_ipif(struct ksock_net *net)
2569 {
2570         int new_ipif = 0;
2571         int i;
2572
2573         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2574                 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2575                 char *colon = strchr(ifnam, ':');
2576                 int found  = 0;
2577                 struct ksock_net *tmp;
2578                 int j;
2579
2580                 if (colon != NULL) /* ignore alias device */
2581                         *colon = 0;
2582
2583                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2584                                         ksnn_list) {
2585                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2586                                 char *ifnam2 =
2587                                         &tmp->ksnn_interfaces[j].ksni_name[0];
2588                                 char *colon2 = strchr(ifnam2, ':');
2589
2590                                 if (colon2 != NULL)
2591                                         *colon2 = 0;
2592
2593                                 found = strcmp(ifnam, ifnam2) == 0;
2594                                 if (colon2 != NULL)
2595                                         *colon2 = ':';
2596                         }
2597                         if (found)
2598                                 break;
2599                 }
2600
2601                 new_ipif += !found;
2602                 if (colon != NULL)
2603                         *colon = ':';
2604         }
2605
2606         return new_ipif;
2607 }
2608
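/*
 * Start scheduler threads for one CPT: the full complement on first use, or
 * at most two extra threads when a new interface is added later.
 */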
2609 static int
2610 ksocknal_start_schedulers(struct ksock_sched *sched)
2611 {
2612         int     nthrs;
2613         int     rc = 0;
2614         int     i;
2615
2616         if (sched->kss_nthreads == 0) {
2617                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2618                         nthrs = sched->kss_nthreads_max;
2619                 } else {
2620                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2621                                                sched->kss_cpt);
2622                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2623                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2624                 }
2625                 nthrs = min(nthrs, sched->kss_nthreads_max);
2626         } else {
2627                 LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
2628                 /* add up to two more threads when a new interface appears */
2629                 nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
2630         }
2631
2632         for (i = 0; i < nthrs; i++) {
2633                 long id;
2634                 char name[20];
2635
2636                 id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
2637                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2638                          sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
2639
2640                 rc = ksocknal_thread_start(ksocknal_scheduler,
2641                                            (void *)id, name);
2642                 if (rc == 0)
2643                         continue;
2644
2645                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2646                        sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
2647                 break;
2648         }
2649
2650         sched->kss_nthreads += i;
2651         return rc;
2652 }
2653
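/*
 * Ensure every CPT used by this net has scheduler threads running, starting
 * them on demand when a new interface appears.
 */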
2654 static int
2655 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2656 {
2657         int newif = ksocknal_search_new_ipif(net);
2658         int rc;
2659         int i;
2660
2661         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2662                 return -EINVAL;
2663
2664         for (i = 0; i < ncpts; i++) {
2665                 struct ksock_sched *sched;
2666                 int cpt = (cpts == NULL) ? i : cpts[i];
2667
2668                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2669                 sched = ksocknal_data.ksnd_schedulers[cpt];
2670
2671                 if (!newif && sched->kss_nthreads > 0)
2672                         continue;
2673
2674                 rc = ksocknal_start_schedulers(sched);
2675                 if (rc != 0)
2676                         return rc;
2677         }
2678         return 0;
2679 }
2680
2681 int
2682 ksocknal_startup(struct lnet_ni *ni)
2683 {
2684         struct ksock_net *net;
2685         struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
2686         struct ksock_interface *ksi = NULL;
2687         struct lnet_inetdev *ifaces = NULL;
2688         int i = 0;
2689         int rc;
2690
2691         LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
2692
2693         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2694                 rc = ksocknal_base_startup();
2695                 if (rc != 0)
2696                         return rc;
2697         }
2698
2699         LIBCFS_ALLOC(net, sizeof(*net));
2700         if (net == NULL)
2701                 goto fail_0;
2702
2703         spin_lock_init(&net->ksnn_lock);
2704         net->ksnn_incarnation = ktime_get_real_ns();
2705         ni->ni_data = net;
2706         net_tunables = &ni->ni_net->net_tunables;
2707
2708         if (net_tunables->lct_peer_timeout == -1)
2709                 net_tunables->lct_peer_timeout =
2710                         *ksocknal_tunables.ksnd_peertimeout;
2711
2712         if (net_tunables->lct_max_tx_credits == -1)
2713                 net_tunables->lct_max_tx_credits =
2714                         *ksocknal_tunables.ksnd_credits;
2715
2716         if (net_tunables->lct_peer_tx_credits == -1)
2717                 net_tunables->lct_peer_tx_credits =
2718                         *ksocknal_tunables.ksnd_peertxcredits;
2719
2720         if (net_tunables->lct_peer_tx_credits >
2721             net_tunables->lct_max_tx_credits)
2722                 net_tunables->lct_peer_tx_credits =
2723                         net_tunables->lct_max_tx_credits;
2724
2725         if (net_tunables->lct_peer_rtr_credits == -1)
2726                 net_tunables->lct_peer_rtr_credits =
2727                         *ksocknal_tunables.ksnd_peerrtrcredits;
2728
2729         rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
2730         if (rc < 0)
2731                 goto fail_1;
2732
2733         if (!ni->ni_interfaces[0]) {
2734                 ksi = &net->ksnn_interfaces[0];
2735
2736                 /* Use the first discovered interface */
2737                 net->ksnn_ninterfaces = 1;
2738                 ni->ni_dev_cpt = ifaces[0].li_cpt;
2739                 ksi->ksni_ipaddr = ifaces[0].li_ipaddr;
2740                 ksi->ksni_netmask = ifaces[0].li_netmask;
2741                 strlcpy(ksi->ksni_name, ifaces[0].li_name,
2742                         sizeof(ksi->ksni_name));
2743         } else {
2744                 /* Before Multi-Rail ksocklnd would manage
2745                  * multiple interfaces with its own tcp bonding.
2746                  * If we encounter an old configuration using
2747                  * this tcp bonding approach then we need to
2748                  * handle more than one ni_interface.
2749                  *
2750                  * In Multi-Rail configuration only ONE ni_interface
2751                  * should exist. Each IP alias should be mapped to
2752                  * each 'struct lnet_ni'.
2753                  */
2754                 for (i = 0; i < LNET_INTERFACES_NUM; i++) {
2755                         int j;
2756
2757                         if (!ni->ni_interfaces[i])
2758                                 break;
2759
2760                         for (j = 0; j < LNET_INTERFACES_NUM;  j++) {
2761                                 if (i != j && ni->ni_interfaces[j] &&
2762                                     strcmp(ni->ni_interfaces[i],
2763                                            ni->ni_interfaces[j]) == 0) {
2764                                         rc = -EEXIST;
2765                                         CERROR("ksocklnd: found duplicate %s at %d and %d, rc = %d\n",
2766                                                ni->ni_interfaces[i], i, j, rc);
2767                                         goto fail_1;
2768                                 }
2769                         }
2770
2771                         for (j = 0; j < rc; j++) {
2772                                 if (strcmp(ifaces[j].li_name,
2773                                            ni->ni_interfaces[i]) != 0)
2774                                         continue;
2775
2776                                 ksi = &net->ksnn_interfaces[j];
2777                                 ni->ni_dev_cpt = ifaces[j].li_cpt;
2778                                 ksi->ksni_ipaddr = ifaces[j].li_ipaddr;
2779                                 ksi->ksni_netmask = ifaces[j].li_netmask;
2780                                 strlcpy(ksi->ksni_name, ifaces[j].li_name,
2781                                         sizeof(ksi->ksni_name));
2782                                 net->ksnn_ninterfaces++;
2783                                 break;
2784                         }
2785                 }
2786                 /* not every requested ni_interface matched a discovered interface */
2787                 if (!ksi || net->ksnn_ninterfaces != i) {
2788                         CERROR("ksocklnd: requested %d but only %d interfaces found\n",
2789                                i, net->ksnn_ninterfaces);
2790                         goto fail_1;
2791                 }
2792         }
2793
2794         /* call this before adding the net to ksocknal_data.ksnd_nets */
2795         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2796         if (rc != 0)
2797                 goto fail_1;
2798
2799         LASSERT(ksi);
2800         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ksi->ksni_ipaddr);
2801         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2802
2803         ksocknal_data.ksnd_nnets++;
2804
2805         return 0;
2806
2807  fail_1:
2808         LIBCFS_FREE(net, sizeof(*net));
2809  fail_0:
2810         if (ksocknal_data.ksnd_nnets == 0)
2811                 ksocknal_base_shutdown();
2812
2813         return -ENETDOWN;
2814 }
2815
2816
2817 static void __exit ksocklnd_exit(void)
2818 {
2819         lnet_unregister_lnd(&the_ksocklnd);
2820 }
2821
2822 static int __init ksocklnd_init(void)
2823 {
2824         int rc;
2825
2826         /* check ksnr_connected/connecting field large enough */
2827         BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
2828         BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
2829
2830         /* initialize the_ksocklnd */
2831         the_ksocklnd.lnd_type     = SOCKLND;
2832         the_ksocklnd.lnd_startup  = ksocknal_startup;
2833         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2834         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2835         the_ksocklnd.lnd_send     = ksocknal_send;
2836         the_ksocklnd.lnd_recv     = ksocknal_recv;
2837         the_ksocklnd.lnd_notify_peer_down   = ksocknal_notify_gw_down;
2838         the_ksocklnd.lnd_query    = ksocknal_query;
2839         the_ksocklnd.lnd_accept   = ksocknal_accept;
2840
2841         rc = ksocknal_tunables_init();
2842         if (rc != 0)
2843                 return rc;
2844
2845         lnet_register_lnd(&the_ksocklnd);
2846
2847         return 0;
2848 }
2849
2850 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2851 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2852 MODULE_VERSION("2.8.0");
2853 MODULE_LICENSE("GPL");
2854
2855 module_init(ksocklnd_init);
2856 module_exit(ksocklnd_exit);