1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/socklnd/socklnd.c
33  *
34  * Author: Zach Brown <zab@zabbo.net>
35  * Author: Peter J. Braam <braam@clusterfs.com>
36  * Author: Phil Schwan <phil@clusterfs.com>
37  * Author: Eric Barton <eric@bartonsoftware.com>
38  */
39
40 #include <linux/pci.h>
41 #include "socklnd.h"
42
43 static struct lnet_lnd the_ksocklnd;
44 ksock_nal_data_t        ksocknal_data;
45
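/*
 * Return the interface in @ni's interface table whose address is @ip,
 * or NULL if @ip is not a local interface address.  Callers hold
 * ksnd_global_lock to keep the interface table stable.
 */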
46 static ksock_interface_t *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
48 {
49         ksock_net_t       *net = ni->ni_data;
50         int                i;
51         ksock_interface_t *iface;
52
53         for (i = 0; i < net->ksnn_ninterfaces; i++) {
54                 LASSERT(i < LNET_MAX_INTERFACES);
55                 iface = &net->ksnn_interfaces[i];
56
57                 if (iface->ksni_ipaddr == ip)
58                         return (iface);
59         }
60
61         return (NULL);
62 }
63
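/*
 * Allocate and initialise a route to @ipaddr:@port.  The route starts
 * unbound and unconnected, with a single reference owned by the caller;
 * returns NULL on allocation failure.
 */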
64 static ksock_route_t *
65 ksocknal_create_route (__u32 ipaddr, int port)
66 {
67         ksock_route_t *route;
68
69         LIBCFS_ALLOC (route, sizeof (*route));
70         if (route == NULL)
71                 return (NULL);
72
73         atomic_set (&route->ksnr_refcount, 1);
74         route->ksnr_peer = NULL;
75         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
76         route->ksnr_ipaddr = ipaddr;
77         route->ksnr_port = port;
78         route->ksnr_scheduled = 0;
79         route->ksnr_connecting = 0;
80         route->ksnr_connected = 0;
81         route->ksnr_deleted = 0;
82         route->ksnr_conn_count = 0;
83         route->ksnr_share_count = 0;
84
85         return (route);
86 }
87
88 void
89 ksocknal_destroy_route (ksock_route_t *route)
90 {
91         LASSERT (atomic_read(&route->ksnr_refcount) == 0);
92
93         if (route->ksnr_peer != NULL)
94                 ksocknal_peer_decref(route->ksnr_peer);
95
96         LIBCFS_FREE (route, sizeof (*route));
97 }
98
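/*
 * Allocate a peer_ni for @id on network interface @ni and return it in
 * *peerp with one reference for the caller.  Returns -ENOMEM on
 * allocation failure or -ESHUTDOWN if the network is being shut down.
 */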
99 static int
100 ksocknal_create_peer(ksock_peer_ni_t **peerp, struct lnet_ni *ni,
101                      struct lnet_process_id id)
102 {
103         int             cpt = lnet_cpt_of_nid(id.nid, ni);
104         ksock_net_t     *net = ni->ni_data;
105         ksock_peer_ni_t *peer_ni;
106
107         LASSERT(id.nid != LNET_NID_ANY);
108         LASSERT(id.pid != LNET_PID_ANY);
109         LASSERT(!in_interrupt());
110
111         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
112         if (peer_ni == NULL)
113                 return -ENOMEM;
114
115         peer_ni->ksnp_ni = ni;
116         peer_ni->ksnp_id = id;
117         atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
118         peer_ni->ksnp_closing = 0;
119         peer_ni->ksnp_accepting = 0;
120         peer_ni->ksnp_proto = NULL;
121         peer_ni->ksnp_last_alive = 0;
122         peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
123
124         INIT_LIST_HEAD(&peer_ni->ksnp_conns);
125         INIT_LIST_HEAD(&peer_ni->ksnp_routes);
126         INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
127         INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
128         spin_lock_init(&peer_ni->ksnp_lock);
129
130         spin_lock_bh(&net->ksnn_lock);
131
132         if (net->ksnn_shutdown) {
133                 spin_unlock_bh(&net->ksnn_lock);
134
135                 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
136                 CERROR("Can't create peer_ni: network shutdown\n");
137                 return -ESHUTDOWN;
138         }
139
140         net->ksnn_npeers++;
141
142         spin_unlock_bh(&net->ksnn_lock);
143
144         *peerp = peer_ni;
145         return 0;
146 }
147
148 void
149 ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
150 {
151         ksock_net_t    *net = peer_ni->ksnp_ni->ni_data;
152
153         CDEBUG (D_NET, "peer_ni %s %p deleted\n",
154                 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
155
156         LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
157         LASSERT(peer_ni->ksnp_accepting == 0);
158         LASSERT(list_empty(&peer_ni->ksnp_conns));
159         LASSERT(list_empty(&peer_ni->ksnp_routes));
160         LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
161         LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
162
163         LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
164
165         /* NB a peer_ni's connections and routes keep a reference on their peer_ni
166          * until they are destroyed, so we can be assured that _all_ state to
167          * do with this peer_ni has been cleaned up when its refcount drops to
168          * zero. */
169         spin_lock_bh(&net->ksnn_lock);
170         net->ksnn_npeers--;
171         spin_unlock_bh(&net->ksnn_lock);
172 }
173
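/*
 * Look up the peer_ni for @id on @ni in the global peer hash table.
 * Called with ksnd_global_lock held; no reference is taken on the
 * peer_ni returned.
 */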
174 ksock_peer_ni_t *
175 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
176 {
177         struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178         struct list_head *tmp;
179         ksock_peer_ni_t  *peer_ni;
180
181         list_for_each(tmp, peer_list) {
182
183                 peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
184
185                 LASSERT(!peer_ni->ksnp_closing);
186
187                 if (peer_ni->ksnp_ni != ni)
188                         continue;
189
190                 if (peer_ni->ksnp_id.nid != id.nid ||
191                     peer_ni->ksnp_id.pid != id.pid)
192                         continue;
193
194                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
195                        peer_ni, libcfs_id2str(id),
196                        atomic_read(&peer_ni->ksnp_refcount));
197                 return peer_ni;
198         }
199         return NULL;
200 }
201
202 ksock_peer_ni_t *
203 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
204 {
205         ksock_peer_ni_t     *peer_ni;
206
207         read_lock(&ksocknal_data.ksnd_global_lock);
208         peer_ni = ksocknal_find_peer_locked(ni, id);
209         if (peer_ni != NULL)                    /* +1 ref for caller? */
210                 ksocknal_peer_addref(peer_ni);
211         read_unlock(&ksocknal_data.ksnd_global_lock);
212
213         return (peer_ni);
214 }
215
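/*
 * Remove @peer_ni from the peer hash table, release the passive-IP
 * counts it holds on local interfaces and drop the table's reference.
 * The peer_ni must have no remaining conns or routes.
 */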
216 static void
217 ksocknal_unlink_peer_locked (ksock_peer_ni_t *peer_ni)
218 {
219         int                i;
220         __u32              ip;
221         ksock_interface_t *iface;
222
223         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
224                 LASSERT (i < LNET_MAX_INTERFACES);
225                 ip = peer_ni->ksnp_passive_ips[i];
226
227                 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
228                 /* All IPs in peer_ni->ksnp_passive_ips[] come from the
229                  * interface list, therefore the call must succeed. */
230                 LASSERT (iface != NULL);
231
232                 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
233                        peer_ni, iface, iface->ksni_nroutes);
234                 iface->ksni_npeers--;
235         }
236
237         LASSERT(list_empty(&peer_ni->ksnp_conns));
238         LASSERT(list_empty(&peer_ni->ksnp_routes));
239         LASSERT(!peer_ni->ksnp_closing);
240         peer_ni->ksnp_closing = 1;
241         list_del(&peer_ni->ksnp_list);
242         /* lose peerlist's ref */
243         ksocknal_peer_decref(peer_ni);
244 }
245
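/*
 * Return information about the @index'th peer_ni entry on @ni, where
 * each passive IP and each route of a peer_ni counts as a separate
 * entry.  Returns 0 on success or -ENOENT if @index is out of range.
 */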
246 static int
247 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
248                        struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
249                        int *port, int *conn_count, int *share_count)
250 {
251         ksock_peer_ni_t   *peer_ni;
252         struct list_head  *ptmp;
253         ksock_route_t     *route;
254         struct list_head  *rtmp;
255         int                i;
256         int                j;
257         int                rc = -ENOENT;
258
259         read_lock(&ksocknal_data.ksnd_global_lock);
260
261         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
262                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
263                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
264
265                         if (peer_ni->ksnp_ni != ni)
266                                 continue;
267
268                         if (peer_ni->ksnp_n_passive_ips == 0 &&
269                             list_empty(&peer_ni->ksnp_routes)) {
270                                 if (index-- > 0)
271                                         continue;
272
273                                 *id = peer_ni->ksnp_id;
274                                 *myip = 0;
275                                 *peer_ip = 0;
276                                 *port = 0;
277                                 *conn_count = 0;
278                                 *share_count = 0;
279                                 rc = 0;
280                                 goto out;
281                         }
282
283                         for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
284                                 if (index-- > 0)
285                                         continue;
286
287                                 *id = peer_ni->ksnp_id;
288                                 *myip = peer_ni->ksnp_passive_ips[j];
289                                 *peer_ip = 0;
290                                 *port = 0;
291                                 *conn_count = 0;
292                                 *share_count = 0;
293                                 rc = 0;
294                                 goto out;
295                         }
296
297                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
298                                 if (index-- > 0)
299                                         continue;
300
301                                 route = list_entry(rtmp, ksock_route_t,
302                                                    ksnr_list);
303
304                                 *id = peer_ni->ksnp_id;
305                                 *myip = route->ksnr_myipaddr;
306                                 *peer_ip = route->ksnr_ipaddr;
307                                 *port = route->ksnr_port;
308                                 *conn_count = route->ksnr_conn_count;
309                                 *share_count = route->ksnr_share_count;
310                                 rc = 0;
311                                 goto out;
312                         }
313                 }
314         }
315 out:
316         read_unlock(&ksocknal_data.ksnd_global_lock);
317         return rc;
318 }
319
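/*
 * Bind @conn to @route: record the local address the connection
 * actually used (adjusting interface route counts if it changed), mark
 * the route connected for this connection type and clear its retry
 * interval so further attempts may proceed immediately.
 */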
320 static void
321 ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
322 {
323         ksock_peer_ni_t   *peer_ni = route->ksnr_peer;
324         int                type = conn->ksnc_type;
325         ksock_interface_t *iface;
326
327         conn->ksnc_route = route;
328         ksocknal_route_addref(route);
329
330         if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
331                 if (route->ksnr_myipaddr == 0) {
332                         /* route wasn't bound locally yet (the initial route) */
333                         CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
334                                libcfs_id2str(peer_ni->ksnp_id),
335                                &route->ksnr_ipaddr,
336                                &conn->ksnc_myipaddr);
337                 } else {
338                         CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
339                                "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
340                                &route->ksnr_ipaddr,
341                                &route->ksnr_myipaddr,
342                                &conn->ksnc_myipaddr);
343
344                         iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
345                                                   route->ksnr_myipaddr);
346                         if (iface != NULL)
347                                 iface->ksni_nroutes--;
348                 }
349                 route->ksnr_myipaddr = conn->ksnc_myipaddr;
350                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
351                                           route->ksnr_myipaddr);
352                 if (iface != NULL)
353                         iface->ksni_nroutes++;
354         }
355
356         route->ksnr_connected |= (1<<type);
357         route->ksnr_conn_count++;
358
359         /* Successful connection => further attempts can
360          * proceed immediately */
361         route->ksnr_retry_interval = 0;
362 }
363
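/*
 * Attach @route to @peer_ni.  The peer_ni's route list takes over the
 * caller's reference on @route, and any existing conns to the same
 * address are associated with it.
 */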
364 static void
365 ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
366 {
367         struct list_head *tmp;
368         ksock_conn_t     *conn;
369         ksock_route_t    *route2;
370
371         LASSERT(!peer_ni->ksnp_closing);
372         LASSERT(route->ksnr_peer == NULL);
373         LASSERT(!route->ksnr_scheduled);
374         LASSERT(!route->ksnr_connecting);
375         LASSERT(route->ksnr_connected == 0);
376
377         /* LASSERT(unique) */
378         list_for_each(tmp, &peer_ni->ksnp_routes) {
379                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
380
381                 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
382                         CERROR("Duplicate route %s %pI4h\n",
383                                libcfs_id2str(peer_ni->ksnp_id),
384                                &route->ksnr_ipaddr);
385                         LBUG();
386                 }
387         }
388
389         route->ksnr_peer = peer_ni;
390         ksocknal_peer_addref(peer_ni);
391         /* peer_ni's routelist takes over my ref on 'route' */
392         list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
393
394         list_for_each(tmp, &peer_ni->ksnp_conns) {
395                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
396
397                 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
398                         continue;
399
400                 ksocknal_associate_route_conn_locked(route, conn);
401                 /* keep going (typed routes) */
402         }
403 }
404
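/*
 * Delete @route: close its associated conns, drop the peer_ni's
 * reference on it and unlink the peer_ni itself if this was its last
 * route and it has no remaining conns.
 */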
405 static void
406 ksocknal_del_route_locked (ksock_route_t *route)
407 {
408         ksock_peer_ni_t   *peer_ni = route->ksnr_peer;
409         ksock_interface_t *iface;
410         ksock_conn_t      *conn;
411         struct list_head  *ctmp;
412         struct list_head  *cnxt;
413
414         LASSERT(!route->ksnr_deleted);
415
416         /* Close associated conns */
417         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
418                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
419
420                 if (conn->ksnc_route != route)
421                         continue;
422
423                 ksocknal_close_conn_locked(conn, 0);
424         }
425
426         if (route->ksnr_myipaddr != 0) {
427                 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
428                                           route->ksnr_myipaddr);
429                 if (iface != NULL)
430                         iface->ksni_nroutes--;
431         }
432
433         route->ksnr_deleted = 1;
434         list_del(&route->ksnr_list);
435         ksocknal_route_decref(route);           /* drop peer_ni's ref */
436
437         if (list_empty(&peer_ni->ksnp_routes) &&
438             list_empty(&peer_ni->ksnp_conns)) {
439                 /* I've just removed the last route to a peer_ni with no active
440                  * connections */
441                 ksocknal_unlink_peer_locked(peer_ni);
442         }
443 }
444
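/*
 * Add a peer_ni and route for @id at @ipaddr:@port, creating the
 * peer_ni if it does not exist yet.  If an equivalent route already
 * exists, only its share count is bumped.
 */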
445 int
446 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
447                   int port)
448 {
449         struct list_head *tmp;
450         ksock_peer_ni_t  *peer_ni;
451         ksock_peer_ni_t  *peer2;
452         ksock_route_t    *route;
453         ksock_route_t    *route2;
454         int               rc;
455
456         if (id.nid == LNET_NID_ANY ||
457             id.pid == LNET_PID_ANY)
458                 return (-EINVAL);
459
460         /* Have a brand new peer_ni ready... */
461         rc = ksocknal_create_peer(&peer_ni, ni, id);
462         if (rc != 0)
463                 return rc;
464
465         route = ksocknal_create_route (ipaddr, port);
466         if (route == NULL) {
467                 ksocknal_peer_decref(peer_ni);
468                 return (-ENOMEM);
469         }
470
471         write_lock_bh(&ksocknal_data.ksnd_global_lock);
472
473         /* always called with a ref on ni, so shutdown can't have started */
474         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
475
476         peer2 = ksocknal_find_peer_locked(ni, id);
477         if (peer2 != NULL) {
478                 ksocknal_peer_decref(peer_ni);
479                 peer_ni = peer2;
480         } else {
481                 /* peer_ni table takes my ref on peer_ni */
482                 list_add_tail(&peer_ni->ksnp_list,
483                               ksocknal_nid2peerlist(id.nid));
484         }
485
486         route2 = NULL;
487         list_for_each(tmp, &peer_ni->ksnp_routes) {
488                 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
489
490                 if (route2->ksnr_ipaddr == ipaddr)
491                         break;
492
493                 route2 = NULL;
494         }
495         if (route2 == NULL) {
496                 ksocknal_add_route_locked(peer_ni, route);
497                 route->ksnr_share_count++;
498         } else {
499                 ksocknal_route_decref(route);
500                 route2->ksnr_share_count++;
501         }
502
503         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
504
505         return 0;
506 }
507
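/*
 * Delete the routes of @peer_ni matching @ip (or all routes if @ip is
 * zero).  If no explicitly shared routes remain afterwards, the
 * remaining auto-created routes and all conns are removed as well.
 */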
508 static void
509 ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
510 {
511         ksock_conn_t     *conn;
512         ksock_route_t    *route;
513         struct list_head *tmp;
514         struct list_head *nxt;
515         int               nshared;
516
517         LASSERT(!peer_ni->ksnp_closing);
518
519         /* Extra ref prevents peer_ni disappearing until I'm done with it */
520         ksocknal_peer_addref(peer_ni);
521
522         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
523                 route = list_entry(tmp, ksock_route_t, ksnr_list);
524
525                 /* no match */
526                 if (!(ip == 0 || route->ksnr_ipaddr == ip))
527                         continue;
528
529                 route->ksnr_share_count = 0;
530                 /* This deletes associated conns too */
531                 ksocknal_del_route_locked(route);
532         }
533
534         nshared = 0;
535         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
536                 route = list_entry(tmp, ksock_route_t, ksnr_list);
537                 nshared += route->ksnr_share_count;
538         }
539
540         if (nshared == 0) {
541                 /* remove everything else if there are no explicit entries
542                  * left */
543
544                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
545                         route = list_entry(tmp, ksock_route_t, ksnr_list);
546
547                         /* we should only be removing auto-entries */
548                         LASSERT(route->ksnr_share_count == 0);
549                         ksocknal_del_route_locked(route);
550                 }
551
552                 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
553                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
554
555                         ksocknal_close_conn_locked(conn, 0);
556                 }
557         }
558
559         ksocknal_peer_decref(peer_ni);
560         /* NB peer_ni unlinks itself when last conn/route is removed */
561 }
562
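/*
 * Delete peer_nis on @ni matching @id (LNET_NID_ANY/LNET_PID_ANY act as
 * wildcards), restricting deletion to routes through @ip if it is
 * non-zero.  TXs still queued on deleted peer_nis are handed to
 * ksocknal_txlist_done().  Returns 0 if anything matched, else -ENOENT.
 */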
563 static int
564 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
565 {
566         struct list_head  zombies = LIST_HEAD_INIT(zombies);
567         struct list_head *ptmp;
568         struct list_head *pnxt;
569         ksock_peer_ni_t     *peer_ni;
570         int               lo;
571         int               hi;
572         int               i;
573         int               rc = -ENOENT;
574
575         write_lock_bh(&ksocknal_data.ksnd_global_lock);
576
577         if (id.nid != LNET_NID_ANY) {
578                 hi = (int)(ksocknal_nid2peerlist(id.nid) -
579                            ksocknal_data.ksnd_peers);
580                 lo = hi;
581         } else {
582                 lo = 0;
583                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
584         }
585
586         for (i = lo; i <= hi; i++) {
587                 list_for_each_safe(ptmp, pnxt,
588                                    &ksocknal_data.ksnd_peers[i]) {
589                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
590
591                         if (peer_ni->ksnp_ni != ni)
592                                 continue;
593
594                         if (!((id.nid == LNET_NID_ANY ||
595                                peer_ni->ksnp_id.nid == id.nid) &&
596                               (id.pid == LNET_PID_ANY ||
597                                peer_ni->ksnp_id.pid == id.pid)))
598                                 continue;
599
600                         ksocknal_peer_addref(peer_ni);  /* a ref for me... */
601
602                         ksocknal_del_peer_locked(peer_ni, ip);
603
604                         if (peer_ni->ksnp_closing &&
605                             !list_empty(&peer_ni->ksnp_tx_queue)) {
606                                 LASSERT(list_empty(&peer_ni->ksnp_conns));
607                                 LASSERT(list_empty(&peer_ni->ksnp_routes));
608
609                                 list_splice_init(&peer_ni->ksnp_tx_queue,
610                                                  &zombies);
611                         }
612
613                         ksocknal_peer_decref(peer_ni);  /* ...till here */
614
615                         rc = 0;                         /* matched! */
616                 }
617         }
618
619         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
620
621         ksocknal_txlist_done(ni, &zombies, 1);
622
623         return rc;
624 }
625
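/*
 * Return the @index'th connection on @ni with a reference held for the
 * caller, or NULL if @index is out of range.
 */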
626 static ksock_conn_t *
627 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
628 {
629         ksock_peer_ni_t  *peer_ni;
630         struct list_head *ptmp;
631         ksock_conn_t     *conn;
632         struct list_head *ctmp;
633         int               i;
634
635         read_lock(&ksocknal_data.ksnd_global_lock);
636
637         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
638                 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
639                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
640
641                         LASSERT(!peer_ni->ksnp_closing);
642
643                         if (peer_ni->ksnp_ni != ni)
644                                 continue;
645
646                         list_for_each(ctmp, &peer_ni->ksnp_conns) {
647                                 if (index-- > 0)
648                                         continue;
649
650                                 conn = list_entry(ctmp, ksock_conn_t,
651                                                   ksnc_list);
652                                 ksocknal_conn_addref(conn);
653                                 read_unlock(
654                                         &ksocknal_data.ksnd_global_lock);
655                                 return conn;
656                         }
657                 }
658         }
659
660         read_unlock(&ksocknal_data.ksnd_global_lock);
661         return NULL;
662 }
663
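/*
 * Pick the scheduler thread in CPT @cpt currently handling the fewest
 * connections.
 */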
664 static ksock_sched_t *
665 ksocknal_choose_scheduler_locked(unsigned int cpt)
666 {
667         struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
668         ksock_sched_t           *sched;
669         int                     i;
670
671         LASSERT(info->ksi_nthreads > 0);
672
673         sched = &info->ksi_scheds[0];
674         /*
675          * NB: it's safe so far, but info->ksi_nthreads could be changed
676          * at runtime when we have dynamic LNet configuration, then we
677          * need to take care of this.
678          */
679         for (i = 1; i < info->ksi_nthreads; i++) {
680                 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
681                         sched = &info->ksi_scheds[i];
682         }
683
684         return sched;
685 }
686
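/*
 * Copy this NI's interface addresses into @ipaddrs and return how many
 * there are.  Returns 0 when there is only a single interface, since
 * additional connections are only offered on multi-homed nodes.
 */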
687 static int
688 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
689 {
690         ksock_net_t       *net = ni->ni_data;
691         int                i;
692         int                nip;
693
694         read_lock(&ksocknal_data.ksnd_global_lock);
695
696         nip = net->ksnn_ninterfaces;
697         LASSERT (nip <= LNET_MAX_INTERFACES);
698
699         /* Only offer interfaces for additional connections if I have
700          * more than one. */
701         if (nip < 2) {
702                 read_unlock(&ksocknal_data.ksnd_global_lock);
703                 return 0;
704         }
705
706         for (i = 0; i < nip; i++) {
707                 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
708                 LASSERT (ipaddrs[i] != 0);
709         }
710
711         read_unlock(&ksocknal_data.ksnd_global_lock);
712         return (nip);
713 }
714
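/*
 * Return the index of the address in @ips that best matches @iface,
 * preferring addresses on the same subnet and, within that, the
 * smallest XOR distance.  Zeroed entries are skipped.
 */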
715 static int
716 ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
717 {
718         int   best_netmatch = 0;
719         int   best_xor      = 0;
720         int   best          = -1;
721         int   this_xor;
722         int   this_netmatch;
723         int   i;
724
725         for (i = 0; i < nips; i++) {
726                 if (ips[i] == 0)
727                         continue;
728
729                 this_xor = (ips[i] ^ iface->ksni_ipaddr);
730                 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
731
732                 if (!(best < 0 ||
733                       best_netmatch < this_netmatch ||
734                       (best_netmatch == this_netmatch &&
735                        best_xor > this_xor)))
736                         continue;
737
738                 best = i;
739                 best_netmatch = this_netmatch;
740                 best_xor = this_xor;
741         }
742
743         LASSERT (best >= 0);
744         return (best);
745 }
746
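/*
 * Choose which local interfaces to advertise to @peer_ni, pairing each
 * of the peer_ni's @n_peerips addresses with the best-matching local
 * interface.  The selection is stored in ksnp_passive_ips and copied
 * back over @peerips; returns the number of addresses selected.
 */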
747 static int
748 ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
749 {
750         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
751         ksock_net_t        *net = peer_ni->ksnp_ni->ni_data;
752         ksock_interface_t  *iface;
753         ksock_interface_t  *best_iface;
754         int                 n_ips;
755         int                 i;
756         int                 j;
757         int                 k;
758         __u32               ip;
759         __u32               xor;
760         int                 this_netmatch;
761         int                 best_netmatch;
762         int                 best_npeers;
763
764         /* CAVEAT EMPTOR: We do all our interface matching with an
765          * exclusive hold of global lock at IRQ priority.  We're only
766          * expecting to be dealing with small numbers of interfaces, so the
767          * O(n**3)-ness shouldn't matter */
768
769         /* Also note that I'm not going to return more than n_peerips
770          * interfaces, even if I have more myself */
771
772         write_lock_bh(global_lock);
773
774         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
775         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
776
777         /* Only match interfaces for additional connections
778          * if I have > 1 interface */
779         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
780                 MIN(n_peerips, net->ksnn_ninterfaces);
781
782         for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
783                 /*              ^ yes really... */
784
785                 /* If we have any new interfaces, first tick off all the
786                  * peer_ni IPs that match old interfaces, then choose new
787                  * interfaces to match the remaining peer_ni IPS.
788                  * We don't forget interfaces we've stopped using; we might
789                  * start using them again... */
790
791                 if (i < peer_ni->ksnp_n_passive_ips) {
792                         /* Old interface. */
793                         ip = peer_ni->ksnp_passive_ips[i];
794                         best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
795
796                         /* peer_ni passive ips are kept up to date */
797                         LASSERT(best_iface != NULL);
798                 } else {
799                         /* choose a new interface */
800                         LASSERT (i == peer_ni->ksnp_n_passive_ips);
801
802                         best_iface = NULL;
803                         best_netmatch = 0;
804                         best_npeers = 0;
805
806                         for (j = 0; j < net->ksnn_ninterfaces; j++) {
807                                 iface = &net->ksnn_interfaces[j];
808                                 ip = iface->ksni_ipaddr;
809
810                                 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
811                                         if (peer_ni->ksnp_passive_ips[k] == ip)
812                                                 break;
813
814                                 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
815                                         continue;
816
817                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
818                                 xor = (ip ^ peerips[k]);
819                                 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
820
821                                 if (!(best_iface == NULL ||
822                                       best_netmatch < this_netmatch ||
823                                       (best_netmatch == this_netmatch &&
824                                        best_npeers > iface->ksni_npeers)))
825                                         continue;
826
827                                 best_iface = iface;
828                                 best_netmatch = this_netmatch;
829                                 best_npeers = iface->ksni_npeers;
830                         }
831
832                         LASSERT(best_iface != NULL);
833
834                         best_iface->ksni_npeers++;
835                         ip = best_iface->ksni_ipaddr;
836                         peer_ni->ksnp_passive_ips[i] = ip;
837                         peer_ni->ksnp_n_passive_ips = i+1;
838                 }
839
840                 /* mark the best matching peer_ni IP used */
841                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
842                 peerips[j] = 0;
843         }
844
845         /* Overwrite input peer_ni IP addresses */
846         memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
847
848         write_unlock_bh(global_lock);
849
850         return (n_ips);
851 }
852
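/*
 * Create routes to each of the @npeer_ipaddrs addresses advertised by
 * @peer_ni (port @port), choosing for each one the local interface with
 * the best subnet match and the fewest existing routes.  Only done on
 * multi-homed nodes; addresses that already have a route are skipped.
 */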
853 static void
854 ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
855                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
856 {
857         ksock_route_t           *newroute = NULL;
858         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
859         struct lnet_ni *ni = peer_ni->ksnp_ni;
860         ksock_net_t             *net = ni->ni_data;
861         struct list_head        *rtmp;
862         ksock_route_t           *route;
863         ksock_interface_t       *iface;
864         ksock_interface_t       *best_iface;
865         int                     best_netmatch;
866         int                     this_netmatch;
867         int                     best_nroutes;
868         int                     i;
869         int                     j;
870
871         /* CAVEAT EMPTOR: We do all our interface matching with an
872          * exclusive hold of global lock at IRQ priority.  We're only
873          * expecting to be dealing with small numbers of interfaces, so the
874          * O(n**3)-ness here shouldn't matter */
875
876         write_lock_bh(global_lock);
877
878         if (net->ksnn_ninterfaces < 2) {
879                 /* Only create additional connections
880                  * if I have > 1 interface */
881                 write_unlock_bh(global_lock);
882                 return;
883         }
884
885         LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
886
887         for (i = 0; i < npeer_ipaddrs; i++) {
888                 if (newroute != NULL) {
889                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
890                 } else {
891                         write_unlock_bh(global_lock);
892
893                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
894                         if (newroute == NULL)
895                                 return;
896
897                         write_lock_bh(global_lock);
898                 }
899
900                 if (peer_ni->ksnp_closing) {
901                         /* peer_ni got closed under me */
902                         break;
903                 }
904
905                 /* Already got a route? */
906                 route = NULL;
907                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
908                         route = list_entry(rtmp, ksock_route_t, ksnr_list);
909
910                         if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
911                                 break;
912
913                         route = NULL;
914                 }
915                 if (route != NULL)
916                         continue;
917
918                 best_iface = NULL;
919                 best_nroutes = 0;
920                 best_netmatch = 0;
921
922                 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
923
924                 /* Select interface to connect from */
925                 for (j = 0; j < net->ksnn_ninterfaces; j++) {
926                         iface = &net->ksnn_interfaces[j];
927
928                         /* Using this interface already? */
929                         list_for_each(rtmp, &peer_ni->ksnp_routes) {
930                                 route = list_entry(rtmp, ksock_route_t,
931                                                    ksnr_list);
932
933                                 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
934                                         break;
935
936                                 route = NULL;
937                         }
938                         if (route != NULL)
939                                 continue;
940
941                         this_netmatch = (((iface->ksni_ipaddr ^
942                                            newroute->ksnr_ipaddr) &
943                                            iface->ksni_netmask) == 0) ? 1 : 0;
944
945                         if (!(best_iface == NULL ||
946                               best_netmatch < this_netmatch ||
947                               (best_netmatch == this_netmatch &&
948                                best_nroutes > iface->ksni_nroutes)))
949                                 continue;
950
951                         best_iface = iface;
952                         best_netmatch = this_netmatch;
953                         best_nroutes = iface->ksni_nroutes;
954                 }
955
956                 if (best_iface == NULL)
957                         continue;
958
959                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
960                 best_iface->ksni_nroutes++;
961
962                 ksocknal_add_route_locked(peer_ni, newroute);
963                 newroute = NULL;
964         }
965
966         write_unlock_bh(global_lock);
967         if (newroute != NULL)
968                 ksocknal_route_decref(newroute);
969 }
970
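/*
 * Queue an incoming connection request on @sock for the connd threads
 * and wake one of them up; called from the LNet acceptor.
 */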
971 int
972 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
973 {
974         ksock_connreq_t *cr;
975         int              rc;
976         __u32            peer_ip;
977         int              peer_port;
978
979         rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
980         LASSERT(rc == 0);               /* we succeeded before */
981
982         LIBCFS_ALLOC(cr, sizeof(*cr));
983         if (cr == NULL) {
984                 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
985                                    "%pI4h: memory exhausted\n", &peer_ip);
986                 return -ENOMEM;
987         }
988
989         lnet_ni_addref(ni);
990         cr->ksncr_ni   = ni;
991         cr->ksncr_sock = sock;
992
993         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
994
995         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
996         wake_up(&ksocknal_data.ksnd_connd_waitq);
997
998         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
999         return 0;
1000 }
1001
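/*
 * Return non-zero if a connection attempt to @ipaddr is already in
 * progress on one of @peer_ni's routes.
 */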
1002 static int
1003 ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
1004 {
1005         ksock_route_t *route;
1006
1007         list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1008                 if (route->ksnr_ipaddr == ipaddr)
1009                         return route->ksnr_connecting;
1010         }
1011         return 0;
1012 }
1013
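/*
 * Establish a new connection on @sock.  For active connects @route
 * identifies the peer_ni and @type the connection type; for passive
 * accepts @route is NULL and the peer_ni, type and protocol are learned
 * from the HELLO exchange.  On success the conn is added to the
 * peer_ni's conn list and handed to a scheduler; returns 0 on success
 * or an error on failure.
 */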
1014 int
1015 ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
1016                      struct socket *sock, int type)
1017 {
1018         rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
1019         struct list_head        zombies = LIST_HEAD_INIT(zombies);
1020         struct lnet_process_id peerid;
1021         struct list_head        *tmp;
1022         __u64              incarnation;
1023         ksock_conn_t      *conn;
1024         ksock_conn_t      *conn2;
1025         ksock_peer_ni_t      *peer_ni = NULL;
1026         ksock_peer_ni_t      *peer2;
1027         ksock_sched_t     *sched;
1028         struct ksock_hello_msg *hello;
1029         int                cpt;
1030         ksock_tx_t        *tx;
1031         ksock_tx_t        *txtmp;
1032         int                rc;
1033         int                active;
1034         char              *warn = NULL;
1035
1036         active = (route != NULL);
1037
1038         LASSERT (active == (type != SOCKLND_CONN_NONE));
1039
1040         LIBCFS_ALLOC(conn, sizeof(*conn));
1041         if (conn == NULL) {
1042                 rc = -ENOMEM;
1043                 goto failed_0;
1044         }
1045
1046         conn->ksnc_peer = NULL;
1047         conn->ksnc_route = NULL;
1048         conn->ksnc_sock = sock;
1049         /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
1050          * being closed before the connection is established */
1051         atomic_set (&conn->ksnc_sock_refcount, 2);
1052         conn->ksnc_type = type;
1053         ksocknal_lib_save_callback(sock, conn);
1054         atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1055
1056         conn->ksnc_rx_ready = 0;
1057         conn->ksnc_rx_scheduled = 0;
1058
1059         INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1060         conn->ksnc_tx_ready = 0;
1061         conn->ksnc_tx_scheduled = 0;
1062         conn->ksnc_tx_carrier = NULL;
1063         atomic_set (&conn->ksnc_tx_nob, 0);
1064
1065         LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1066                                      kshm_ips[LNET_MAX_INTERFACES]));
1067         if (hello == NULL) {
1068                 rc = -ENOMEM;
1069                 goto failed_1;
1070         }
1071
1072         /* stash conn's local and remote addrs */
1073         rc = ksocknal_lib_get_conn_addrs (conn);
1074         if (rc != 0)
1075                 goto failed_1;
1076
1077         /* Find out/confirm peer_ni's NID and connection type and get the
1078          * vector of interfaces she's willing to let me connect to.
1079          * Passive connections use the listener timeout since the peer_ni sends
1080          * eagerly */
1081
1082         if (active) {
1083                 peer_ni = route->ksnr_peer;
1084                 LASSERT(ni == peer_ni->ksnp_ni);
1085
1086                 /* Active connection sends HELLO eagerly */
1087                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1088                 peerid = peer_ni->ksnp_id;
1089
1090                 write_lock_bh(global_lock);
1091                 conn->ksnc_proto = peer_ni->ksnp_proto;
1092                 write_unlock_bh(global_lock);
1093
1094                 if (conn->ksnc_proto == NULL) {
1095                          conn->ksnc_proto = &ksocknal_protocol_v3x;
1096 #if SOCKNAL_VERSION_DEBUG
1097                          if (*ksocknal_tunables.ksnd_protocol == 2)
1098                                  conn->ksnc_proto = &ksocknal_protocol_v2x;
1099                          else if (*ksocknal_tunables.ksnd_protocol == 1)
1100                                  conn->ksnc_proto = &ksocknal_protocol_v1x;
1101 #endif
1102                 }
1103
1104                 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1105                 if (rc != 0)
1106                         goto failed_1;
1107         } else {
1108                 peerid.nid = LNET_NID_ANY;
1109                 peerid.pid = LNET_PID_ANY;
1110
1111                 /* Passive, get protocol from peer_ni */
1112                 conn->ksnc_proto = NULL;
1113         }
1114
1115         rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1116         if (rc < 0)
1117                 goto failed_1;
1118
1119         LASSERT (rc == 0 || active);
1120         LASSERT (conn->ksnc_proto != NULL);
1121         LASSERT (peerid.nid != LNET_NID_ANY);
1122
1123         cpt = lnet_cpt_of_nid(peerid.nid, ni);
1124
1125         if (active) {
1126                 ksocknal_peer_addref(peer_ni);
1127                 write_lock_bh(global_lock);
1128         } else {
1129                 rc = ksocknal_create_peer(&peer_ni, ni, peerid);
1130                 if (rc != 0)
1131                         goto failed_1;
1132
1133                 write_lock_bh(global_lock);
1134
1135                 /* called with a ref on ni, so shutdown can't have started */
1136                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1137
1138                 peer2 = ksocknal_find_peer_locked(ni, peerid);
1139                 if (peer2 == NULL) {
1140                         /* NB this puts an "empty" peer_ni in the peer_ni
1141                          * table (which takes my ref) */
1142                         list_add_tail(&peer_ni->ksnp_list,
1143                                       ksocknal_nid2peerlist(peerid.nid));
1144                 } else {
1145                         ksocknal_peer_decref(peer_ni);
1146                         peer_ni = peer2;
1147                 }
1148
1149                 /* +1 ref for me */
1150                 ksocknal_peer_addref(peer_ni);
1151                 peer_ni->ksnp_accepting++;
1152
1153                 /* Am I already connecting to this guy?  Resolve in
1154                  * favour of higher NID... */
1155                 if (peerid.nid < ni->ni_nid &&
1156                     ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1157                         rc = EALREADY;
1158                         warn = "connection race resolution";
1159                         goto failed_2;
1160                 }
1161         }
1162
1163         if (peer_ni->ksnp_closing ||
1164             (active && route->ksnr_deleted)) {
1165                 /* peer_ni/route got closed under me */
1166                 rc = -ESTALE;
1167                 warn = "peer_ni/route removed";
1168                 goto failed_2;
1169         }
1170
1171         if (peer_ni->ksnp_proto == NULL) {
1172                 /* Never connected before.
1173                  * NB recv_hello may have returned EPROTO to signal my peer_ni
1174                  * wants a different protocol than the one I asked for.
1175                  */
1176                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1177
1178                 peer_ni->ksnp_proto = conn->ksnc_proto;
1179                 peer_ni->ksnp_incarnation = incarnation;
1180         }
1181
1182         if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1183             peer_ni->ksnp_incarnation != incarnation) {
1184                 /* peer_ni rebooted or I've got the wrong protocol version */
1185                 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1186
1187                 peer_ni->ksnp_proto = NULL;
1188                 rc = ESTALE;
1189                 warn = peer_ni->ksnp_incarnation != incarnation ?
1190                        "peer_ni rebooted" :
1191                        "wrong proto version";
1192                 goto failed_2;
1193         }
1194
1195         switch (rc) {
1196         default:
1197                 LBUG();
1198         case 0:
1199                 break;
1200         case EALREADY:
1201                 warn = "lost conn race";
1202                 goto failed_2;
1203         case EPROTO:
1204                 warn = "retry with different protocol version";
1205                 goto failed_2;
1206         }
1207
1208         /* Refuse to duplicate an existing connection, unless this is a
1209          * loopback connection */
1210         if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1211                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1212                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1213
1214                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1215                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1216                             conn2->ksnc_type != conn->ksnc_type)
1217                                 continue;
1218
1219                         /* Reply on a passive connection attempt so the peer_ni
1220                          * realises we're connected. */
1221                         LASSERT (rc == 0);
1222                         if (!active)
1223                                 rc = EALREADY;
1224
1225                         warn = "duplicate";
1226                         goto failed_2;
1227                 }
1228         }
1229
1230         /* If the connection created by this route didn't bind to the IP
1231          * address the route connected to, the connection/route matching
1232          * code below probably isn't going to work. */
1233         if (active &&
1234             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1235                 CERROR("Route %s %pI4h connected to %pI4h\n",
1236                        libcfs_id2str(peer_ni->ksnp_id),
1237                        &route->ksnr_ipaddr,
1238                        &conn->ksnc_ipaddr);
1239         }
1240
1241         /* Search for a route corresponding to the new connection and
1242          * create an association.  This allows incoming connections created
1243          * by routes in my peer_ni to match my own route entries so I don't
1244          * continually create duplicate routes. */
1245         list_for_each(tmp, &peer_ni->ksnp_routes) {
1246                 route = list_entry(tmp, ksock_route_t, ksnr_list);
1247
1248                 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1249                         continue;
1250
1251                 ksocknal_associate_route_conn_locked(route, conn);
1252                 break;
1253         }
1254
1255         conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
1256         peer_ni->ksnp_last_alive = ktime_get_real_seconds();
1257         peer_ni->ksnp_send_keepalive = 0;
1258         peer_ni->ksnp_error = 0;
1259
1260         sched = ksocknal_choose_scheduler_locked(cpt);
1261         sched->kss_nconns++;
1262         conn->ksnc_scheduler = sched;
1263
1264         conn->ksnc_tx_last_post = ktime_get_real_seconds();
1265         /* Set the deadline for the outgoing HELLO to drain */
1266         conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1267         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1268         smp_mb();   /* order with adding to peer_ni's conn list */
1269
1270         list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1271         ksocknal_conn_addref(conn);
1272
1273         ksocknal_new_packet(conn, 0);
1274
1275         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1276
1277         /* Take packets blocking for this connection. */
1278         list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1279                 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1280                     SOCKNAL_MATCH_NO)
1281                         continue;
1282
1283                 list_del(&tx->tx_list);
1284                 ksocknal_queue_tx_locked(tx, conn);
1285         }
1286
1287         write_unlock_bh(global_lock);
1288
1289         /* We've now got a new connection.  Any errors from here on are just
1290          * like "normal" comms errors and we close the connection normally.
1291          * NB (a) we still have to send the reply HELLO for passive
1292          *        connections,
1293          *    (b) normal I/O on the conn is blocked until I setup and call the
1294          *        socket callbacks.
1295          */
1296
1297         CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1298                " incarnation:%lld sched[%d:%d]\n",
1299                libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1300                &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1301                conn->ksnc_port, incarnation, cpt,
1302                (int)(sched - &sched->kss_info->ksi_scheds[0]));
1303
1304         if (active) {
1305                 /* additional routes after interface exchange? */
1306                 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1307                                        hello->kshm_ips, hello->kshm_nips);
1308         } else {
1309                 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1310                                                        hello->kshm_nips);
1311                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1312         }
1313
1314         LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1315                                     kshm_ips[LNET_MAX_INTERFACES]));
1316
1317         /* setup the socket AFTER I've received hello (it disables
1318          * SO_LINGER).  I might call back to the acceptor who may want
1319          * to send a protocol version response and then close the
1320          * socket; this ensures the socket only tears down after the
1321          * response has been sent. */
1322         if (rc == 0)
1323                 rc = ksocknal_lib_setup_sock(sock);
1324
1325         write_lock_bh(global_lock);
1326
1327         /* NB my callbacks block while I hold ksnd_global_lock */
1328         ksocknal_lib_set_callback(sock, conn);
1329
1330         if (!active)
1331                 peer_ni->ksnp_accepting--;
1332
1333         write_unlock_bh(global_lock);
1334
1335         if (rc != 0) {
1336                 write_lock_bh(global_lock);
1337                 if (!conn->ksnc_closing) {
1338                         /* could be closed by another thread */
1339                         ksocknal_close_conn_locked(conn, rc);
1340                 }
1341                 write_unlock_bh(global_lock);
1342         } else if (ksocknal_connsock_addref(conn) == 0) {
1343                 /* Allow I/O to proceed. */
1344                 ksocknal_read_callback(conn);
1345                 ksocknal_write_callback(conn);
1346                 ksocknal_connsock_decref(conn);
1347         }
1348
1349         ksocknal_connsock_decref(conn);
1350         ksocknal_conn_decref(conn);
1351         return rc;
1352
1353 failed_2:
1354         if (!peer_ni->ksnp_closing &&
1355             list_empty(&peer_ni->ksnp_conns) &&
1356             list_empty(&peer_ni->ksnp_routes)) {
1357                 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1358                 list_del_init(&peer_ni->ksnp_tx_queue);
1359                 ksocknal_unlink_peer_locked(peer_ni);
1360         }
1361
1362         write_unlock_bh(global_lock);
1363
1364         if (warn != NULL) {
1365                 if (rc < 0)
1366                         CERROR("Not creating conn %s type %d: %s\n",
1367                                libcfs_id2str(peerid), conn->ksnc_type, warn);
1368                 else
1369                         CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1370                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1371         }
1372
1373         if (!active) {
1374                 if (rc > 0) {
1375                         /* Request retry by replying with CONN_NONE;
1376                          * ksnc_proto has been set already */
1377                         conn->ksnc_type = SOCKLND_CONN_NONE;
1378                         hello->kshm_nips = 0;
1379                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
1380                 }
1381
1382                 write_lock_bh(global_lock);
1383                 peer_ni->ksnp_accepting--;
1384                 write_unlock_bh(global_lock);
1385         }
1386
1387         ksocknal_txlist_done(ni, &zombies, 1);
1388         ksocknal_peer_decref(peer_ni);
1389
1390 failed_1:
1391         if (hello != NULL)
1392                 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1393                                             kshm_ips[LNET_MAX_INTERFACES]));
1394
1395         LIBCFS_FREE(conn, sizeof(*conn));
1396
1397 failed_0:
1398         sock_release(sock);
1399         return rc;
1400 }
1401
1402 void
1403 ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
1404 {
1405         /* This just does the immediate housekeeping, and queues the
1406          * connection for the reaper to terminate.
1407          * Caller holds ksnd_global_lock exclusively in irq context */
1408         ksock_peer_ni_t      *peer_ni = conn->ksnc_peer;
1409         ksock_route_t     *route;
1410         ksock_conn_t      *conn2;
1411         struct list_head  *tmp;
1412
1413         LASSERT(peer_ni->ksnp_error == 0);
1414         LASSERT(!conn->ksnc_closing);
1415         conn->ksnc_closing = 1;
1416
1417         /* ksnd_deathrow_conns takes over peer_ni's ref */
1418         list_del(&conn->ksnc_list);
1419
1420         route = conn->ksnc_route;
1421         if (route != NULL) {
1422                 /* dissociate conn from route... */
1423                 LASSERT(!route->ksnr_deleted);
1424                 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1425
1426                 conn2 = NULL;
1427                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1428                         conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1429
1430                         if (conn2->ksnc_route == route &&
1431                             conn2->ksnc_type == conn->ksnc_type)
1432                                 break;
1433
1434                         conn2 = NULL;
1435                 }
1436                 if (conn2 == NULL)
1437                         route->ksnr_connected &= ~(1 << conn->ksnc_type);
1438
1439                 conn->ksnc_route = NULL;
1440
1441                 ksocknal_route_decref(route);   /* drop conn's ref on route */
1442         }
1443
1444         if (list_empty(&peer_ni->ksnp_conns)) {
1445                 /* No more connections to this peer_ni */
1446
1447                 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1448                         ksock_tx_t *tx;
1449
1450                         LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1451
1452                         /* throw them to the last connection...;
1453                          * these TXs will be sent to /dev/null by the scheduler */
1454                         list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1455                                             tx_list)
1456                                 ksocknal_tx_prep(conn, tx);
1457
1458                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1459                         list_splice_init(&peer_ni->ksnp_tx_queue,
1460                                          &conn->ksnc_tx_queue);
1461                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1462                 }
1463
1464                 /* renegotiate protocol version */
1465                 peer_ni->ksnp_proto = NULL;
1466                 /* stash last conn close reason */
1467                 peer_ni->ksnp_error = error;
1468
1469                 if (list_empty(&peer_ni->ksnp_routes)) {
1470                         /* I've just closed last conn belonging to a
1471                          * peer_ni with no routes to it */
1472                         ksocknal_unlink_peer_locked(peer_ni);
1473                 }
1474         }
1475
1476         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1477
1478         list_add_tail(&conn->ksnc_list,
1479                       &ksocknal_data.ksnd_deathrow_conns);
1480         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1481
1482         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1483 }
1484
1485 void
1486 ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
1487 {
1488         int        notify = 0;
1489         cfs_time_t last_alive = 0;
1490
1491         /* There has been a connection failure or comms error; but I'll only
1492          * tell LNET I think the peer_ni is dead if it's to another kernel and
1493          * there are no connections or connection attempts in existence. */
1494
1495         read_lock(&ksocknal_data.ksnd_global_lock);
1496
1497         if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1498              list_empty(&peer_ni->ksnp_conns) &&
1499              peer_ni->ksnp_accepting == 0 &&
1500              ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1501                 notify = 1;
1502                 last_alive = peer_ni->ksnp_last_alive;
1503         }
1504
1505         read_unlock(&ksocknal_data.ksnd_global_lock);
1506
1507         if (notify)
1508                 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1509                             last_alive);
1510 }
1511
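/* Abort any zero-copy requests still pending on 'conn': under ksnp_lock,
 * detach the matching TXs from the peer_ni's ksnp_zc_req_list, clear their
 * ZC cookies and mark them aborted, then drop each TX's reference outside
 * the lock. */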
1512 void
1513 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1514 {
1515         ksock_peer_ni_t  *peer_ni = conn->ksnc_peer;
1516         ksock_tx_t       *tx;
1517         ksock_tx_t       *tmp;
1518         struct list_head  zlist = LIST_HEAD_INIT(zlist);
1519
1520         /* NB safe to finalize TXs because closing of socket will
1521          * abort all buffered data */
1522         LASSERT(conn->ksnc_sock == NULL);
1523
1524         spin_lock(&peer_ni->ksnp_lock);
1525
1526         list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1527                 if (tx->tx_conn != conn)
1528                         continue;
1529
1530                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1531
1532                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1533                 tx->tx_zc_aborted = 1;  /* mark it as not-acked */
1534                 list_del(&tx->tx_zc_list);
1535                 list_add(&tx->tx_zc_list, &zlist);
1536         }
1537
1538         spin_unlock(&peer_ni->ksnp_lock);
1539
1540         while (!list_empty(&zlist)) {
1541                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1542
1543                 list_del(&tx->tx_zc_list);
1544                 ksocknal_tx_decref(tx);
1545         }
1546 }
1547
1548 void
1549 ksocknal_terminate_conn(ksock_conn_t *conn)
1550 {
1551         /* This gets called by the reaper (guaranteed thread context) to
1552          * disengage the socket from its callbacks and close it.
1553          * ksnc_refcount will eventually hit zero, and then the reaper will
1554          * destroy it. */
1555         ksock_peer_ni_t     *peer_ni = conn->ksnc_peer;
1556         ksock_sched_t    *sched = conn->ksnc_scheduler;
1557         int               failed = 0;
1558
1559         LASSERT(conn->ksnc_closing);
1560
1561         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1562         spin_lock_bh(&sched->kss_lock);
1563
1564         /* a closing conn is always ready to tx */
1565         conn->ksnc_tx_ready = 1;
1566
1567         if (!conn->ksnc_tx_scheduled &&
1568             !list_empty(&conn->ksnc_tx_queue)) {
1569                 list_add_tail(&conn->ksnc_tx_list,
1570                                &sched->kss_tx_conns);
1571                 conn->ksnc_tx_scheduled = 1;
1572                 /* extra ref for scheduler */
1573                 ksocknal_conn_addref(conn);
1574
1575                 wake_up (&sched->kss_waitq);
1576         }
1577
1578         spin_unlock_bh(&sched->kss_lock);
1579
1580         /* serialise with callbacks */
1581         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1582
1583         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1584
1585         /* OK, so this conn may not be completely disengaged from its
1586          * scheduler yet, but it _has_ committed to terminate... */
1587         conn->ksnc_scheduler->kss_nconns--;
1588
1589         if (peer_ni->ksnp_error != 0) {
1590                 /* peer_ni's last conn closed in error */
1591                 LASSERT(list_empty(&peer_ni->ksnp_conns));
1592                 failed = 1;
1593                 peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
1594         }
1595
1596         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1597
1598         if (failed)
1599                 ksocknal_peer_failed(peer_ni);
1600
1601         /* The socket is closed on the final put; either here, or in
1602          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1603          * when the connection was established, this will close the socket
1604          * immediately, aborting anything buffered in it. Any hung
1605          * zero-copy transmits will therefore complete in finite time. */
1606         ksocknal_connsock_decref(conn);
1607 }
1608
1609 void
1610 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1611 {
1612         /* Queue the conn for the reaper to destroy */
1613
1614         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1615         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1616
1617         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1618         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1619
1620         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1621 }
1622
1623 void
1624 ksocknal_destroy_conn (ksock_conn_t *conn)
1625 {
1626         cfs_time_t      last_rcv;
1627
1628         /* Final coup-de-grace of the reaper */
1629         CDEBUG (D_NET, "connection %p\n", conn);
1630
1631         LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1632         LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1633         LASSERT (conn->ksnc_sock == NULL);
1634         LASSERT (conn->ksnc_route == NULL);
1635         LASSERT (!conn->ksnc_tx_scheduled);
1636         LASSERT (!conn->ksnc_rx_scheduled);
1637         LASSERT(list_empty(&conn->ksnc_tx_queue));
1638
1639         /* complete current receive if any */
1640         switch (conn->ksnc_rx_state) {
1641         case SOCKNAL_RX_LNET_PAYLOAD:
1642                 last_rcv = conn->ksnc_rx_deadline -
1643                            cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1644                 CERROR("Completing partial receive from %s[%d], "
1645                        "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1646                        "last alive is %ld secs ago\n",
1647                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1648                        &conn->ksnc_ipaddr, conn->ksnc_port,
1649                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1650                        cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
1651                                         last_rcv)));
1652                 lnet_finalize(conn->ksnc_cookie, -EIO);
1653                 break;
1654         case SOCKNAL_RX_LNET_HEADER:
1655                 if (conn->ksnc_rx_started)
1656                         CERROR("Incomplete receive of lnet header from %s, "
1657                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1658                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1659                                &conn->ksnc_ipaddr, conn->ksnc_port,
1660                                conn->ksnc_proto->pro_version);
1661                 break;
1662         case SOCKNAL_RX_KSM_HEADER:
1663                 if (conn->ksnc_rx_started)
1664                         CERROR("Incomplete receive of ksock message from %s, "
1665                                "ip %pI4h:%d, with error, protocol: %d.x.\n",
1666                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1667                                &conn->ksnc_ipaddr, conn->ksnc_port,
1668                                conn->ksnc_proto->pro_version);
1669                 break;
1670         case SOCKNAL_RX_SLOP:
1671                 if (conn->ksnc_rx_started)
1672                         CERROR("Incomplete receive of slops from %s, "
1673                                "ip %pI4h:%d, with error\n",
1674                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1675                                &conn->ksnc_ipaddr, conn->ksnc_port);
1676                 break;
1677         default:
1678                 LBUG ();
1679                 break;
1680         }
1681
1682         ksocknal_peer_decref(conn->ksnc_peer);
1683
1684         LIBCFS_FREE (conn, sizeof (*conn));
1685 }
1686
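/* Close all of peer_ni's connections to 'ipaddr' (0 matches any address)
 * and return how many were closed.  Caller must hold ksnd_global_lock
 * exclusively. */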
1687 int
1688 ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
1689 {
1690         ksock_conn_t       *conn;
1691         struct list_head         *ctmp;
1692         struct list_head         *cnxt;
1693         int                 count = 0;
1694
1695         list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1696                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1697
1698                 if (ipaddr == 0 ||
1699                     conn->ksnc_ipaddr == ipaddr) {
1700                         count++;
1701                         ksocknal_close_conn_locked (conn, why);
1702                 }
1703         }
1704
1705         return (count);
1706 }
1707
1708 int
1709 ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1710 {
1711         ksock_peer_ni_t     *peer_ni = conn->ksnc_peer;
1712         __u32             ipaddr = conn->ksnc_ipaddr;
1713         int               count;
1714
1715         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1716
1717         count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1718
1719         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1720
1721         return (count);
1722 }
1723
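/* Close every connection matching 'id' (LNET_NID_ANY/LNET_PID_ANY act as
 * wildcards) and 'ipaddr' (0 matches any).  Wildcard requests always return
 * 0; a fully specified request returns -ENOENT if nothing matched. */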
1724 int
1725 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1726 {
1727         ksock_peer_ni_t       *peer_ni;
1728         struct list_head         *ptmp;
1729         struct list_head         *pnxt;
1730         int                 lo;
1731         int                 hi;
1732         int                 i;
1733         int                 count = 0;
1734
1735         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1736
1737         if (id.nid != LNET_NID_ANY)
1738                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1739         else {
1740                 lo = 0;
1741                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1742         }
1743
1744         for (i = lo; i <= hi; i++) {
1745                 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1746
1747                         peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
1748
1749                         if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1750                               (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1751                                 continue;
1752
1753                         count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1754                 }
1755         }
1756
1757         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1758
1759         /* wildcards always succeed */
1760         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1761                 return (0);
1762
1763         return (count == 0 ? -ENOENT : 0);
1764 }
1765
1766 void
1767 ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
1768 {
1769         /* The router is telling me she's been notified of a change in
1770          * gateway state....
1771          */
1772         struct lnet_process_id id = {
1773                 .nid    = gw_nid,
1774                 .pid    = LNET_PID_ANY,
1775         };
1776
1777         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1778                 alive ? "up" : "down");
1779
1780         if (!alive) {
1781                 /* If the gateway crashed, close all open connections... */
1782                 ksocknal_close_matching_conns (id, 0);
1783                 return;
1784         }
1785
1786         /* ...otherwise do nothing.  We can only establish new connections
1787          * if we have autoroutes, and these connect on demand. */
1788 }
1789
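/* LND query handler: report in *when the last time 'nid' was known alive,
 * treating recently ACKed socket data as proof of life.  Unless the peer_ni
 * exists but has no connectable route, also (re)add the peer_ni and launch
 * connection attempts to it. */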
1790 void
1791 ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
1792 {
1793         int connect = 1;
1794         time64_t last_alive = 0;
1795         time64_t now = ktime_get_real_seconds();
1796         ksock_peer_ni_t *peer_ni = NULL;
1797         rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1798         struct lnet_process_id id = {
1799                 .nid = nid,
1800                 .pid = LNET_PID_LUSTRE,
1801         };
1802
1803         read_lock(glock);
1804
1805         peer_ni = ksocknal_find_peer_locked(ni, id);
1806         if (peer_ni != NULL) {
1807                 struct list_head       *tmp;
1808                 ksock_conn_t     *conn;
1809                 int               bufnob;
1810
1811                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1812                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1813                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1814
1815                         if (bufnob < conn->ksnc_tx_bufnob) {
1816                                 /* something got ACKed */
1817                                 conn->ksnc_tx_deadline =
1818                                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1819                                 peer_ni->ksnp_last_alive = now;
1820                                 conn->ksnc_tx_bufnob = bufnob;
1821                         }
1822                 }
1823
1824                 last_alive = peer_ni->ksnp_last_alive;
1825                 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1826                         connect = 0;
1827         }
1828
1829         read_unlock(glock);
1830
1831         if (last_alive != 0)
1832                 *when = last_alive;
1833
1834         CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
1835                libcfs_nid2str(nid), peer_ni,
1836                last_alive ? cfs_duration_sec(now - last_alive) : -1,
1837                connect);
1838
1839         if (!connect)
1840                 return;
1841
1842         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1843
1844         write_lock_bh(glock);
1845
1846         peer_ni = ksocknal_find_peer_locked(ni, id);
1847         if (peer_ni != NULL)
1848                 ksocknal_launch_all_connections_locked(peer_ni);
1849
1850         write_unlock_bh(glock);
1851         return;
1852 }
1853
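/* "Push" every connection of 'peer_ni': walk the conn list by index, take a
 * ref on each conn under the global lock, then call ksocknal_lib_push_conn()
 * on it outside the lock. */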
1854 static void
1855 ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
1856 {
1857         int               index;
1858         int               i;
1859         struct list_head       *tmp;
1860         ksock_conn_t     *conn;
1861
1862         for (index = 0; ; index++) {
1863                 read_lock(&ksocknal_data.ksnd_global_lock);
1864
1865                 i = 0;
1866                 conn = NULL;
1867
1868                 list_for_each(tmp, &peer_ni->ksnp_conns) {
1869                         if (i++ == index) {
1870                                 conn = list_entry(tmp, ksock_conn_t,
1871                                                        ksnc_list);
1872                                 ksocknal_conn_addref(conn);
1873                                 break;
1874                         }
1875                 }
1876
1877                 read_unlock(&ksocknal_data.ksnd_global_lock);
1878
1879                 if (conn == NULL)
1880                         break;
1881
1882                 ksocknal_lib_push_conn (conn);
1883                 ksocknal_conn_decref(conn);
1884         }
1885 }
1886
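/* Push all connections of every peer_ni matching 'id'; wildcard NID/PID scan
 * the whole peer hash table.  Returns -ENOENT if no peer_ni matched. */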
1887 static int
1888 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1889 {
1890         struct list_head *start;
1891         struct list_head *end;
1892         struct list_head *tmp;
1893         int               rc = -ENOENT;
1894         unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
1895
1896         if (id.nid == LNET_NID_ANY) {
1897                 start = &ksocknal_data.ksnd_peers[0];
1898                 end = &ksocknal_data.ksnd_peers[hsize - 1];
1899         } else {
1900                 start = end = ksocknal_nid2peerlist(id.nid);
1901         }
1902
1903         for (tmp = start; tmp <= end; tmp++) {
1904                 int     peer_off; /* searching offset in peer_ni hash table */
1905
1906                 for (peer_off = 0; ; peer_off++) {
1907                         ksock_peer_ni_t *peer_ni;
1908                         int           i = 0;
1909
1910                         read_lock(&ksocknal_data.ksnd_global_lock);
1911                         list_for_each_entry(peer_ni, tmp, ksnp_list) {
1912                                 if (!((id.nid == LNET_NID_ANY ||
1913                                        id.nid == peer_ni->ksnp_id.nid) &&
1914                                       (id.pid == LNET_PID_ANY ||
1915                                        id.pid == peer_ni->ksnp_id.pid)))
1916                                         continue;
1917
1918                                 if (i++ == peer_off) {
1919                                         ksocknal_peer_addref(peer_ni);
1920                                         break;
1921                                 }
1922                         }
1923                         read_unlock(&ksocknal_data.ksnd_global_lock);
1924
1925                         if (i <= peer_off) /* no match */
1926                                 break;
1927
1928                         rc = 0;
1929                         ksocknal_push_peer(peer_ni);
1930                         ksocknal_peer_decref(peer_ni);
1931                 }
1932         }
1933         return rc;
1934 }
1935
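/* Register a local interface (IP address + netmask) with this NI and count
 * the peers and routes that already use that address.  Duplicates are
 * silently ignored; -ENOSPC once LNET_MAX_INTERFACES are configured. */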
1936 static int
1937 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1938 {
1939         ksock_net_t       *net = ni->ni_data;
1940         ksock_interface_t *iface;
1941         int                rc;
1942         int                i;
1943         int                j;
1944         struct list_head        *ptmp;
1945         ksock_peer_ni_t      *peer_ni;
1946         struct list_head        *rtmp;
1947         ksock_route_t     *route;
1948
1949         if (ipaddress == 0 ||
1950             netmask == 0)
1951                 return (-EINVAL);
1952
1953         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1954
1955         iface = ksocknal_ip2iface(ni, ipaddress);
1956         if (iface != NULL) {
1957                 /* silently ignore dups */
1958                 rc = 0;
1959         } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1960                 rc = -ENOSPC;
1961         } else {
1962                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1963
1964                 iface->ksni_ipaddr = ipaddress;
1965                 iface->ksni_netmask = netmask;
1966                 iface->ksni_nroutes = 0;
1967                 iface->ksni_npeers = 0;
1968
1969                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1970                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1971                                 peer_ni = list_entry(ptmp, ksock_peer_ni_t,
1972                                                       ksnp_list);
1973
1974                                 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1975                                         if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1976                                                 iface->ksni_npeers++;
1977
1978                                 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1979                                         route = list_entry(rtmp,
1980                                                                ksock_route_t,
1981                                                                ksnr_list);
1982
1983                                         if (route->ksnr_myipaddr == ipaddress)
1984                                                 iface->ksni_nroutes++;
1985                                 }
1986                         }
1987                 }
1988
1989                 rc = 0;
1990                 /* NB only new connections will pay attention to the new interface! */
1991         }
1992
1993         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1994
1995         return (rc);
1996 }
1997
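/* Forget local address 'ipaddr' for 'peer_ni': remove it from the passive IP
 * list, unbind (or delete) routes bound to it and close any connections using
 * it as their local address. */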
1998 static void
1999 ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
2000 {
2001         struct list_head         *tmp;
2002         struct list_head         *nxt;
2003         ksock_route_t      *route;
2004         ksock_conn_t       *conn;
2005         int                 i;
2006         int                 j;
2007
2008         for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2009                 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2010                         for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2011                                 peer_ni->ksnp_passive_ips[j-1] =
2012                                         peer_ni->ksnp_passive_ips[j];
2013                         peer_ni->ksnp_n_passive_ips--;
2014                         break;
2015                 }
2016
2017         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2018                 route = list_entry(tmp, ksock_route_t, ksnr_list);
2019
2020                 if (route->ksnr_myipaddr != ipaddr)
2021                         continue;
2022
2023                 if (route->ksnr_share_count != 0) {
2024                         /* Manually created; keep, but unbind */
2025                         route->ksnr_myipaddr = 0;
2026                 } else {
2027                         ksocknal_del_route_locked(route);
2028                 }
2029         }
2030
2031         list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2032                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2033
2034                 if (conn->ksnc_myipaddr == ipaddr)
2035                         ksocknal_close_conn_locked (conn, 0);
2036         }
2037 }
2038
2039 static int
2040 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2041 {
2042         ksock_net_t       *net = ni->ni_data;
2043         int                rc = -ENOENT;
2044         struct list_head        *tmp;
2045         struct list_head        *nxt;
2046         ksock_peer_ni_t      *peer_ni;
2047         __u32              this_ip;
2048         int                i;
2049         int                j;
2050
2051         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2052
2053         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2054                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2055
2056                 if (!(ipaddress == 0 ||
2057                       ipaddress == this_ip))
2058                         continue;
2059
2060                 rc = 0;
2061
2062                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2063                         net->ksnn_interfaces[j-1] =
2064                                 net->ksnn_interfaces[j];
2065
2066                 net->ksnn_ninterfaces--;
2067
2068                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2069                         list_for_each_safe(tmp, nxt,
2070                                                &ksocknal_data.ksnd_peers[j]) {
2071                                 peer_ni = list_entry(tmp, ksock_peer_ni_t,
2072                                                       ksnp_list);
2073
2074                                 if (peer_ni->ksnp_ni != ni)
2075                                         continue;
2076
2077                                 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2078                         }
2079                 }
2080         }
2081
2082         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2083
2084         return (rc);
2085 }
2086
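/* ioctl dispatcher for the socklnd: services the IOC_LIBCFS_* commands for
 * querying and updating interfaces, peers and connections. */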
2087 int
2088 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
2089 {
2090         struct lnet_process_id id = {0};
2091         struct libcfs_ioctl_data *data = arg;
2092         int rc;
2093
2094         switch(cmd) {
2095         case IOC_LIBCFS_GET_INTERFACE: {
2096                 ksock_net_t       *net = ni->ni_data;
2097                 ksock_interface_t *iface;
2098
2099                 read_lock(&ksocknal_data.ksnd_global_lock);
2100
2101                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2102                         rc = -ENOENT;
2103                 } else {
2104                         rc = 0;
2105                         iface = &net->ksnn_interfaces[data->ioc_count];
2106
2107                         data->ioc_u32[0] = iface->ksni_ipaddr;
2108                         data->ioc_u32[1] = iface->ksni_netmask;
2109                         data->ioc_u32[2] = iface->ksni_npeers;
2110                         data->ioc_u32[3] = iface->ksni_nroutes;
2111                 }
2112
2113                 read_unlock(&ksocknal_data.ksnd_global_lock);
2114                 return rc;
2115         }
2116
2117         case IOC_LIBCFS_ADD_INTERFACE:
2118                 return ksocknal_add_interface(ni,
2119                                               data->ioc_u32[0], /* IP address */
2120                                               data->ioc_u32[1]); /* net mask */
2121
2122         case IOC_LIBCFS_DEL_INTERFACE:
2123                 return ksocknal_del_interface(ni,
2124                                               data->ioc_u32[0]); /* IP address */
2125
2126         case IOC_LIBCFS_GET_PEER: {
2127                 __u32            myip = 0;
2128                 __u32            ip = 0;
2129                 int              port = 0;
2130                 int              conn_count = 0;
2131                 int              share_count = 0;
2132
2133                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2134                                             &id, &myip, &ip, &port,
2135                                             &conn_count,  &share_count);
2136                 if (rc != 0)
2137                         return rc;
2138
2139                 data->ioc_nid    = id.nid;
2140                 data->ioc_count  = share_count;
2141                 data->ioc_u32[0] = ip;
2142                 data->ioc_u32[1] = port;
2143                 data->ioc_u32[2] = myip;
2144                 data->ioc_u32[3] = conn_count;
2145                 data->ioc_u32[4] = id.pid;
2146                 return 0;
2147         }
2148
2149         case IOC_LIBCFS_ADD_PEER:
2150                 id.nid = data->ioc_nid;
2151                 id.pid = LNET_PID_LUSTRE;
2152                 return ksocknal_add_peer (ni, id,
2153                                           data->ioc_u32[0], /* IP */
2154                                           data->ioc_u32[1]); /* port */
2155
2156         case IOC_LIBCFS_DEL_PEER:
2157                 id.nid = data->ioc_nid;
2158                 id.pid = LNET_PID_ANY;
2159                 return ksocknal_del_peer (ni, id,
2160                                           data->ioc_u32[0]); /* IP */
2161
2162         case IOC_LIBCFS_GET_CONN: {
2163                 int           txmem;
2164                 int           rxmem;
2165                 int           nagle;
2166                 ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2167
2168                 if (conn == NULL)
2169                         return -ENOENT;
2170
2171                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2172
2173                 data->ioc_count  = txmem;
2174                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2175                 data->ioc_flags  = nagle;
2176                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2177                 data->ioc_u32[1] = conn->ksnc_port;
2178                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2179                 data->ioc_u32[3] = conn->ksnc_type;
2180                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2181                 data->ioc_u32[5] = rxmem;
2182                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2183                 ksocknal_conn_decref(conn);
2184                 return 0;
2185         }
2186
2187         case IOC_LIBCFS_CLOSE_CONNECTION:
2188                 id.nid = data->ioc_nid;
2189                 id.pid = LNET_PID_ANY;
2190                 return ksocknal_close_matching_conns (id,
2191                                                       data->ioc_u32[0]);
2192
2193         case IOC_LIBCFS_REGISTER_MYNID:
2194                 /* Ignore if this is a noop */
2195                 if (data->ioc_nid == ni->ni_nid)
2196                         return 0;
2197
2198                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2199                        libcfs_nid2str(data->ioc_nid),
2200                        libcfs_nid2str(ni->ni_nid));
2201                 return -EINVAL;
2202
2203         case IOC_LIBCFS_PUSH_CONNECTION:
2204                 id.nid = data->ioc_nid;
2205                 id.pid = LNET_PID_ANY;
2206                 return ksocknal_push(ni, id);
2207
2208         default:
2209                 return -EINVAL;
2210         }
2211         /* not reached */
2212 }
2213
2214 static void
2215 ksocknal_free_buffers (void)
2216 {
2217         LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2218
2219         if (ksocknal_data.ksnd_sched_info != NULL) {
2220                 struct ksock_sched_info *info;
2221                 int                     i;
2222
2223                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2224                         if (info->ksi_scheds != NULL) {
2225                                 LIBCFS_FREE(info->ksi_scheds,
2226                                             info->ksi_nthreads_max *
2227                                             sizeof(info->ksi_scheds[0]));
2228                         }
2229                 }
2230                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2231         }
2232
2233         LIBCFS_FREE (ksocknal_data.ksnd_peers,
2234                      sizeof(struct list_head) *
2235                      ksocknal_data.ksnd_peer_hash_size);
2236
2237         spin_lock(&ksocknal_data.ksnd_tx_lock);
2238
2239         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2240                 struct list_head        zlist;
2241                 ksock_tx_t      *tx;
2242
2243                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2244                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2245                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2246
2247                 while (!list_empty(&zlist)) {
2248                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2249                         list_del(&tx->tx_list);
2250                         LIBCFS_FREE(tx, tx->tx_desc_size);
2251                 }
2252         } else {
2253                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2254         }
2255 }
2256
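/* Tear down global socklnd state once the last net has gone: check that all
 * peer/conn lists are empty, flag ksnd_shuttingdown, wake the connd, reaper
 * and scheduler threads, wait for them to exit, then free the buffers. */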
2257 static void
2258 ksocknal_base_shutdown(void)
2259 {
2260         struct ksock_sched_info *info;
2261         ksock_sched_t           *sched;
2262         int                     i;
2263         int                     j;
2264
2265         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2266                atomic_read (&libcfs_kmemory));
2267         LASSERT (ksocknal_data.ksnd_nnets == 0);
2268
2269         switch (ksocknal_data.ksnd_init) {
2270         default:
2271                 LASSERT (0);
2272
2273         case SOCKNAL_INIT_ALL:
2274         case SOCKNAL_INIT_DATA:
2275                 LASSERT (ksocknal_data.ksnd_peers != NULL);
2276                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2277                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2278                 }
2279
2280                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2281                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2282                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2283                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2284                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2285
2286                 if (ksocknal_data.ksnd_sched_info != NULL) {
2287                         cfs_percpt_for_each(info, i,
2288                                             ksocknal_data.ksnd_sched_info) {
2289                                 if (info->ksi_scheds == NULL)
2290                                         continue;
2291
2292                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2293
2294                                         sched = &info->ksi_scheds[j];
2295                                         LASSERT(list_empty(&sched->kss_tx_conns));
2297                                         LASSERT(list_empty(&sched->kss_rx_conns));
2299                                         LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2301                                         LASSERT(sched->kss_nconns == 0);
2302                                 }
2303                         }
2304                 }
2305
2306                 /* flag threads to terminate; wake and wait for them to die */
2307                 ksocknal_data.ksnd_shuttingdown = 1;
2308                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2309                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2310
2311                 if (ksocknal_data.ksnd_sched_info != NULL) {
2312                         cfs_percpt_for_each(info, i,
2313                                             ksocknal_data.ksnd_sched_info) {
2314                                 if (info->ksi_scheds == NULL)
2315                                         continue;
2316
2317                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2318                                         sched = &info->ksi_scheds[j];
2319                                         wake_up_all(&sched->kss_waitq);
2320                                 }
2321                         }
2322                 }
2323
2324                 i = 4;
2325                 read_lock(&ksocknal_data.ksnd_global_lock);
2326                 while (ksocknal_data.ksnd_nthreads != 0) {
2327                         i++;
2328                         /* power of 2? */
2329                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2330                                 "waiting for %d threads to terminate\n",
2331                                 ksocknal_data.ksnd_nthreads);
2332                         read_unlock(&ksocknal_data.ksnd_global_lock);
2333                         set_current_state(TASK_UNINTERRUPTIBLE);
2334                         schedule_timeout(cfs_time_seconds(1));
2335                         read_lock(&ksocknal_data.ksnd_global_lock);
2336                 }
2337                 read_unlock(&ksocknal_data.ksnd_global_lock);
2338
2339                 ksocknal_free_buffers();
2340
2341                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2342                 break;
2343         }
2344
2345         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2346                atomic_read (&libcfs_kmemory));
2347
2348         module_put(THIS_MODULE);
2349 }
2350
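/* One-time global initialisation: allocate the peer_ni hash table and the
 * per-CPT scheduler info, then start the connd and reaper threads. */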
2351 static int
2352 ksocknal_base_startup(void)
2353 {
2354         struct ksock_sched_info *info;
2355         int                     rc;
2356         int                     i;
2357
2358         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2359         LASSERT (ksocknal_data.ksnd_nnets == 0);
2360
2361         memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2362
2363         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2364         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2365                      sizeof(struct list_head) *
2366                      ksocknal_data.ksnd_peer_hash_size);
2367         if (ksocknal_data.ksnd_peers == NULL)
2368                 return -ENOMEM;
2369
2370         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2371                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2372
2373         rwlock_init(&ksocknal_data.ksnd_global_lock);
2374         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2375
2376         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2377         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2378         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2379         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2380         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2381
2382         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2383         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2384         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2385         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2386
2387         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2388         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2389
2390         /* NB memset above zeros whole of ksocknal_data */
2391
2392         /* flag lists/ptrs/locks initialised */
2393         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2394         try_module_get(THIS_MODULE);
2395
2396         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2397                                                          sizeof(*info));
2398         if (ksocknal_data.ksnd_sched_info == NULL)
2399                 goto failed;
2400
2401         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2402                 ksock_sched_t   *sched;
2403                 int             nthrs;
2404
2405                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2406                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2407                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2408                 } else {
2409                         /* max to half of CPUs, assume another half should be
2410                          * reserved for upper layer modules */
2411                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2412                 }
2413
2414                 info->ksi_nthreads_max = nthrs;
2415                 info->ksi_cpt = i;
2416
2417                 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2418                                  info->ksi_nthreads_max * sizeof(*sched));
2419                 if (info->ksi_scheds == NULL)
2420                         goto failed;
2421
2422                 for (; nthrs > 0; nthrs--) {
2423                         sched = &info->ksi_scheds[nthrs - 1];
2424
2425                         sched->kss_info = info;
2426                         spin_lock_init(&sched->kss_lock);
2427                         INIT_LIST_HEAD(&sched->kss_rx_conns);
2428                         INIT_LIST_HEAD(&sched->kss_tx_conns);
2429                         INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2430                         init_waitqueue_head(&sched->kss_waitq);
2431                 }
2432         }
2433
2434         ksocknal_data.ksnd_connd_starting         = 0;
2435         ksocknal_data.ksnd_connd_failed_stamp     = 0;
2436         ksocknal_data.ksnd_connd_starting_stamp   = ktime_get_real_seconds();
2437         /* must have at least 2 connds to remain responsive to accepts while
2438          * connecting */
2439         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2440                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2441
2442         if (*ksocknal_tunables.ksnd_nconnds_max <
2443             *ksocknal_tunables.ksnd_nconnds) {
2444                 ksocknal_tunables.ksnd_nconnds_max =
2445                         ksocknal_tunables.ksnd_nconnds;
2446         }
2447
2448         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2449                 char name[16];
2450                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2451                 ksocknal_data.ksnd_connd_starting++;
2452                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2453
2455                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2456                 rc = ksocknal_thread_start(ksocknal_connd,
2457                                            (void *)((uintptr_t)i), name);
2458                 if (rc != 0) {
2459                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2460                         ksocknal_data.ksnd_connd_starting--;
2461                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2462                         CERROR("Can't spawn socknal connd: %d\n", rc);
2463                         goto failed;
2464                 }
2465         }
2466
2467         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2468         if (rc != 0) {
2469                 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2470                 goto failed;
2471         }
2472
2473         /* flag everything initialised */
2474         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2475
2476         return 0;
2477
2478  failed:
2479         ksocknal_base_shutdown();
2480         return -ENETDOWN;
2481 }
2482
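/* Shutdown aid: if a peer_ni of this NI is still hashed, dump its refcounts,
 * routes and connections so a stuck shutdown can be diagnosed. */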
2483 static void
2484 ksocknal_debug_peerhash(struct lnet_ni *ni)
2485 {
2486         ksock_peer_ni_t *peer_ni = NULL;
2487         struct list_head        *tmp;
2488         int             i;
2489
2490         read_lock(&ksocknal_data.ksnd_global_lock);
2491
2492         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2493                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2494                         peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
2495
2496                         if (peer_ni->ksnp_ni == ni) break;
2497
2498                         peer_ni = NULL;
2499                 }
2500         }
2501
2502         if (peer_ni != NULL) {
2503                 ksock_route_t *route;
2504                 ksock_conn_t  *conn;
2505
2506                 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2507                        "closing %d, accepting %d, err %d, zcookie %llu, "
2508                        "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2509                        atomic_read(&peer_ni->ksnp_refcount),
2510                        peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2511                        peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2512                        peer_ni->ksnp_zc_next_cookie,
2513                        !list_empty(&peer_ni->ksnp_tx_queue),
2514                        !list_empty(&peer_ni->ksnp_zc_req_list));
2515
2516                 list_for_each(tmp, &peer_ni->ksnp_routes) {
2517                         route = list_entry(tmp, ksock_route_t, ksnr_list);
2518                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2519                                "del %d\n", atomic_read(&route->ksnr_refcount),
2520                                route->ksnr_scheduled, route->ksnr_connecting,
2521                                route->ksnr_connected, route->ksnr_deleted);
2522                 }
2523
2524                 list_for_each(tmp, &peer_ni->ksnp_conns) {
2525                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2526                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2527                                atomic_read(&conn->ksnc_conn_refcount),
2528                                atomic_read(&conn->ksnc_sock_refcount),
2529                                conn->ksnc_type, conn->ksnc_closing);
2530                 }
2531         }
2532
2533         read_unlock(&ksocknal_data.ksnd_global_lock);
2534         return;
2535 }
2536
2537 void
2538 ksocknal_shutdown(struct lnet_ni *ni)
2539 {
2540         ksock_net_t *net = ni->ni_data;
2541         struct lnet_process_id anyid = {
2542                 .nid = LNET_NID_ANY,
2543                 .pid = LNET_PID_ANY,
2544         };
2545         int i;
2546
2547         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2548         LASSERT(ksocknal_data.ksnd_nnets > 0);
2549
2550         spin_lock_bh(&net->ksnn_lock);
2551         net->ksnn_shutdown = 1;                 /* prevent new peers */
2552         spin_unlock_bh(&net->ksnn_lock);
2553
2554         /* Delete all peers */
2555         ksocknal_del_peer(ni, anyid, 0);
2556
2557         /* Wait for all peer_ni state to clean up */
2558         i = 2;
2559         spin_lock_bh(&net->ksnn_lock);
2560         while (net->ksnn_npeers != 0) {
2561                 spin_unlock_bh(&net->ksnn_lock);
2562
2563                 i++;
2564                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2565                        "waiting for %d peers to disconnect\n",
2566                        net->ksnn_npeers);
2567                 set_current_state(TASK_UNINTERRUPTIBLE);
2568                 schedule_timeout(cfs_time_seconds(1));
2569
2570                 ksocknal_debug_peerhash(ni);
2571
2572                 spin_lock_bh(&net->ksnn_lock);
2573         }
2574         spin_unlock_bh(&net->ksnn_lock);
2575
2576         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2577                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2578                 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2579         }
2580
2581         list_del(&net->ksnn_list);
2582         LIBCFS_FREE(net, sizeof(*net));
2583
2584         ksocknal_data.ksnd_nnets--;
2585         if (ksocknal_data.ksnd_nnets == 0)
2586                 ksocknal_base_shutdown();
2587 }
2588
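/* Autoconfigure interfaces: enumerate the node's IP interfaces, skip loopback
 * and downed ones, and record up to LNET_MAX_INTERFACES of them in 'net'.
 * Returns the number recorded. */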
2589 static int
2590 ksocknal_enumerate_interfaces(ksock_net_t *net)
2591 {
2592         char      **names;
2593         int         i;
2594         int         j;
2595         int         rc;
2596         int         n;
2597
2598         n = lnet_ipif_enumerate(&names);
2599         if (n <= 0) {
2600                 CERROR("Can't enumerate interfaces: %d\n", n);
2601                 return n;
2602         }
2603
2604         for (i = j = 0; i < n; i++) {
2605                 int        up;
2606                 __u32      ip;
2607                 __u32      mask;
2608
2609                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2610                         continue;
2611
2612                 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2613                 if (rc != 0) {
2614                         CWARN("Can't get interface %s info: %d\n",
2615                               names[i], rc);
2616                         continue;
2617                 }
2618
2619                 if (!up) {
2620                         CWARN("Ignoring interface %s (down)\n",
2621                               names[i]);
2622                         continue;
2623                 }
2624
2625                 if (j == LNET_MAX_INTERFACES) {
2626                         CWARN("Ignoring interface %s (too many interfaces)\n",
2627                               names[i]);
2628                         continue;
2629                 }
2630
2631                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2632                 net->ksnn_interfaces[j].ksni_netmask = mask;
2633                 strlcpy(net->ksnn_interfaces[j].ksni_name,
2634                         names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2635                 j++;
2636         }
2637
2638         lnet_ipif_free_enumeration(names, n);
2639
2640         if (j == 0)
2641                 CERROR("Can't find any usable interfaces\n");
2642
2643         return j;
2644 }
2645
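/* Count how many of this net's interfaces (ignoring any ":" alias suffix) are
 * not already used by another configured socklnd net. */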
2646 static int
2647 ksocknal_search_new_ipif(ksock_net_t *net)
2648 {
2649         int     new_ipif = 0;
2650         int     i;
2651
2652         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2653                 char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2654                 char            *colon = strchr(ifnam, ':');
2655                 int             found  = 0;
2656                 ksock_net_t     *tmp;
2657                 int             j;
2658
2659                 if (colon != NULL) /* ignore alias device */
2660                         *colon = 0;
2661
2662                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2663                                         ksnn_list) {
2664                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2665                                 char *ifnam2 = &tmp->ksnn_interfaces[j].ksni_name[0];
2667                                 char *colon2 = strchr(ifnam2, ':');
2668
2669                                 if (colon2 != NULL)
2670                                         *colon2 = 0;
2671
2672                                 found = strcmp(ifnam, ifnam2) == 0;
2673                                 if (colon2 != NULL)
2674                                         *colon2 = ':';
2675                         }
2676                         if (found)
2677                                 break;
2678                 }
2679
2680                 new_ipif += !found;
2681                 if (colon != NULL)
2682                         *colon = ':';
2683         }
2684
2685         return new_ipif;
2686 }
2687
2688 static int
2689 ksocknal_start_schedulers(struct ksock_sched_info *info)
2690 {
2691         int     nthrs;
2692         int     rc = 0;
2693         int     i;
2694
2695         if (info->ksi_nthreads == 0) {
2696                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2697                         nthrs = info->ksi_nthreads_max;
2698                 } else {
2699                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2700                                                info->ksi_cpt);
2701                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2702                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2703                 }
2704                 nthrs = min(nthrs, info->ksi_nthreads_max);
2705         } else {
2706                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2707                 /* start at most two more threads when a new interface is added */
2708                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2709         }
2710
2711         for (i = 0; i < nthrs; i++) {
2712                 long            id;
2713                 char            name[20];
2714                 ksock_sched_t   *sched;
2715                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2716                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2717                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2718                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2719
2720                 rc = ksocknal_thread_start(ksocknal_scheduler,
2721                                            (void *)id, name);
2722                 if (rc == 0)
2723                         continue;
2724
2725                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2726                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2727                 break;
2728         }
2729
2730         info->ksi_nthreads += i;
2731         return rc;
2732 }
2733
2734 static int
2735 ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2736 {
2737         int     newif = ksocknal_search_new_ipif(net);
2738         int     rc;
2739         int     i;
2740
2741         if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2742                 return -EINVAL;
2743
2744         for (i = 0; i < ncpts; i++) {
2745                 struct ksock_sched_info *info;
2746                 int cpt = (cpts == NULL) ? i : cpts[i];
2747
2748                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2749                 info = ksocknal_data.ksnd_sched_info[cpt];
2750
2751                 if (!newif && info->ksi_nthreads > 0)
2752                         continue;
2753
2754                 rc = ksocknal_start_schedulers(info);
2755                 if (rc != 0)
2756                         return rc;
2757         }
2758         return 0;
2759 }
2760
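/* LND startup entry point: bring up the global state if necessary, then
 * allocate and configure this NI's ksock_net_t (tunables, interfaces, NUMA
 * affinity, scheduler threads) and derive ni_nid from the first interface's
 * IP address. */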
2761 int
2762 ksocknal_startup(struct lnet_ni *ni)
2763 {
2764         ksock_net_t  *net;
2765         int           rc;
2766         int           i;
2767         struct net_device *net_dev;
2768         int node_id;
2769
2770         LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
2771
2772         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2773                 rc = ksocknal_base_startup();
2774                 if (rc != 0)
2775                         return rc;
2776         }
2777
2778         LIBCFS_ALLOC(net, sizeof(*net));
2779         if (net == NULL)
2780                 goto fail_0;
2781
2782         spin_lock_init(&net->ksnn_lock);
2783         net->ksnn_incarnation = ktime_get_real_ns();
2784         ni->ni_data = net;
2785         if (!ni->ni_net->net_tunables_set) {
2786                 ni->ni_net->net_tunables.lct_peer_timeout =
2787                         *ksocknal_tunables.ksnd_peertimeout;
2788                 ni->ni_net->net_tunables.lct_max_tx_credits =
2789                         *ksocknal_tunables.ksnd_credits;
2790                 ni->ni_net->net_tunables.lct_peer_tx_credits =
2791                         *ksocknal_tunables.ksnd_peertxcredits;
2792                 ni->ni_net->net_tunables.lct_peer_rtr_credits =
2793                         *ksocknal_tunables.ksnd_peerrtrcredits;
2794                 ni->ni_net->net_tunables_set = true;
2795         }
2796
2797
2798         if (ni->ni_interfaces[0] == NULL) {
2799                 rc = ksocknal_enumerate_interfaces(net);
2800                 if (rc <= 0)
2801                         goto fail_1;
2802
2803                 net->ksnn_ninterfaces = 1;
2804         } else {
2805                 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2806                         int    up;
2807
2808                         if (ni->ni_interfaces[i] == NULL)
2809                                 break;
2810
2811                         rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
2812                                 &net->ksnn_interfaces[i].ksni_ipaddr,
2813                                 &net->ksnn_interfaces[i].ksni_netmask);
2814
2815                         if (rc != 0) {
2816                                 CERROR("Can't get interface %s info: %d\n",
2817                                        ni->ni_interfaces[i], rc);
2818                                 goto fail_1;
2819                         }
2820
2821                         if (!up) {
2822                                 CERROR("Interface %s is down\n",
2823                                        ni->ni_interfaces[i]);
2824                                 goto fail_1;
2825                         }
2826
2827                         strlcpy(net->ksnn_interfaces[i].ksni_name,
2828                                 ni->ni_interfaces[i],
2829                                 sizeof(net->ksnn_interfaces[i].ksni_name));
2830
2831                 }
2832                 net->ksnn_ninterfaces = i;
2833         }
2834
2835         net_dev = dev_get_by_name(&init_net,
2836                                   net->ksnn_interfaces[0].ksni_name);
2837         if (net_dev != NULL) {
2838                 node_id = dev_to_node(&net_dev->dev);
2839                 ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
2840                 dev_put(net_dev);
2841         } else {
2842                 ni->ni_dev_cpt = CFS_CPT_ANY;
2843         }
2844
2845         /* call this before adding the net to ksocknal_data.ksnd_nets */
2846         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2847         if (rc != 0)
2848                 goto fail_1;
2849
2850         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2851                                 net->ksnn_interfaces[0].ksni_ipaddr);
2852         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2853
2854         ksocknal_data.ksnd_nnets++;
2855
2856         return 0;
2857
2858  fail_1:
2859         LIBCFS_FREE(net, sizeof(*net));
2860  fail_0:
2861         if (ksocknal_data.ksnd_nnets == 0)
2862                 ksocknal_base_shutdown();
2863
2864         return -ENETDOWN;
2865 }
2866
2867
2868 static void __exit ksocklnd_exit(void)
2869 {
2870         lnet_unregister_lnd(&the_ksocklnd);
2871 }
2872
2873 static int __init ksocklnd_init(void)
2874 {
2875         int rc;
2876
2877         /* check ksnr_connected/connecting field large enough */
2878         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2879         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2880
2881         /* initialize the_ksocklnd */
2882         the_ksocklnd.lnd_type     = SOCKLND;
2883         the_ksocklnd.lnd_startup  = ksocknal_startup;
2884         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2885         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2886         the_ksocklnd.lnd_send     = ksocknal_send;
2887         the_ksocklnd.lnd_recv     = ksocknal_recv;
2888         the_ksocklnd.lnd_notify   = ksocknal_notify;
2889         the_ksocklnd.lnd_query    = ksocknal_query;
2890         the_ksocklnd.lnd_accept   = ksocknal_accept;
2891
2892         rc = ksocknal_tunables_init();
2893         if (rc != 0)
2894                 return rc;
2895
2896         lnet_register_lnd(&the_ksocklnd);
2897
2898         return 0;
2899 }
2900
2901 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2902 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2903 MODULE_VERSION("2.8.0");
2904 MODULE_LICENSE("GPL");
2905
2906 module_init(ksocklnd_init);
2907 module_exit(ksocklnd_exit);