4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lnet/klnds/socklnd/socklnd.c
34 * Author: Zach Brown <zab@zabbo.net>
35 * Author: Peter J. Braam <braam@clusterfs.com>
36 * Author: Phil Schwan <phil@clusterfs.com>
37 * Author: Eric Barton <eric@bartonsoftware.com>
40 #include <linux/pci.h>
43 static lnd_t the_ksocklnd;
44 ksock_nal_data_t ksocknal_data;
46 static ksock_interface_t *
47 ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
49 ksock_net_t *net = ni->ni_data;
51 ksock_interface_t *iface;
53 for (i = 0; i < net->ksnn_ninterfaces; i++) {
54 LASSERT(i < LNET_MAX_INTERFACES);
55 iface = &net->ksnn_interfaces[i];
57 if (iface->ksni_ipaddr == ip)
64 static ksock_route_t *
65 ksocknal_create_route (__u32 ipaddr, int port)
69 LIBCFS_ALLOC (route, sizeof (*route));
73 atomic_set (&route->ksnr_refcount, 1);
74 route->ksnr_peer = NULL;
75 route->ksnr_retry_interval = 0; /* OK to connect at any time */
76 route->ksnr_ipaddr = ipaddr;
77 route->ksnr_port = port;
78 route->ksnr_scheduled = 0;
79 route->ksnr_connecting = 0;
80 route->ksnr_connected = 0;
81 route->ksnr_deleted = 0;
82 route->ksnr_conn_count = 0;
83 route->ksnr_share_count = 0;
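/* Typical usage sketch (hypothetical; it mirrors the flow in
 * ksocknal_add_peer() below and is shown here only for orientation):
 *
 *	route = ksocknal_create_route(ipaddr, port);	- refcount == 1
 *	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 *	ksocknal_add_route_locked(peer_ni, route);	- route list takes the ref
 *	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 */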
89 ksocknal_destroy_route (ksock_route_t *route)
91 LASSERT (atomic_read(&route->ksnr_refcount) == 0);
93 if (route->ksnr_peer != NULL)
94 ksocknal_peer_decref(route->ksnr_peer);
96 LIBCFS_FREE (route, sizeof (*route));
100 ksocknal_create_peer(ksock_peer_ni_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
102 int cpt = lnet_cpt_of_nid(id.nid, ni);
103 ksock_net_t *net = ni->ni_data;
104 ksock_peer_ni_t *peer_ni;
106 LASSERT(id.nid != LNET_NID_ANY);
107 LASSERT(id.pid != LNET_PID_ANY);
108 LASSERT(!in_interrupt());
110 LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
114 peer_ni->ksnp_ni = ni;
115 peer_ni->ksnp_id = id;
116 atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
117 peer_ni->ksnp_closing = 0;
118 peer_ni->ksnp_accepting = 0;
119 peer_ni->ksnp_proto = NULL;
120 peer_ni->ksnp_last_alive = 0;
121 peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
123 INIT_LIST_HEAD(&peer_ni->ksnp_conns);
124 INIT_LIST_HEAD(&peer_ni->ksnp_routes);
125 INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
126 INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
127 spin_lock_init(&peer_ni->ksnp_lock);
129 spin_lock_bh(&net->ksnn_lock);
131 if (net->ksnn_shutdown) {
132 spin_unlock_bh(&net->ksnn_lock);
134 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
135 CERROR("Can't create peer_ni: network shutdown\n");
141 spin_unlock_bh(&net->ksnn_lock);
148 ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
150 ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
152 CDEBUG (D_NET, "peer_ni %s %p deleted\n",
153 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
155 LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
156 LASSERT(peer_ni->ksnp_accepting == 0);
157 LASSERT(list_empty(&peer_ni->ksnp_conns));
158 LASSERT(list_empty(&peer_ni->ksnp_routes));
159 LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
160 LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
162 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
164 /* NB a peer_ni's connections and routes keep a reference on their peer_ni
165 * until they are destroyed, so we can be assured that _all_ state to
166 * do with this peer_ni has been cleaned up when its refcount drops to zero. */
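/* Illustrative sketch of that convention (hypothetical call sequence, not
 * taken from this file):
 *
 *	ksocknal_create_peer(&peer_ni, ni, id);		- refcount == 1 (caller)
 *	ksocknal_add_route_locked(peer_ni, route);	- route takes its own ref
 *	ksocknal_peer_decref(peer_ni);			- caller drops its ref
 *
 * peer_ni is only destroyed once the route (and any conns) drop theirs. */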
168 spin_lock_bh(&net->ksnn_lock);
170 spin_unlock_bh(&net->ksnn_lock);
174 ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
176 struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
177 struct list_head *tmp;
178 ksock_peer_ni_t *peer_ni;
180 list_for_each(tmp, peer_list) {
182 peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
184 LASSERT(!peer_ni->ksnp_closing);
186 if (peer_ni->ksnp_ni != ni)
189 if (peer_ni->ksnp_id.nid != id.nid ||
190 peer_ni->ksnp_id.pid != id.pid)
193 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
194 peer_ni, libcfs_id2str(id),
195 atomic_read(&peer_ni->ksnp_refcount));
202 ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
204 ksock_peer_ni_t *peer_ni;
206 read_lock(&ksocknal_data.ksnd_global_lock);
207 peer_ni = ksocknal_find_peer_locked(ni, id);
208 if (peer_ni != NULL) /* +1 ref for caller? */
209 ksocknal_peer_addref(peer_ni);
210 read_unlock(&ksocknal_data.ksnd_global_lock);
216 ksocknal_unlink_peer_locked (ksock_peer_ni_t *peer_ni)
220 ksock_interface_t *iface;
222 for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
223 LASSERT (i < LNET_MAX_INTERFACES);
224 ip = peer_ni->ksnp_passive_ips[i];
226 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
227 /* All IPs in peer_ni->ksnp_passive_ips[] come from the
228 * interface list, therefore the call must succeed. */
229 LASSERT (iface != NULL);
231 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
232 peer_ni, iface, iface->ksni_nroutes);
233 iface->ksni_npeers--;
236 LASSERT(list_empty(&peer_ni->ksnp_conns));
237 LASSERT(list_empty(&peer_ni->ksnp_routes));
238 LASSERT(!peer_ni->ksnp_closing);
239 peer_ni->ksnp_closing = 1;
240 list_del(&peer_ni->ksnp_list);
241 /* lose peerlist's ref */
242 ksocknal_peer_decref(peer_ni);
246 ksocknal_get_peer_info (lnet_ni_t *ni, int index,
247 lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
248 int *port, int *conn_count, int *share_count)
250 ksock_peer_ni_t *peer_ni;
251 struct list_head *ptmp;
252 ksock_route_t *route;
253 struct list_head *rtmp;
258 read_lock(&ksocknal_data.ksnd_global_lock);
260 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
261 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
262 peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
264 if (peer_ni->ksnp_ni != ni)
267 if (peer_ni->ksnp_n_passive_ips == 0 &&
268 list_empty(&peer_ni->ksnp_routes)) {
272 *id = peer_ni->ksnp_id;
282 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
286 *id = peer_ni->ksnp_id;
287 *myip = peer_ni->ksnp_passive_ips[j];
296 list_for_each(rtmp, &peer_ni->ksnp_routes) {
300 route = list_entry(rtmp, ksock_route_t,
303 *id = peer_ni->ksnp_id;
304 *myip = route->ksnr_myipaddr;
305 *peer_ip = route->ksnr_ipaddr;
306 *port = route->ksnr_port;
307 *conn_count = route->ksnr_conn_count;
308 *share_count = route->ksnr_share_count;
315 read_unlock(&ksocknal_data.ksnd_global_lock);
320 ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
322 ksock_peer_ni_t *peer_ni = route->ksnr_peer;
323 int type = conn->ksnc_type;
324 ksock_interface_t *iface;
326 conn->ksnc_route = route;
327 ksocknal_route_addref(route);
329 if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
330 if (route->ksnr_myipaddr == 0) {
331 /* route wasn't bound locally yet (the initial route) */
332 CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
333 libcfs_id2str(peer_ni->ksnp_id),
335 &conn->ksnc_myipaddr);
337 CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
338 "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
340 &route->ksnr_myipaddr,
341 &conn->ksnc_myipaddr);
343 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
344 route->ksnr_myipaddr);
346 iface->ksni_nroutes--;
348 route->ksnr_myipaddr = conn->ksnc_myipaddr;
349 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
350 route->ksnr_myipaddr);
352 iface->ksni_nroutes++;
355 route->ksnr_connected |= (1<<type);
356 route->ksnr_conn_count++;
358 /* Successful connection => further attempts can
359 * proceed immediately */
360 route->ksnr_retry_interval = 0;
364 ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
366 struct list_head *tmp;
368 ksock_route_t *route2;
370 LASSERT(!peer_ni->ksnp_closing);
371 LASSERT(route->ksnr_peer == NULL);
372 LASSERT(!route->ksnr_scheduled);
373 LASSERT(!route->ksnr_connecting);
374 LASSERT(route->ksnr_connected == 0);
376 /* LASSERT(unique) */
377 list_for_each(tmp, &peer_ni->ksnp_routes) {
378 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
380 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
381 CERROR("Duplicate route %s %pI4h\n",
382 libcfs_id2str(peer_ni->ksnp_id),
383 &route->ksnr_ipaddr);
388 route->ksnr_peer = peer_ni;
389 ksocknal_peer_addref(peer_ni);
390 /* peer_ni's routelist takes over my ref on 'route' */
391 list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
393 list_for_each(tmp, &peer_ni->ksnp_conns) {
394 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
396 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
399 ksocknal_associate_route_conn_locked(route, conn);
400 /* keep going (typed routes) */
405 ksocknal_del_route_locked (ksock_route_t *route)
407 ksock_peer_ni_t *peer_ni = route->ksnr_peer;
408 ksock_interface_t *iface;
410 struct list_head *ctmp;
411 struct list_head *cnxt;
413 LASSERT(!route->ksnr_deleted);
415 /* Close associated conns */
416 list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
417 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
419 if (conn->ksnc_route != route)
422 ksocknal_close_conn_locked(conn, 0);
425 if (route->ksnr_myipaddr != 0) {
426 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
427 route->ksnr_myipaddr);
429 iface->ksni_nroutes--;
432 route->ksnr_deleted = 1;
433 list_del(&route->ksnr_list);
434 ksocknal_route_decref(route); /* drop peer_ni's ref */
436 if (list_empty(&peer_ni->ksnp_routes) &&
437 list_empty(&peer_ni->ksnp_conns)) {
438 /* I've just removed the last route to a peer_ni with no active connections */
440 ksocknal_unlink_peer_locked(peer_ni);
445 ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
447 struct list_head *tmp;
448 ksock_peer_ni_t *peer_ni;
449 ksock_peer_ni_t *peer2;
450 ksock_route_t *route;
451 ksock_route_t *route2;
454 if (id.nid == LNET_NID_ANY ||
455 id.pid == LNET_PID_ANY)
458 /* Have a brand new peer_ni ready... */
459 rc = ksocknal_create_peer(&peer_ni, ni, id);
463 route = ksocknal_create_route (ipaddr, port);
465 ksocknal_peer_decref(peer_ni);
469 write_lock_bh(&ksocknal_data.ksnd_global_lock);
471 /* always called with a ref on ni, so shutdown can't have started */
472 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
474 peer2 = ksocknal_find_peer_locked(ni, id);
476 ksocknal_peer_decref(peer_ni);
479 /* peer_ni table takes my ref on peer_ni */
480 list_add_tail(&peer_ni->ksnp_list,
481 ksocknal_nid2peerlist(id.nid));
485 list_for_each(tmp, &peer_ni->ksnp_routes) {
486 route2 = list_entry(tmp, ksock_route_t, ksnr_list);
488 if (route2->ksnr_ipaddr == ipaddr)
493 if (route2 == NULL) {
494 ksocknal_add_route_locked(peer_ni, route);
495 route->ksnr_share_count++;
497 ksocknal_route_decref(route);
498 route2->ksnr_share_count++;
501 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
507 ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
510 ksock_route_t *route;
511 struct list_head *tmp;
512 struct list_head *nxt;
515 LASSERT(!peer_ni->ksnp_closing);
517 /* Extra ref prevents peer_ni disappearing until I'm done with it */
518 ksocknal_peer_addref(peer_ni);
520 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
521 route = list_entry(tmp, ksock_route_t, ksnr_list);
524 if (!(ip == 0 || route->ksnr_ipaddr == ip))
527 route->ksnr_share_count = 0;
528 /* This deletes associated conns too */
529 ksocknal_del_route_locked(route);
533 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
534 route = list_entry(tmp, ksock_route_t, ksnr_list);
535 nshared += route->ksnr_share_count;
539 /* remove everything else if there are no explicit entries left */
542 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
543 route = list_entry(tmp, ksock_route_t, ksnr_list);
545 /* we should only be removing auto-entries */
546 LASSERT(route->ksnr_share_count == 0);
547 ksocknal_del_route_locked(route);
550 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
551 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
553 ksocknal_close_conn_locked(conn, 0);
557 ksocknal_peer_decref(peer_ni);
558 /* NB peer_ni unlinks itself when last conn/route is removed */
562 ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
564 struct list_head zombies = LIST_HEAD_INIT(zombies);
565 struct list_head *ptmp;
566 struct list_head *pnxt;
567 ksock_peer_ni_t *peer_ni;
573 write_lock_bh(&ksocknal_data.ksnd_global_lock);
575 if (id.nid != LNET_NID_ANY) {
576 hi = (int)(ksocknal_nid2peerlist(id.nid) -
577 ksocknal_data.ksnd_peers);
581 hi = ksocknal_data.ksnd_peer_hash_size - 1;
584 for (i = lo; i <= hi; i++) {
585 list_for_each_safe(ptmp, pnxt,
586 &ksocknal_data.ksnd_peers[i]) {
587 peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
589 if (peer_ni->ksnp_ni != ni)
592 if (!((id.nid == LNET_NID_ANY ||
593 peer_ni->ksnp_id.nid == id.nid) &&
594 (id.pid == LNET_PID_ANY ||
595 peer_ni->ksnp_id.pid == id.pid)))
598 ksocknal_peer_addref(peer_ni); /* a ref for me... */
600 ksocknal_del_peer_locked(peer_ni, ip);
602 if (peer_ni->ksnp_closing &&
603 !list_empty(&peer_ni->ksnp_tx_queue)) {
604 LASSERT(list_empty(&peer_ni->ksnp_conns));
605 LASSERT(list_empty(&peer_ni->ksnp_routes));
607 list_splice_init(&peer_ni->ksnp_tx_queue,
611 ksocknal_peer_decref(peer_ni); /* ...till here */
613 rc = 0; /* matched! */
617 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
619 ksocknal_txlist_done(ni, &zombies, 1);
624 static ksock_conn_t *
625 ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
627 ksock_peer_ni_t *peer_ni;
628 struct list_head *ptmp;
630 struct list_head *ctmp;
633 read_lock(&ksocknal_data.ksnd_global_lock);
635 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
636 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
637 peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
639 LASSERT(!peer_ni->ksnp_closing);
641 if (peer_ni->ksnp_ni != ni)
644 list_for_each(ctmp, &peer_ni->ksnp_conns) {
648 conn = list_entry(ctmp, ksock_conn_t,
650 ksocknal_conn_addref(conn);
651 read_unlock(&ksocknal_data.ksnd_global_lock);
658 read_unlock(&ksocknal_data.ksnd_global_lock);
662 static ksock_sched_t *
663 ksocknal_choose_scheduler_locked(unsigned int cpt)
665 struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
666 ksock_sched_t *sched;
669 LASSERT(info->ksi_nthreads > 0);
671 sched = &info->ksi_scheds[0];
673 * NB: it's safe so far, but info->ksi_nthreads could be changed
674 * at runtime when we have dynamic LNet configuration, then we
675 * need to take care of this.
677 for (i = 1; i < info->ksi_nthreads; i++) {
678 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
679 sched = &info->ksi_scheds[i];
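/* Worked example of the selection above (hypothetical loads): with three
 * schedulers in this CPT carrying kss_nconns of 5, 2 and 4, the loop leaves
 * 'sched' pointing at the one with 2 connections, so each new conn is placed
 * on the least loaded scheduler thread of the CPT. */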
686 ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
688 ksock_net_t *net = ni->ni_data;
692 read_lock(&ksocknal_data.ksnd_global_lock);
694 nip = net->ksnn_ninterfaces;
695 LASSERT (nip <= LNET_MAX_INTERFACES);
697 /* Only offer interfaces for additional connections if I have more than one */
700 read_unlock(&ksocknal_data.ksnd_global_lock);
704 for (i = 0; i < nip; i++) {
705 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
706 LASSERT (ipaddrs[i] != 0);
709 read_unlock(&ksocknal_data.ksnd_global_lock);
714 ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
716 int best_netmatch = 0;
723 for (i = 0; i < nips; i++) {
727 this_xor = (ips[i] ^ iface->ksni_ipaddr);
728 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
731 best_netmatch < this_netmatch ||
732 (best_netmatch == this_netmatch &&
733 best_xor > this_xor)))
737 best_netmatch = this_netmatch;
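/* Worked example (illustrative addresses, not from the original source): for
 * iface->ksni_ipaddr = 192.168.1.10 with ksni_netmask = 255.255.255.0, peer
 * IP 192.168.1.20 gives this_xor = 0x1e and (this_xor & netmask) == 0, so
 * this_netmatch = 1; peer IP 10.0.0.5 gives a non-zero masked XOR and
 * this_netmatch = 0.  The on-subnet address wins, and ties on netmatch are
 * broken by the smaller XOR distance. */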
746 ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
748 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
749 ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
750 ksock_interface_t *iface;
751 ksock_interface_t *best_iface;
762 /* CAVEAT EMPTOR: We do all our interface matching with an
763 * exclusive hold of global lock at IRQ priority. We're only
764 * expecting to be dealing with small numbers of interfaces, so the
765 * O(n**3)-ness shouldn't matter */
767 /* Also note that I'm not going to return more than n_peerips
768 * interfaces, even if I have more myself */
770 write_lock_bh(global_lock);
772 LASSERT (n_peerips <= LNET_MAX_INTERFACES);
773 LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
775 /* Only match interfaces for additional connections
776 * if I have > 1 interface */
777 n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
778 MIN(n_peerips, net->ksnn_ninterfaces);
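/* Example of the calculation above (hypothetical counts): with 2 local
 * interfaces and a peer_ni advertising 3 IPs, n_ips = MIN(3, 2) = 2, so at
 * most two passive IPs are recorded; with a single local interface n_ips is
 * 0 and no matching is attempted at all. */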
780 for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
781 /* ^ yes really... */
783 /* If we have any new interfaces, first tick off all the
784 * peer_ni IPs that match old interfaces, then choose new
785 * interfaces to match the remaining peer_ni IPs.
786 * We don't forget interfaces we've stopped using; we might
787 * start using them again... */
789 if (i < peer_ni->ksnp_n_passive_ips) {
791 ip = peer_ni->ksnp_passive_ips[i];
792 best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
794 /* peer_ni passive ips are kept up to date */
795 LASSERT(best_iface != NULL);
797 /* choose a new interface */
798 LASSERT (i == peer_ni->ksnp_n_passive_ips);
804 for (j = 0; j < net->ksnn_ninterfaces; j++) {
805 iface = &net->ksnn_interfaces[j];
806 ip = iface->ksni_ipaddr;
808 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
809 if (peer_ni->ksnp_passive_ips[k] == ip)
812 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
815 k = ksocknal_match_peerip(iface, peerips, n_peerips);
816 xor = (ip ^ peerips[k]);
817 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
819 if (!(best_iface == NULL ||
820 best_netmatch < this_netmatch ||
821 (best_netmatch == this_netmatch &&
822 best_npeers > iface->ksni_npeers)))
826 best_netmatch = this_netmatch;
827 best_npeers = iface->ksni_npeers;
830 LASSERT(best_iface != NULL);
832 best_iface->ksni_npeers++;
833 ip = best_iface->ksni_ipaddr;
834 peer_ni->ksnp_passive_ips[i] = ip;
835 peer_ni->ksnp_n_passive_ips = i+1;
838 /* mark the best matching peer_ni IP used */
839 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
843 /* Overwrite input peer_ni IP addresses */
844 memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
846 write_unlock_bh(global_lock);
852 ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
853 __u32 *peer_ipaddrs, int npeer_ipaddrs)
855 ksock_route_t *newroute = NULL;
856 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
857 lnet_ni_t *ni = peer_ni->ksnp_ni;
858 ksock_net_t *net = ni->ni_data;
859 struct list_head *rtmp;
860 ksock_route_t *route;
861 ksock_interface_t *iface;
862 ksock_interface_t *best_iface;
869 /* CAVEAT EMPTOR: We do all our interface matching with an
870 * exclusive hold of global lock at IRQ priority. We're only
871 * expecting to be dealing with small numbers of interfaces, so the
872 * O(n**3)-ness here shouldn't matter */
874 write_lock_bh(global_lock);
876 if (net->ksnn_ninterfaces < 2) {
877 /* Only create additional connections
878 * if I have > 1 interface */
879 write_unlock_bh(global_lock);
883 LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
885 for (i = 0; i < npeer_ipaddrs; i++) {
886 if (newroute != NULL) {
887 newroute->ksnr_ipaddr = peer_ipaddrs[i];
889 write_unlock_bh(global_lock);
891 newroute = ksocknal_create_route(peer_ipaddrs[i], port);
892 if (newroute == NULL)
895 write_lock_bh(global_lock);
898 if (peer_ni->ksnp_closing) {
899 /* peer_ni got closed under me */
903 /* Already got a route? */
905 list_for_each(rtmp, &peer_ni->ksnp_routes) {
906 route = list_entry(rtmp, ksock_route_t, ksnr_list);
908 if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
920 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
922 /* Select interface to connect from */
923 for (j = 0; j < net->ksnn_ninterfaces; j++) {
924 iface = &net->ksnn_interfaces[j];
926 /* Using this interface already? */
927 list_for_each(rtmp, &peer_ni->ksnp_routes) {
928 route = list_entry(rtmp, ksock_route_t,
931 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
939 this_netmatch = (((iface->ksni_ipaddr ^
940 newroute->ksnr_ipaddr) &
941 iface->ksni_netmask) == 0) ? 1 : 0;
943 if (!(best_iface == NULL ||
944 best_netmatch < this_netmatch ||
945 (best_netmatch == this_netmatch &&
946 best_nroutes > iface->ksni_nroutes)))
950 best_netmatch = this_netmatch;
951 best_nroutes = iface->ksni_nroutes;
954 if (best_iface == NULL)
957 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
958 best_iface->ksni_nroutes++;
960 ksocknal_add_route_locked(peer_ni, newroute);
964 write_unlock_bh(global_lock);
965 if (newroute != NULL)
966 ksocknal_route_decref(newroute);
970 ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
977 rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
978 LASSERT(rc == 0); /* we succeeded before */
980 LIBCFS_ALLOC(cr, sizeof(*cr));
982 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
983 "%pI4h: memory exhausted\n", &peer_ip);
989 cr->ksncr_sock = sock;
991 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
993 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
994 wake_up(&ksocknal_data.ksnd_connd_waitq);
996 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
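/* Sketch of the consumer side of this queue (assumed behaviour of the connd
 * threads in socklnd_cb.c, shown here only for orientation):
 *
 *	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 *	cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
 *			ksock_connreq_t, ksncr_list);
 *	list_del(&cr->ksncr_list);
 *	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 *	ksocknal_create_conn(cr->ksncr_ni, NULL, cr->ksncr_sock,
 *			     SOCKLND_CONN_NONE);
 */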
1001 ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
1003 ksock_route_t *route;
1005 list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1006 if (route->ksnr_ipaddr == ipaddr)
1007 return route->ksnr_connecting;
1013 ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
1014 struct socket *sock, int type)
1016 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1017 struct list_head zombies = LIST_HEAD_INIT(zombies);
1018 lnet_process_id_t peerid;
1019 struct list_head *tmp;
1022 ksock_conn_t *conn2;
1023 ksock_peer_ni_t *peer_ni = NULL;
1024 ksock_peer_ni_t *peer2;
1025 ksock_sched_t *sched;
1026 struct ksock_hello_msg *hello;
1034 active = (route != NULL);
1036 LASSERT (active == (type != SOCKLND_CONN_NONE));
1038 LIBCFS_ALLOC(conn, sizeof(*conn));
1044 conn->ksnc_peer = NULL;
1045 conn->ksnc_route = NULL;
1046 conn->ksnc_sock = sock;
1047 /* 2 refs: one for the conn, plus an extra ref that prevents the socket
1048 * being closed before the connection is established */
1049 atomic_set (&conn->ksnc_sock_refcount, 2);
1050 conn->ksnc_type = type;
1051 ksocknal_lib_save_callback(sock, conn);
1052 atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1054 conn->ksnc_rx_ready = 0;
1055 conn->ksnc_rx_scheduled = 0;
1057 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1058 conn->ksnc_tx_ready = 0;
1059 conn->ksnc_tx_scheduled = 0;
1060 conn->ksnc_tx_carrier = NULL;
1061 atomic_set (&conn->ksnc_tx_nob, 0);
1063 LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1064 kshm_ips[LNET_MAX_INTERFACES]));
1065 if (hello == NULL) {
1070 /* stash conn's local and remote addrs */
1071 rc = ksocknal_lib_get_conn_addrs (conn);
1075 /* Find out/confirm peer_ni's NID and connection type and get the
1076 * vector of interfaces she's willing to let me connect to.
1077 * Passive connections use the listener timeout since the peer_ni sends eagerly */
1081 peer_ni = route->ksnr_peer;
1082 LASSERT(ni == peer_ni->ksnp_ni);
1084 /* Active connection sends HELLO eagerly */
1085 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1086 peerid = peer_ni->ksnp_id;
1088 write_lock_bh(global_lock);
1089 conn->ksnc_proto = peer_ni->ksnp_proto;
1090 write_unlock_bh(global_lock);
1092 if (conn->ksnc_proto == NULL) {
1093 conn->ksnc_proto = &ksocknal_protocol_v3x;
1094 #if SOCKNAL_VERSION_DEBUG
1095 if (*ksocknal_tunables.ksnd_protocol == 2)
1096 conn->ksnc_proto = &ksocknal_protocol_v2x;
1097 else if (*ksocknal_tunables.ksnd_protocol == 1)
1098 conn->ksnc_proto = &ksocknal_protocol_v1x;
1102 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1106 peerid.nid = LNET_NID_ANY;
1107 peerid.pid = LNET_PID_ANY;
1109 /* Passive, get protocol from peer_ni */
1110 conn->ksnc_proto = NULL;
1113 rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1117 LASSERT (rc == 0 || active);
1118 LASSERT (conn->ksnc_proto != NULL);
1119 LASSERT (peerid.nid != LNET_NID_ANY);
1121 cpt = lnet_cpt_of_nid(peerid.nid, ni);
1124 ksocknal_peer_addref(peer_ni);
1125 write_lock_bh(global_lock);
1127 rc = ksocknal_create_peer(&peer_ni, ni, peerid);
1131 write_lock_bh(global_lock);
1133 /* called with a ref on ni, so shutdown can't have started */
1134 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1136 peer2 = ksocknal_find_peer_locked(ni, peerid);
1137 if (peer2 == NULL) {
1138 /* NB this puts an "empty" peer_ni in the peer_ni
1139 * table (which takes my ref) */
1140 list_add_tail(&peer_ni->ksnp_list,
1141 ksocknal_nid2peerlist(peerid.nid));
1143 ksocknal_peer_decref(peer_ni);
1148 ksocknal_peer_addref(peer_ni);
1149 peer_ni->ksnp_accepting++;
1151 /* Am I already connecting to this guy? Resolve in
1152 * favour of higher NID... */
1153 if (peerid.nid < ni->ni_nid &&
1154 ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1156 warn = "connection race resolution";
1161 if (peer_ni->ksnp_closing ||
1162 (active && route->ksnr_deleted)) {
1163 /* peer_ni/route got closed under me */
1165 warn = "peer_ni/route removed";
1169 if (peer_ni->ksnp_proto == NULL) {
1170 /* Never connected before.
1171 * NB recv_hello may have returned EPROTO to signal my peer_ni
1172 * wants a different protocol than the one I asked for.
1174 LASSERT(list_empty(&peer_ni->ksnp_conns));
1176 peer_ni->ksnp_proto = conn->ksnc_proto;
1177 peer_ni->ksnp_incarnation = incarnation;
1180 if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1181 peer_ni->ksnp_incarnation != incarnation) {
1182 /* peer_ni rebooted or I've got the wrong protocol version */
1183 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1185 peer_ni->ksnp_proto = NULL;
1187 warn = peer_ni->ksnp_incarnation != incarnation ?
1188 "peer_ni rebooted" :
1189 "wrong proto version";
1199 warn = "lost conn race";
1202 warn = "retry with different protocol version";
1206 /* Refuse to duplicate an existing connection, unless this is a
1207 * loopback connection */
1208 if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1209 list_for_each(tmp, &peer_ni->ksnp_conns) {
1210 conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1212 if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1213 conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1214 conn2->ksnc_type != conn->ksnc_type)
1217 /* Reply on a passive connection attempt so the peer_ni
1218 * realises we're connected. */
1228 /* If the connection created by this route didn't bind to the IP
1229 * address the route connected to, the connection/route matching
1230 * code below probably isn't going to work. */
1232 route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1233 CERROR("Route %s %pI4h connected to %pI4h\n",
1234 libcfs_id2str(peer_ni->ksnp_id),
1235 &route->ksnr_ipaddr,
1236 &conn->ksnc_ipaddr);
1239 /* Search for a route corresponding to the new connection and
1240 * create an association. This allows incoming connections created
1241 * by routes in my peer_ni to match my own route entries so I don't
1242 * continually create duplicate routes. */
1243 list_for_each(tmp, &peer_ni->ksnp_routes) {
1244 route = list_entry(tmp, ksock_route_t, ksnr_list);
1246 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1249 ksocknal_associate_route_conn_locked(route, conn);
1253 conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
1254 peer_ni->ksnp_last_alive = ktime_get_real_seconds();
1255 peer_ni->ksnp_send_keepalive = 0;
1256 peer_ni->ksnp_error = 0;
1258 sched = ksocknal_choose_scheduler_locked(cpt);
1259 sched->kss_nconns++;
1260 conn->ksnc_scheduler = sched;
1262 conn->ksnc_tx_last_post = ktime_get_real_seconds();
1263 /* Set the deadline for the outgoing HELLO to drain */
1264 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1265 conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1266 smp_mb(); /* order with adding to peer_ni's conn list */
1268 list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1269 ksocknal_conn_addref(conn);
1271 ksocknal_new_packet(conn, 0);
1273 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1275 /* Take packets blocking for this connection. */
1276 list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1277 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1281 list_del(&tx->tx_list);
1282 ksocknal_queue_tx_locked(tx, conn);
1285 write_unlock_bh(global_lock);
1287 /* We've now got a new connection. Any errors from here on are just
1288 * like "normal" comms errors and we close the connection normally.
1289 * NB (a) we still have to send the reply HELLO for passive connections,
1291 * (b) normal I/O on the conn is blocked until I set up and call the socket callbacks. */
1295 CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1296 " incarnation:%lld sched[%d:%d]\n",
1297 libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1298 &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1299 conn->ksnc_port, incarnation, cpt,
1300 (int)(sched - &sched->kss_info->ksi_scheds[0]));
1303 /* additional routes after interface exchange? */
1304 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1305 hello->kshm_ips, hello->kshm_nips);
1307 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1309 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1312 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1313 kshm_ips[LNET_MAX_INTERFACES]));
1315 /* setup the socket AFTER I've received hello (it disables
1316 * SO_LINGER). I might call back to the acceptor who may want
1317 * to send a protocol version response and then close the
1318 * socket; this ensures the socket only tears down after the
1319 * response has been sent. */
1321 rc = ksocknal_lib_setup_sock(sock);
1323 write_lock_bh(global_lock);
1325 /* NB my callbacks block while I hold ksnd_global_lock */
1326 ksocknal_lib_set_callback(sock, conn);
1329 peer_ni->ksnp_accepting--;
1331 write_unlock_bh(global_lock);
1334 write_lock_bh(global_lock);
1335 if (!conn->ksnc_closing) {
1336 /* could be closed by another thread */
1337 ksocknal_close_conn_locked(conn, rc);
1339 write_unlock_bh(global_lock);
1340 } else if (ksocknal_connsock_addref(conn) == 0) {
1341 /* Allow I/O to proceed. */
1342 ksocknal_read_callback(conn);
1343 ksocknal_write_callback(conn);
1344 ksocknal_connsock_decref(conn);
1347 ksocknal_connsock_decref(conn);
1348 ksocknal_conn_decref(conn);
1352 if (!peer_ni->ksnp_closing &&
1353 list_empty(&peer_ni->ksnp_conns) &&
1354 list_empty(&peer_ni->ksnp_routes)) {
1355 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1356 list_del_init(&peer_ni->ksnp_tx_queue);
1357 ksocknal_unlink_peer_locked(peer_ni);
1360 write_unlock_bh(global_lock);
1364 CERROR("Not creating conn %s type %d: %s\n",
1365 libcfs_id2str(peerid), conn->ksnc_type, warn);
1367 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1368 libcfs_id2str(peerid), conn->ksnc_type, warn);
1373 /* Request retry by replying with CONN_NONE
1374 * ksnc_proto has been set already */
1375 conn->ksnc_type = SOCKLND_CONN_NONE;
1376 hello->kshm_nips = 0;
1377 ksocknal_send_hello(ni, conn, peerid.nid, hello);
1380 write_lock_bh(global_lock);
1381 peer_ni->ksnp_accepting--;
1382 write_unlock_bh(global_lock);
1385 ksocknal_txlist_done(ni, &zombies, 1);
1386 ksocknal_peer_decref(peer_ni);
1390 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1391 kshm_ips[LNET_MAX_INTERFACES]));
1393 LIBCFS_FREE(conn, sizeof(*conn));
1401 ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
1403 /* This just does the immediate housekeeping, and queues the
1404 * connection for the reaper to terminate.
1405 * Caller holds ksnd_global_lock exclusively in irq context */
1406 ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
1407 ksock_route_t *route;
1408 ksock_conn_t *conn2;
1409 struct list_head *tmp;
1411 LASSERT(peer_ni->ksnp_error == 0);
1412 LASSERT(!conn->ksnc_closing);
1413 conn->ksnc_closing = 1;
1415 /* ksnd_deathrow_conns takes over peer_ni's ref */
1416 list_del(&conn->ksnc_list);
1418 route = conn->ksnc_route;
1419 if (route != NULL) {
1420 /* dissociate conn from route... */
1421 LASSERT(!route->ksnr_deleted);
1422 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1425 list_for_each(tmp, &peer_ni->ksnp_conns) {
1426 conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1428 if (conn2->ksnc_route == route &&
1429 conn2->ksnc_type == conn->ksnc_type)
1435 route->ksnr_connected &= ~(1 << conn->ksnc_type);
1437 conn->ksnc_route = NULL;
1439 ksocknal_route_decref(route); /* drop conn's ref on route */
1442 if (list_empty(&peer_ni->ksnp_conns)) {
1443 /* No more connections to this peer_ni */
1445 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1448 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1450 /* throw them to the last connection...,
1451 * these TXs will be sent to /dev/null by the scheduler */
1452 list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1454 ksocknal_tx_prep(conn, tx);
1456 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1457 list_splice_init(&peer_ni->ksnp_tx_queue,
1458 &conn->ksnc_tx_queue);
1459 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1462 /* renegotiate protocol version */
1463 peer_ni->ksnp_proto = NULL;
1464 /* stash last conn close reason */
1465 peer_ni->ksnp_error = error;
1467 if (list_empty(&peer_ni->ksnp_routes)) {
1468 /* I've just closed last conn belonging to a
1469 * peer_ni with no routes to it */
1470 ksocknal_unlink_peer_locked(peer_ni);
1474 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1476 list_add_tail(&conn->ksnc_list,
1477 &ksocknal_data.ksnd_deathrow_conns);
1478 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1480 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1484 ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
1487 cfs_time_t last_alive = 0;
1489 /* There has been a connection failure or comms error; but I'll only
1490 * tell LNET I think the peer_ni is dead if it's to another kernel and
1491 * there are no connections or connection attempts in existence. */
1493 read_lock(&ksocknal_data.ksnd_global_lock);
1495 if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1496 list_empty(&peer_ni->ksnp_conns) &&
1497 peer_ni->ksnp_accepting == 0 &&
1498 ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1500 last_alive = peer_ni->ksnp_last_alive;
1503 read_unlock(&ksocknal_data.ksnd_global_lock);
1506 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
1511 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1513 ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
1516 struct list_head zlist = LIST_HEAD_INIT(zlist);
1518 /* NB safe to finalize TXs because closing of socket will
1519 * abort all buffered data */
1520 LASSERT(conn->ksnc_sock == NULL);
1522 spin_lock(&peer_ni->ksnp_lock);
1524 list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1525 if (tx->tx_conn != conn)
1528 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1530 tx->tx_msg.ksm_zc_cookies[0] = 0;
1531 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1532 list_del(&tx->tx_zc_list);
1533 list_add(&tx->tx_zc_list, &zlist);
1536 spin_unlock(&peer_ni->ksnp_lock);
1538 while (!list_empty(&zlist)) {
1539 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1541 list_del(&tx->tx_zc_list);
1542 ksocknal_tx_decref(tx);
1547 ksocknal_terminate_conn(ksock_conn_t *conn)
1549 /* This gets called by the reaper (guaranteed thread context) to
1550 * disengage the socket from its callbacks and close it.
1551 * ksnc_refcount will eventually hit zero, and then the reaper will destroy it. */
1553 ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
1554 ksock_sched_t *sched = conn->ksnc_scheduler;
1557 LASSERT(conn->ksnc_closing);
1559 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1560 spin_lock_bh(&sched->kss_lock);
1562 /* a closing conn is always ready to tx */
1563 conn->ksnc_tx_ready = 1;
1565 if (!conn->ksnc_tx_scheduled &&
1566 !list_empty(&conn->ksnc_tx_queue)) {
1567 list_add_tail(&conn->ksnc_tx_list,
1568 &sched->kss_tx_conns);
1569 conn->ksnc_tx_scheduled = 1;
1570 /* extra ref for scheduler */
1571 ksocknal_conn_addref(conn);
1573 wake_up (&sched->kss_waitq);
1576 spin_unlock_bh(&sched->kss_lock);
1578 /* serialise with callbacks */
1579 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1581 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1583 /* OK, so this conn may not be completely disengaged from its
1584 * scheduler yet, but it _has_ committed to terminate... */
1585 conn->ksnc_scheduler->kss_nconns--;
1587 if (peer_ni->ksnp_error != 0) {
1588 /* peer_ni's last conn closed in error */
1589 LASSERT(list_empty(&peer_ni->ksnp_conns));
1591 peer_ni->ksnp_error = 0; /* avoid multiple notifications */
1594 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1597 ksocknal_peer_failed(peer_ni);
1599 /* The socket is closed on the final put; either here, or in
1600 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1601 * when the connection was established, this will close the socket
1602 * immediately, aborting anything buffered in it. Any hung
1603 * zero-copy transmits will therefore complete in finite time. */
1604 ksocknal_connsock_decref(conn);
1608 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1610 /* Queue the conn for the reaper to destroy */
1612 LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1613 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1615 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1616 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1618 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1622 ksocknal_destroy_conn (ksock_conn_t *conn)
1624 cfs_time_t last_rcv;
1626 /* Final coup-de-grace of the reaper */
1627 CDEBUG (D_NET, "connection %p\n", conn);
1629 LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1630 LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1631 LASSERT (conn->ksnc_sock == NULL);
1632 LASSERT (conn->ksnc_route == NULL);
1633 LASSERT (!conn->ksnc_tx_scheduled);
1634 LASSERT (!conn->ksnc_rx_scheduled);
1635 LASSERT(list_empty(&conn->ksnc_tx_queue));
1637 /* complete current receive if any */
1638 switch (conn->ksnc_rx_state) {
1639 case SOCKNAL_RX_LNET_PAYLOAD:
1640 last_rcv = conn->ksnc_rx_deadline -
1641 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1642 CERROR("Completing partial receive from %s[%d], "
1643 "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1644 "last alive is %ld secs ago\n",
1645 libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1646 &conn->ksnc_ipaddr, conn->ksnc_port,
1647 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1648 cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(), last_rcv)));
1650 lnet_finalize (conn->ksnc_peer->ksnp_ni,
1651 conn->ksnc_cookie, -EIO);
1653 case SOCKNAL_RX_LNET_HEADER:
1654 if (conn->ksnc_rx_started)
1655 CERROR("Incomplete receive of lnet header from %s, "
1656 "ip %pI4h:%d, with error, protocol: %d.x.\n",
1657 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1658 &conn->ksnc_ipaddr, conn->ksnc_port,
1659 conn->ksnc_proto->pro_version);
1661 case SOCKNAL_RX_KSM_HEADER:
1662 if (conn->ksnc_rx_started)
1663 CERROR("Incomplete receive of ksock message from %s, "
1664 "ip %pI4h:%d, with error, protocol: %d.x.\n",
1665 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1666 &conn->ksnc_ipaddr, conn->ksnc_port,
1667 conn->ksnc_proto->pro_version);
1669 case SOCKNAL_RX_SLOP:
1670 if (conn->ksnc_rx_started)
1671 CERROR("Incomplete receive of slops from %s, "
1672 "ip %pI4h:%d, with error\n",
1673 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1674 &conn->ksnc_ipaddr, conn->ksnc_port);
1681 ksocknal_peer_decref(conn->ksnc_peer);
1683 LIBCFS_FREE (conn, sizeof (*conn));
1687 ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
1690 struct list_head *ctmp;
1691 struct list_head *cnxt;
1694 list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1695 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1698 conn->ksnc_ipaddr == ipaddr) {
1700 ksocknal_close_conn_locked (conn, why);
1708 ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1710 ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
1711 __u32 ipaddr = conn->ksnc_ipaddr;
1714 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1716 count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1718 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1724 ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
1726 ksock_peer_ni_t *peer_ni;
1727 struct list_head *ptmp;
1728 struct list_head *pnxt;
1734 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1736 if (id.nid != LNET_NID_ANY)
1737 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1740 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1743 for (i = lo; i <= hi; i++) {
1744 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1746 peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
1748 if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1749 (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1752 count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1756 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1758 /* wildcards always succeed */
1759 if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1762 return (count == 0 ? -ENOENT : 0);
1766 ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1768 /* The router is telling me she's been notified of a change in
1769 * gateway state.... */
1770 lnet_process_id_t id = {0};
1773 id.pid = LNET_PID_ANY;
1775 CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1776 alive ? "up" : "down");
1779 /* If the gateway crashed, close all open connections... */
1780 ksocknal_close_matching_conns (id, 0);
1784 /* ...otherwise do nothing. We can only establish new connections
1785 * if we have autoroutes, and these connect on demand. */
1789 ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1792 time64_t last_alive = 0;
1793 time64_t now = ktime_get_real_seconds();
1794 ksock_peer_ni_t *peer_ni = NULL;
1795 rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1796 lnet_process_id_t id = {
1798 .pid = LNET_PID_LUSTRE,
1803 peer_ni = ksocknal_find_peer_locked(ni, id);
1804 if (peer_ni != NULL) {
1805 struct list_head *tmp;
1809 list_for_each(tmp, &peer_ni->ksnp_conns) {
1810 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1811 bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1813 if (bufnob < conn->ksnc_tx_bufnob) {
1814 /* something got ACKed */
1815 conn->ksnc_tx_deadline =
1816 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1817 peer_ni->ksnp_last_alive = now;
1818 conn->ksnc_tx_bufnob = bufnob;
1822 last_alive = peer_ni->ksnp_last_alive;
1823 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1829 if (last_alive != 0)
1832 CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
1833 libcfs_nid2str(nid), peer_ni,
1834 last_alive ? cfs_duration_sec(now - last_alive) : -1,
1840 ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1842 write_lock_bh(glock);
1844 peer_ni = ksocknal_find_peer_locked(ni, id);
1845 if (peer_ni != NULL)
1846 ksocknal_launch_all_connections_locked(peer_ni);
1848 write_unlock_bh(glock);
1853 ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
1857 struct list_head *tmp;
1860 for (index = 0; ; index++) {
1861 read_lock(&ksocknal_data.ksnd_global_lock);
1866 list_for_each(tmp, &peer_ni->ksnp_conns) {
1868 conn = list_entry(tmp, ksock_conn_t,
1870 ksocknal_conn_addref(conn);
1875 read_unlock(&ksocknal_data.ksnd_global_lock);
1880 ksocknal_lib_push_conn (conn);
1881 ksocknal_conn_decref(conn);
1886 ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
1888 struct list_head *start;
1889 struct list_head *end;
1890 struct list_head *tmp;
1892 unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
1894 if (id.nid == LNET_NID_ANY) {
1895 start = &ksocknal_data.ksnd_peers[0];
1896 end = &ksocknal_data.ksnd_peers[hsize - 1];
1898 start = end = ksocknal_nid2peerlist(id.nid);
1901 for (tmp = start; tmp <= end; tmp++) {
1902 int peer_off; /* searching offset in peer_ni hash table */
1904 for (peer_off = 0; ; peer_off++) {
1905 ksock_peer_ni_t *peer_ni;
1908 read_lock(&ksocknal_data.ksnd_global_lock);
1909 list_for_each_entry(peer_ni, tmp, ksnp_list) {
1910 if (!((id.nid == LNET_NID_ANY ||
1911 id.nid == peer_ni->ksnp_id.nid) &&
1912 (id.pid == LNET_PID_ANY ||
1913 id.pid == peer_ni->ksnp_id.pid)))
1916 if (i++ == peer_off) {
1917 ksocknal_peer_addref(peer_ni);
1921 read_unlock(&ksocknal_data.ksnd_global_lock);
1923 if (i == 0) /* no match */
1927 ksocknal_push_peer(peer_ni);
1928 ksocknal_peer_decref(peer_ni);
1935 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1937 ksock_net_t *net = ni->ni_data;
1938 ksock_interface_t *iface;
1942 struct list_head *ptmp;
1943 ksock_peer_ni_t *peer_ni;
1944 struct list_head *rtmp;
1945 ksock_route_t *route;
1947 if (ipaddress == 0 ||
1951 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1953 iface = ksocknal_ip2iface(ni, ipaddress);
1954 if (iface != NULL) {
1955 /* silently ignore dups */
1957 } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1960 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1962 iface->ksni_ipaddr = ipaddress;
1963 iface->ksni_netmask = netmask;
1964 iface->ksni_nroutes = 0;
1965 iface->ksni_npeers = 0;
1967 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1968 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1969 peer_ni = list_entry(ptmp, ksock_peer_ni_t,
1972 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1973 if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1974 iface->ksni_npeers++;
1976 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1977 route = list_entry(rtmp,
1981 if (route->ksnr_myipaddr == ipaddress)
1982 iface->ksni_nroutes++;
1988 /* NB only new connections will pay attention to the new interface! */
1991 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1997 ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
1999 struct list_head *tmp;
2000 struct list_head *nxt;
2001 ksock_route_t *route;
2006 for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2007 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2008 for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2009 peer_ni->ksnp_passive_ips[j-1] =
2010 peer_ni->ksnp_passive_ips[j];
2011 peer_ni->ksnp_n_passive_ips--;
2015 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2016 route = list_entry(tmp, ksock_route_t, ksnr_list);
2018 if (route->ksnr_myipaddr != ipaddr)
2021 if (route->ksnr_share_count != 0) {
2022 /* Manually created; keep, but unbind */
2023 route->ksnr_myipaddr = 0;
2025 ksocknal_del_route_locked(route);
2029 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2030 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2032 if (conn->ksnc_myipaddr == ipaddr)
2033 ksocknal_close_conn_locked (conn, 0);
2038 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2040 ksock_net_t *net = ni->ni_data;
2042 struct list_head *tmp;
2043 struct list_head *nxt;
2044 ksock_peer_ni_t *peer_ni;
2049 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2051 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2052 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2054 if (!(ipaddress == 0 ||
2055 ipaddress == this_ip))
2060 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2061 net->ksnn_interfaces[j-1] =
2062 net->ksnn_interfaces[j];
2064 net->ksnn_ninterfaces--;
2066 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2067 list_for_each_safe(tmp, nxt,
2068 &ksocknal_data.ksnd_peers[j]) {
2069 peer_ni = list_entry(tmp, ksock_peer_ni_t,
2072 if (peer_ni->ksnp_ni != ni)
2075 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2080 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2086 ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2088 lnet_process_id_t id = {0};
2089 struct libcfs_ioctl_data *data = arg;
2093 case IOC_LIBCFS_GET_INTERFACE: {
2094 ksock_net_t *net = ni->ni_data;
2095 ksock_interface_t *iface;
2097 read_lock(&ksocknal_data.ksnd_global_lock);
2099 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2103 iface = &net->ksnn_interfaces[data->ioc_count];
2105 data->ioc_u32[0] = iface->ksni_ipaddr;
2106 data->ioc_u32[1] = iface->ksni_netmask;
2107 data->ioc_u32[2] = iface->ksni_npeers;
2108 data->ioc_u32[3] = iface->ksni_nroutes;
2111 read_unlock(&ksocknal_data.ksnd_global_lock);
2115 case IOC_LIBCFS_ADD_INTERFACE:
2116 return ksocknal_add_interface(ni,
2117 data->ioc_u32[0], /* IP address */
2118 data->ioc_u32[1]); /* net mask */
2120 case IOC_LIBCFS_DEL_INTERFACE:
2121 return ksocknal_del_interface(ni,
2122 data->ioc_u32[0]); /* IP address */
2124 case IOC_LIBCFS_GET_PEER: {
2129 int share_count = 0;
2131 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2132 &id, &myip, &ip, &port,
2133 &conn_count, &share_count);
2137 data->ioc_nid = id.nid;
2138 data->ioc_count = share_count;
2139 data->ioc_u32[0] = ip;
2140 data->ioc_u32[1] = port;
2141 data->ioc_u32[2] = myip;
2142 data->ioc_u32[3] = conn_count;
2143 data->ioc_u32[4] = id.pid;
2147 case IOC_LIBCFS_ADD_PEER:
2148 id.nid = data->ioc_nid;
2149 id.pid = LNET_PID_LUSTRE;
2150 return ksocknal_add_peer (ni, id,
2151 data->ioc_u32[0], /* IP */
2152 data->ioc_u32[1]); /* port */
2154 case IOC_LIBCFS_DEL_PEER:
2155 id.nid = data->ioc_nid;
2156 id.pid = LNET_PID_ANY;
2157 return ksocknal_del_peer (ni, id,
2158 data->ioc_u32[0]); /* IP */
2160 case IOC_LIBCFS_GET_CONN: {
2164 ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2169 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2171 data->ioc_count = txmem;
2172 data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
2173 data->ioc_flags = nagle;
2174 data->ioc_u32[0] = conn->ksnc_ipaddr;
2175 data->ioc_u32[1] = conn->ksnc_port;
2176 data->ioc_u32[2] = conn->ksnc_myipaddr;
2177 data->ioc_u32[3] = conn->ksnc_type;
2178 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2179 data->ioc_u32[5] = rxmem;
2180 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2181 ksocknal_conn_decref(conn);
2185 case IOC_LIBCFS_CLOSE_CONNECTION:
2186 id.nid = data->ioc_nid;
2187 id.pid = LNET_PID_ANY;
2188 return ksocknal_close_matching_conns (id,
2191 case IOC_LIBCFS_REGISTER_MYNID:
2192 /* Ignore if this is a noop */
2193 if (data->ioc_nid == ni->ni_nid)
2196 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2197 libcfs_nid2str(data->ioc_nid),
2198 libcfs_nid2str(ni->ni_nid));
2201 case IOC_LIBCFS_PUSH_CONNECTION:
2202 id.nid = data->ioc_nid;
2203 id.pid = LNET_PID_ANY;
2204 return ksocknal_push(ni, id);
2213 ksocknal_free_buffers (void)
2215 LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2217 if (ksocknal_data.ksnd_sched_info != NULL) {
2218 struct ksock_sched_info *info;
2221 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2222 if (info->ksi_scheds != NULL) {
2223 LIBCFS_FREE(info->ksi_scheds,
2224 info->ksi_nthreads_max *
2225 sizeof(info->ksi_scheds[0]));
2228 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2231 LIBCFS_FREE (ksocknal_data.ksnd_peers,
2232 sizeof(struct list_head) *
2233 ksocknal_data.ksnd_peer_hash_size);
2235 spin_lock(&ksocknal_data.ksnd_tx_lock);
2237 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2238 struct list_head zlist;
2241 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2242 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2243 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2245 while (!list_empty(&zlist)) {
2246 tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2247 list_del(&tx->tx_list);
2248 LIBCFS_FREE(tx, tx->tx_desc_size);
2251 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2256 ksocknal_base_shutdown(void)
2258 struct ksock_sched_info *info;
2259 ksock_sched_t *sched;
2263 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2264 atomic_read (&libcfs_kmemory));
2265 LASSERT (ksocknal_data.ksnd_nnets == 0);
2267 switch (ksocknal_data.ksnd_init) {
2271 case SOCKNAL_INIT_ALL:
2272 case SOCKNAL_INIT_DATA:
2273 LASSERT (ksocknal_data.ksnd_peers != NULL);
2274 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2275 LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2278 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2279 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2280 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2281 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2282 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2284 if (ksocknal_data.ksnd_sched_info != NULL) {
2285 cfs_percpt_for_each(info, i,
2286 ksocknal_data.ksnd_sched_info) {
2287 if (info->ksi_scheds == NULL)
2290 for (j = 0; j < info->ksi_nthreads_max; j++) {
2292 sched = &info->ksi_scheds[j];
2293 LASSERT(list_empty(&sched->kss_tx_conns));
2295 LASSERT(list_empty(&sched->kss_rx_conns));
2297 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2299 LASSERT(sched->kss_nconns == 0);
2304 /* flag threads to terminate; wake and wait for them to die */
2305 ksocknal_data.ksnd_shuttingdown = 1;
2306 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2307 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2309 if (ksocknal_data.ksnd_sched_info != NULL) {
2310 cfs_percpt_for_each(info, i,
2311 ksocknal_data.ksnd_sched_info) {
2312 if (info->ksi_scheds == NULL)
2315 for (j = 0; j < info->ksi_nthreads_max; j++) {
2316 sched = &info->ksi_scheds[j];
2317 wake_up_all(&sched->kss_waitq);
2323 read_lock(&ksocknal_data.ksnd_global_lock);
2324 while (ksocknal_data.ksnd_nthreads != 0) {
2327 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2328 "waiting for %d threads to terminate\n",
2329 ksocknal_data.ksnd_nthreads);
2330 read_unlock(&ksocknal_data.ksnd_global_lock);
2331 set_current_state(TASK_UNINTERRUPTIBLE);
2332 schedule_timeout(cfs_time_seconds(1));
2333 read_lock(&ksocknal_data.ksnd_global_lock);
2335 read_unlock(&ksocknal_data.ksnd_global_lock);
2337 ksocknal_free_buffers();
2339 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2343 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2344 atomic_read (&libcfs_kmemory));
2346 module_put(THIS_MODULE);
2349 static __u64 ksocknal_new_incarnation(void)
2353 /* The incarnation number is the time this module loaded and it
2354 * identifies this particular instance of the socknal. Hopefully
2355 * we won't be able to reboot more frequently than 1MHz for the
2356 * foreseeable future :) */
2358 do_gettimeofday(&tv);
2360 return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
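/* Example (illustrative values): tv_sec = 1500000000 and tv_usec = 123456
 * give an incarnation of 1500000000123456.  A node that restarts always gets
 * a larger value, which is how peers detect the reboot (see the
 * ksnp_incarnation check in ksocknal_create_conn()). */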
2364 ksocknal_base_startup(void)
2366 struct ksock_sched_info *info;
2370 LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2371 LASSERT (ksocknal_data.ksnd_nnets == 0);
2373 memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2375 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2376 LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2377 sizeof(struct list_head) *
2378 ksocknal_data.ksnd_peer_hash_size);
2379 if (ksocknal_data.ksnd_peers == NULL)
2382 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2383 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2385 rwlock_init(&ksocknal_data.ksnd_global_lock);
2386 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2388 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2389 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2390 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2391 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2392 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2394 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2395 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2396 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2397 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2399 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2400 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2402 /* NB memset above zeros whole of ksocknal_data */
2404 /* flag lists/ptrs/locks initialised */
2405 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2406 try_module_get(THIS_MODULE);
2408 ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2410 if (ksocknal_data.ksnd_sched_info == NULL)
2413 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2414 ksock_sched_t *sched;
2417 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2418 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2419 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2421 /* max to half of CPUs, assume another half should be
2422 * reserved for upper layer modules */
2423 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
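/* Example of the clamp above (hypothetical CPU count): with 8 CPUs in this
 * CPT and the nscheds tunable unset, nthrs >> 1 == 4, so (assuming
 * SOCKNAL_NSCHEDS <= 4) four scheduler threads are allowed, leaving the
 * other half of the CPUs for upper layer modules. */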
2426 info->ksi_nthreads_max = nthrs;
2429 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2430 info->ksi_nthreads_max * sizeof(*sched));
2431 if (info->ksi_scheds == NULL)
2434 for (; nthrs > 0; nthrs--) {
2435 sched = &info->ksi_scheds[nthrs - 1];
2437 sched->kss_info = info;
2438 spin_lock_init(&sched->kss_lock);
2439 INIT_LIST_HEAD(&sched->kss_rx_conns);
2440 INIT_LIST_HEAD(&sched->kss_tx_conns);
2441 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2442 init_waitqueue_head(&sched->kss_waitq);
2446 ksocknal_data.ksnd_connd_starting = 0;
2447 ksocknal_data.ksnd_connd_failed_stamp = 0;
2448 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2449 /* must have at least 2 connds to remain responsive to accepts while
2451 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2452 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2454 if (*ksocknal_tunables.ksnd_nconnds_max <
2455 *ksocknal_tunables.ksnd_nconnds) {
2456 ksocknal_tunables.ksnd_nconnds_max =
2457 ksocknal_tunables.ksnd_nconnds;
2460 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2462 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2463 ksocknal_data.ksnd_connd_starting++;
2464 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2467 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2468 rc = ksocknal_thread_start(ksocknal_connd,
2469 (void *)((uintptr_t)i), name);
2471 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2472 ksocknal_data.ksnd_connd_starting--;
2473 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2474 CERROR("Can't spawn socknal connd: %d\n", rc);
2479 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2481 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2485 /* flag everything initialised */
2486 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2491 ksocknal_base_shutdown();
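/*
 * Dump any peer_ni still hashed for this NI, with its routes and conns;
 * called from ksocknal_shutdown() while it waits for peers to disconnect.
 */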
static void
ksocknal_debug_peerhash(lnet_ni_t *ni)
{
	ksock_peer_ni_t *peer_ni = NULL;
	struct list_head *tmp;
	int i;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni == ni)
				break;

			peer_ni = NULL;
		}
	}

	if (peer_ni != NULL) {
		ksock_route_t *route;
		ksock_conn_t *conn;

		CWARN("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
		      "closing %d, accepting %d, err %d, zcookie %llu, "
		      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
		      atomic_read(&peer_ni->ksnp_refcount),
		      peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
		      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
		      peer_ni->ksnp_zc_next_cookie,
		      !list_empty(&peer_ni->ksnp_tx_queue),
		      !list_empty(&peer_ni->ksnp_zc_req_list));

		list_for_each(tmp, &peer_ni->ksnp_routes) {
			route = list_entry(tmp, ksock_route_t, ksnr_list);
			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
			      "del %d\n", atomic_read(&route->ksnr_refcount),
			      route->ksnr_scheduled, route->ksnr_connecting,
			      route->ksnr_connected, route->ksnr_deleted);
		}

		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
			CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
			      atomic_read(&conn->ksnc_conn_refcount),
			      atomic_read(&conn->ksnc_sock_refcount),
			      conn->ksnc_type, conn->ksnc_closing);
		}
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
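/*
 * Tear down one net: flag it shutting down, delete every peer, wait for
 * peer_ni state to drain, then free the net and run the global cleanup if
 * it was the last net.
 */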
void
ksocknal_shutdown(lnet_ni_t *ni)
{
	ksock_net_t *net = ni->ni_data;
	int i;
	lnet_process_id_t anyid = {0};

	anyid.nid = LNET_NID_ANY;
	anyid.pid = LNET_PID_ANY;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);

	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_shutdown = 1;			/* prevent new peers */
	spin_unlock_bh(&net->ksnn_lock);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);

	/* Wait for all peer_ni state to clean up */
	i = 2;
	spin_lock_bh(&net->ksnn_lock);
	while (net->ksnn_npeers != 0) {
		spin_unlock_bh(&net->ksnn_lock);

		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "waiting for %d peers to disconnect\n",
		       net->ksnn_npeers);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));

		ksocknal_debug_peerhash(ni);

		spin_lock_bh(&net->ksnn_lock);
	}
	spin_unlock_bh(&net->ksnn_lock);

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
	}

	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
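/*
 * Autoconfiguration path: probe the system's IP interfaces, skip loopback
 * and anything that is down, and record up to LNET_MAX_INTERFACES of them
 * in the net.  Returns the number of usable interfaces found.
 */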
static int
ksocknal_enumerate_interfaces(ksock_net_t *net)
{
	char **names;
	int i, j, rc, n;

	n = lnet_ipif_enumerate(&names);
	if (n <= 0) {
		CERROR("Can't enumerate interfaces: %d\n", n);
		return n;
	}

	for (i = j = 0; i < n; i++) {
		int up;
		__u32 ip, mask;

		if (!strcmp(names[i], "lo")) /* skip the loopback IF */
			continue;
		rc = lnet_ipif_query(names[i], &up, &ip, &mask);
		if (rc != 0) {
			CWARN("Can't get interface %s info: %d\n",
			      names[i], rc);
			continue;
		}
		if (!up) {
			CWARN("Ignoring interface %s (down)\n", names[i]);
			continue;
		}
		if (j == LNET_MAX_INTERFACES) {
			CWARN("Ignoring interface %s (too many interfaces)\n",
			      names[i]);
			continue;
		}
		net->ksnn_interfaces[j].ksni_ipaddr = ip;
		net->ksnn_interfaces[j].ksni_netmask = mask;
		strlcpy(net->ksnn_interfaces[j].ksni_name,
			names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
		j++;
	}

	lnet_ipif_free_enumeration(names, n);
	if (j == 0)
		CERROR("Can't find any usable interfaces\n");
	return j;
}
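/*
 * Count how many of this net's interfaces (ignoring any ":alias" suffix)
 * are not already used by a configured net; a non-zero result tells the
 * caller that extra scheduler threads may be needed.
 */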
static int
ksocknal_search_new_ipif(ksock_net_t *net)
{
	int new_ipif = 0;
	int i;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
		char *colon = strchr(ifnam, ':');
		int found = 0;
		ksock_net_t *tmp;
		int j;

		if (colon != NULL) /* ignore alias device */
			*colon = 0;

		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
				    ksnn_list) {
			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
				char *ifnam2 =
					&tmp->ksnn_interfaces[j].ksni_name[0];
				char *colon2 = strchr(ifnam2, ':');

				if (colon2 != NULL) /* ignore alias device */
					*colon2 = 0;
				found = strcmp(ifnam, ifnam2) == 0;
				if (colon2 != NULL) /* restore the name */
					*colon2 = ':';
			}
		}

		new_ipif += !found;
		if (colon != NULL) /* restore the name */
			*colon = ':';
	}

	return new_ipif;
}
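/*
 * Start scheduler threads for one CPT: size the pool from the CPT weight
 * and the ksnd_nscheds tunable on first use, or add at most two threads
 * when a new interface shows up later.
 */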
static int
ksocknal_start_schedulers(struct ksock_sched_info *info)
{
	int nthrs;
	int rc = 0;
	int i;

	if (info->ksi_nthreads == 0) {
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = info->ksi_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       info->ksi_cpt);
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
		}
		nthrs = min(nthrs, info->ksi_nthreads_max);
	} else {
		LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
		/* start at most two more threads when a new interface is
		 * added */
		nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
	}

	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];
		ksock_sched_t *sched;

		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
			 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));

		rc = ksocknal_thread_start(ksocknal_scheduler,
					   (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       info->ksi_cpt, info->ksi_nthreads + i, rc);
		break;
	}

	info->ksi_nthreads += i;
	return rc;
}
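/*
 * Make sure every CPT this NI is bound to has scheduler threads, starting
 * them only where none run yet or where a new interface was added.
 */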
int
ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
{
	int newif = ksocknal_search_new_ipif(net);
	int rc;
	int i;

	if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
		return -EINVAL;

	for (i = 0; i < ncpts; i++) {
		struct ksock_sched_info *info;
		int cpt = (cpts == NULL) ? i : cpts[i];

		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
		info = ksocknal_data.ksnd_sched_info[cpt];

		if (!newif && info->ksi_nthreads > 0)
			continue;

		rc = ksocknal_start_schedulers(info);
		if (rc != 0)
			return rc;
	}
	return 0;
}
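/*
 * LND startup hook for one NI: run the one-time global init if needed,
 * allocate the ksock_net, resolve its interfaces (either those named in an
 * e.g. networks="tcp0(eth0)" style configuration or an autodetected one),
 * start scheduler threads and derive the NID from the first interface's
 * IP address.
 */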
int
ksocknal_startup(lnet_ni_t *ni)
{
	ksock_net_t *net;
	int rc;
	int i;
	struct net_device *net_dev;
	int node_id;

	LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);

	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	spin_lock_init(&net->ksnn_lock);
	net->ksnn_incarnation = ksocknal_new_incarnation();
	ni->ni_data = net;
	if (!ni->ni_net->net_tunables_set) {
		ni->ni_net->net_tunables.lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;
		ni->ni_net->net_tunables.lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;
		ni->ni_net->net_tunables.lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;
		ni->ni_net->net_tunables.lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;
		ni->ni_net->net_tunables_set = true;
	}

	if (ni->ni_interfaces[0] == NULL) {
		rc = ksocknal_enumerate_interfaces(net);
		if (rc <= 0)
			goto fail_1;

		net->ksnn_ninterfaces = 1;
	} else {
		for (i = 0; i < LNET_MAX_INTERFACES; i++) {
			int up;

			if (ni->ni_interfaces[i] == NULL)
				break;

			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
				&net->ksnn_interfaces[i].ksni_ipaddr,
				&net->ksnn_interfaces[i].ksni_netmask);
			if (rc != 0) {
				CERROR("Can't get interface %s info: %d\n",
				       ni->ni_interfaces[i], rc);
				goto fail_1;
			}

			if (!up) {
				CERROR("Interface %s is down\n",
				       ni->ni_interfaces[i]);
				goto fail_1;
			}

			strlcpy(net->ksnn_interfaces[i].ksni_name,
				ni->ni_interfaces[i],
				sizeof(net->ksnn_interfaces[i].ksni_name));
		}
		net->ksnn_ninterfaces = i;
	}

	net_dev = dev_get_by_name(&init_net,
				  net->ksnn_interfaces[0].ksni_name);
	if (net_dev != NULL) {
		node_id = dev_to_node(&net_dev->dev);
		ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
		dev_put(net_dev);
	} else {
		ni->ni_dev_cpt = CFS_CPT_ANY;
	}

	/* start the threads before adding this net to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
				net->ksnn_interfaces[0].ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
	ksocknal_data.ksnd_nnets++;

	return 0;

fail_1:
	LIBCFS_FREE(net, sizeof(*net));
fail_0:
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
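/* Module entry points: register and unregister the socklnd with LNet. */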
static void __exit ksocklnd_exit(void)
{
	lnet_unregister_lnd(&the_ksocklnd);
}

static int __init ksocklnd_init(void)
{
	int rc;

	/* check that the ksnr_connected/connecting bitfields are large enough */
	CLASSERT(SOCKLND_CONN_NTYPES <= 4);
	CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);

	/* initialize the_ksocklnd */
	the_ksocklnd.lnd_type     = SOCKLND;
	the_ksocklnd.lnd_startup  = ksocknal_startup;
	the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
	the_ksocklnd.lnd_ctl      = ksocknal_ctl;
	the_ksocklnd.lnd_send     = ksocknal_send;
	the_ksocklnd.lnd_recv     = ksocknal_recv;
	the_ksocklnd.lnd_notify   = ksocknal_notify;
	the_ksocklnd.lnd_query    = ksocknal_query;
	the_ksocklnd.lnd_accept   = ksocknal_accept;

	rc = ksocknal_tunables_init();
	if (rc != 0)
		return rc;

	lnet_register_lnd(&the_ksocklnd);

	return 0;
}

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);