/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "socklnd.h"

ksock_nal_data_t        ksocknal_data;

ksock_interface_t *
ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
{
        ksock_net_t       *net = ni->ni_data;
        int                i;
        ksock_interface_t *iface;

        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                LASSERT(i < LNET_MAX_INTERFACES);
                iface = &net->ksnn_interfaces[i];

                if (iface->ksni_ipaddr == ip)
                        return iface;
        }

        return NULL;
}

ksock_route_t *
ksocknal_create_route (__u32 ipaddr, int port)
{
        ksock_route_t *route;

        LIBCFS_ALLOC (route, sizeof (*route));
        if (route == NULL)
                return NULL;

        cfs_atomic_set (&route->ksnr_refcount, 1);
        route->ksnr_peer = NULL;
        route->ksnr_retry_interval = 0;         /* OK to connect at any time */
        route->ksnr_ipaddr = ipaddr;
        route->ksnr_port = port;
        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;
        route->ksnr_connected = 0;
        route->ksnr_deleted = 0;
        route->ksnr_conn_count = 0;
        route->ksnr_share_count = 0;

        return route;
}

void
ksocknal_destroy_route (ksock_route_t *route)
{
        LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);

        if (route->ksnr_peer != NULL)
                ksocknal_peer_decref(route->ksnr_peer);

        LIBCFS_FREE (route, sizeof (*route));
}

int
ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
        ksock_net_t  *net = ni->ni_data;
        ksock_peer_t *peer;

        LASSERT (id.nid != LNET_NID_ANY);
        LASSERT (id.pid != LNET_PID_ANY);
        LASSERT (!cfs_in_interrupt());

        LIBCFS_ALLOC (peer, sizeof (*peer));
        if (peer == NULL)
                return -ENOMEM;

        memset (peer, 0, sizeof (*peer));       /* NULL pointers/clear flags etc */

        peer->ksnp_ni = ni;
        peer->ksnp_id = id;
        cfs_atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
        peer->ksnp_closing = 0;
        peer->ksnp_accepting = 0;
        peer->ksnp_proto = NULL;
        peer->ksnp_last_alive = 0;
        peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
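
        /* NB zero-copy request cookies are handed out starting just above
         * the value used for keepalive pings, so (assuming
         * SOCKNAL_KEEPALIVE_PING is the lowest reserved cookie) a real ZC
         * request can never be mistaken for a ping. */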

        CFS_INIT_LIST_HEAD (&peer->ksnp_conns);
        CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
        CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
        CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
        spin_lock_init(&peer->ksnp_lock);

        spin_lock_bh(&net->ksnn_lock);

        if (net->ksnn_shutdown) {
                spin_unlock_bh(&net->ksnn_lock);

                LIBCFS_FREE(peer, sizeof(*peer));
                CERROR("Can't create peer: network shutdown\n");
                return -ESHUTDOWN;
        }

        net->ksnn_npeers++;
        spin_unlock_bh(&net->ksnn_lock);

        *peerp = peer;
        return 0;
}

void
ksocknal_destroy_peer (ksock_peer_t *peer)
{
        ksock_net_t *net = peer->ksnp_ni->ni_data;

        CDEBUG (D_NET, "peer %s %p deleted\n",
                libcfs_id2str(peer->ksnp_id), peer);

        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
        LASSERT (peer->ksnp_accepting == 0);
        LASSERT (cfs_list_empty (&peer->ksnp_conns));
        LASSERT (cfs_list_empty (&peer->ksnp_routes));
        LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
        LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));

        LIBCFS_FREE (peer, sizeof (*peer));

        /* NB a peer's connections and routes keep a reference on their peer
         * until they are destroyed, so we can be assured that _all_ state to
         * do with this peer has been cleaned up when its refcount drops to
         * zero. */
        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_npeers--;
        spin_unlock_bh(&net->ksnn_lock);
}

ksock_peer_t *
ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
{
        cfs_list_t   *peer_list = ksocknal_nid2peerlist(id.nid);
        cfs_list_t   *tmp;
        ksock_peer_t *peer;
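
        /* NB ksocknal_nid2peerlist() hashes the NID into the global
         * ksnd_peers[] table (in effect a simple function of the NID modulo
         * ksnd_peer_hash_size), so peers of different NIDs and networks can
         * share a chain; hence the ni/id checks below. */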

        cfs_list_for_each (tmp, peer_list) {
                peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);

                LASSERT (!peer->ksnp_closing);

                if (peer->ksnp_ni != ni)
                        continue;

                if (peer->ksnp_id.nid != id.nid ||
                    peer->ksnp_id.pid != id.pid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                       peer, libcfs_id2str(id),
                       cfs_atomic_read(&peer->ksnp_refcount));
                return peer;
        }

        return NULL;
}

ksock_peer_t *
ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
{
        ksock_peer_t *peer;

        read_lock(&ksocknal_data.ksnd_global_lock);
        peer = ksocknal_find_peer_locked(ni, id);
        if (peer != NULL)                       /* +1 ref for caller? */
                ksocknal_peer_addref(peer);
        read_unlock(&ksocknal_data.ksnd_global_lock);

        return peer;
}

void
ksocknal_unlink_peer_locked (ksock_peer_t *peer)
{
        int                i;
        __u32              ip;
        ksock_interface_t *iface;

        for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
                LASSERT (i < LNET_MAX_INTERFACES);
                ip = peer->ksnp_passive_ips[i];

                iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
                /* All IPs in peer->ksnp_passive_ips[] come from the
                 * interface list, therefore the call must succeed. */
                LASSERT (iface != NULL);

                CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
                       peer, iface, iface->ksni_nroutes);
                iface->ksni_npeers--;
        }

        LASSERT (cfs_list_empty(&peer->ksnp_conns));
        LASSERT (cfs_list_empty(&peer->ksnp_routes));
        LASSERT (!peer->ksnp_closing);
        peer->ksnp_closing = 1;
        cfs_list_del (&peer->ksnp_list);
        /* lose peerlist's ref */
        ksocknal_peer_decref(peer);
}

int
ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
                        int *port, int *conn_count, int *share_count)
{
        ksock_peer_t  *peer;
        cfs_list_t    *ptmp;
        ksock_route_t *route;
        cfs_list_t    *rtmp;
        int            i;
        int            j;
        int            rc = -ENOENT;

        read_lock(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);

                        if (peer->ksnp_ni != ni)
                                continue;

                        if (peer->ksnp_n_passive_ips == 0 &&
                            cfs_list_empty(&peer->ksnp_routes)) {
                                if (index-- > 0)
                                        continue;

                                *id = peer->ksnp_id;
                                *myip = 0;
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
                                if (index-- > 0)
                                        continue;

                                *id = peer->ksnp_id;
                                *myip = peer->ksnp_passive_ips[j];
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        cfs_list_for_each (rtmp, &peer->ksnp_routes) {
                                if (index-- > 0)
                                        continue;

                                route = cfs_list_entry(rtmp, ksock_route_t,
                                                       ksnr_list);

                                *id = peer->ksnp_id;
                                *myip = route->ksnr_myipaddr;
                                *peer_ip = route->ksnr_ipaddr;
                                *port = route->ksnr_port;
                                *conn_count = route->ksnr_conn_count;
                                *share_count = route->ksnr_share_count;
                                rc = 0;
                                goto out;
                        }
                }
        }
out:
        read_unlock(&ksocknal_data.ksnd_global_lock);
        return rc;
}

void
ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
{
        ksock_peer_t      *peer = route->ksnr_peer;
        int                type = conn->ksnc_type;
        ksock_interface_t *iface;

        conn->ksnc_route = route;
        ksocknal_route_addref(route);

        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
                if (route->ksnr_myipaddr == 0) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %u.%u.%u.%u to %u.%u.%u.%u\n",
                               libcfs_id2str(peer->ksnp_id),
                               HIPQUAD(route->ksnr_ipaddr),
                               HIPQUAD(conn->ksnc_myipaddr));
                } else {
                        CDEBUG(D_NET, "Rebinding %s %u.%u.%u.%u from "
                               "%u.%u.%u.%u to %u.%u.%u.%u\n",
                               libcfs_id2str(peer->ksnp_id),
                               HIPQUAD(route->ksnr_ipaddr),
                               HIPQUAD(route->ksnr_myipaddr),
                               HIPQUAD(conn->ksnc_myipaddr));

                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                                  route->ksnr_myipaddr);
                        if (iface != NULL)
                                iface->ksni_nroutes--;
                }

                route->ksnr_myipaddr = conn->ksnc_myipaddr;
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes++;
        }

        route->ksnr_connected |= (1 << type);
        route->ksnr_conn_count++;
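
        /* NB ksnr_connected is a bitmask with one bit per SOCKLND connection
         * type (the usual split is control/bulk-in/bulk-out; exact
         * SOCKLND_CONN_* values assumed here), so a route records which
         * typed connections exist and which still need to be established. */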

        /* Successful connection => further attempts can
         * proceed immediately */
        route->ksnr_retry_interval = 0;
}

void
ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
{
        cfs_list_t    *tmp;
        ksock_conn_t  *conn;
        ksock_route_t *route2;

        LASSERT (!peer->ksnp_closing);
        LASSERT (route->ksnr_peer == NULL);
        LASSERT (!route->ksnr_scheduled);
        LASSERT (!route->ksnr_connecting);
        LASSERT (route->ksnr_connected == 0);

        /* LASSERT(unique) */
        cfs_list_for_each(tmp, &peer->ksnp_routes) {
                route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);

                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                        CERROR ("Duplicate route %s %u.%u.%u.%u\n",
                                libcfs_id2str(peer->ksnp_id),
                                HIPQUAD(route->ksnr_ipaddr));
                        LBUG();
                }
        }

        route->ksnr_peer = peer;
        ksocknal_peer_addref(peer);
        /* peer's routelist takes over my ref on 'route' */
        cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);

        cfs_list_for_each(tmp, &peer->ksnp_conns) {
                conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                /* keep going (typed routes) */
        }
}

void
ksocknal_del_route_locked (ksock_route_t *route)
{
        ksock_peer_t      *peer = route->ksnr_peer;
        ksock_interface_t *iface;
        ksock_conn_t      *conn;
        cfs_list_t        *ctmp;
        cfs_list_t        *cnxt;

        LASSERT (!route->ksnr_deleted);

        /* Close associated conns */
        cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
                conn = cfs_list_entry(ctmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_route != route)
                        continue;

                ksocknal_close_conn_locked (conn, 0);
        }

        if (route->ksnr_myipaddr != 0) {
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes--;
        }

        route->ksnr_deleted = 1;
        cfs_list_del (&route->ksnr_list);
        ksocknal_route_decref(route);           /* drop peer's ref */

        if (cfs_list_empty (&peer->ksnp_routes) &&
            cfs_list_empty (&peer->ksnp_conns)) {
                /* I've just removed the last route to a peer with no active
                 * connections */
                ksocknal_unlink_peer_locked (peer);
        }
}

int
ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
        cfs_list_t    *tmp;
        ksock_peer_t  *peer;
        ksock_peer_t  *peer2;
        ksock_route_t *route;
        ksock_route_t *route2;
        int            rc;

        if (id.nid == LNET_NID_ANY ||
            id.pid == LNET_PID_ANY)
                return -EINVAL;

        /* Have a brand new peer ready... */
        rc = ksocknal_create_peer(&peer, ni, id);
        if (rc != 0)
                return rc;

        route = ksocknal_create_route (ipaddr, port);
        if (route == NULL) {
                ksocknal_peer_decref(peer);
                return -ENOMEM;
        }

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* always called with a ref on ni, so shutdown can't have started */
        LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

        peer2 = ksocknal_find_peer_locked (ni, id);
        if (peer2 != NULL) {
                ksocknal_peer_decref(peer);
                peer = peer2;
        } else {
                /* peer table takes my ref on peer */
                cfs_list_add_tail (&peer->ksnp_list,
                                   ksocknal_nid2peerlist (id.nid));
        }

        route2 = NULL;
        cfs_list_for_each (tmp, &peer->ksnp_routes) {
                route2 = cfs_list_entry(tmp, ksock_route_t, ksnr_list);

                if (route2->ksnr_ipaddr == ipaddr)
                        break;

                route2 = NULL;
        }
        if (route2 == NULL) {
                ksocknal_add_route_locked(peer, route);
                route->ksnr_share_count++;
        } else {
                ksocknal_route_decref(route);
                route2->ksnr_share_count++;
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return 0;
}

void
ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
{
        ksock_conn_t  *conn;
        ksock_route_t *route;
        cfs_list_t    *tmp;
        cfs_list_t    *nxt;
        int            nshared;

        LASSERT (!peer->ksnp_closing);

        /* Extra ref prevents peer disappearing until I'm done with it */
        ksocknal_peer_addref(peer);

        cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
                route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);

                /* no match */
                if (!(ip == 0 || route->ksnr_ipaddr == ip))
                        continue;

                route->ksnr_share_count = 0;
                /* This deletes associated conns too */
                ksocknal_del_route_locked (route);
        }

        nshared = 0;
        cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
                route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
                nshared += route->ksnr_share_count;
        }

        if (nshared == 0) {
                /* remove everything else if there are no explicit entries
                 * left */
                cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
                        route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);

                        /* we should only be removing auto-entries */
                        LASSERT(route->ksnr_share_count == 0);
                        ksocknal_del_route_locked (route);
                }

                cfs_list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);

                        ksocknal_close_conn_locked(conn, 0);
                }
        }

        ksocknal_peer_decref(peer);
        /* NB peer unlinks itself when last conn/route is removed */
}

int
ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
{
        CFS_LIST_HEAD (zombies);
        cfs_list_t   *ptmp;
        cfs_list_t   *pnxt;
        ksock_peer_t *peer;
        int           lo;
        int           hi;
        int           i;
        int           rc = -ENOENT;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);
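
        /* NB pointer arithmetic below recovers the hash bucket index from
         * the chain head returned by ksocknal_nid2peerlist(), so the loop
         * scans exactly one bucket for a specific NID and the whole table
         * (lo..hi) for LNET_NID_ANY. */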

        if (id.nid != LNET_NID_ANY)
                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
                                ksocknal_data.ksnd_peers);
        else {
                lo = 0;
                hi = ksocknal_data.ksnd_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                cfs_list_for_each_safe (ptmp, pnxt,
                                        &ksocknal_data.ksnd_peers[i]) {
                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);

                        if (peer->ksnp_ni != ni)
                                continue;

                        if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
                              (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
                                continue;

                        ksocknal_peer_addref(peer);     /* a ref for me... */

                        ksocknal_del_peer_locked (peer, ip);

                        if (peer->ksnp_closing &&
                            !cfs_list_empty(&peer->ksnp_tx_queue)) {
                                LASSERT (cfs_list_empty(&peer->ksnp_conns));
                                LASSERT (cfs_list_empty(&peer->ksnp_routes));

                                cfs_list_splice_init(&peer->ksnp_tx_queue,
                                                     &zombies);
                        }

                        ksocknal_peer_decref(peer);     /* ...till here */

                        rc = 0;                         /* matched! */
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_txlist_done(ni, &zombies, 1);

        return rc;
}

ksock_conn_t *
ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
{
        ksock_peer_t *peer;
        cfs_list_t   *ptmp;
        ksock_conn_t *conn;
        cfs_list_t   *ctmp;
        int           i;

        read_lock(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);

                        LASSERT (!peer->ksnp_closing);

                        if (peer->ksnp_ni != ni)
                                continue;

                        cfs_list_for_each (ctmp, &peer->ksnp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = cfs_list_entry (ctmp, ksock_conn_t,
                                                       ksnc_list);
                                ksocknal_conn_addref(conn);
                                read_unlock(&ksocknal_data.ksnd_global_lock);
                                return conn;
                        }
                }
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);

        return NULL;
}

ksock_sched_t *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
        ksock_sched_t           *sched;
        int                      i;

        LASSERT(info->ksi_nthreads > 0);

        sched = &info->ksi_scheds[0];
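
        /* Least-loaded selection: start from ksi_scheds[0] and keep the
         * scheduler on this CPT currently serving the fewest connections
         * (kss_nconns); ties go to the lowest index. */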
        /*
         * NB: it's safe so far, but info->ksi_nthreads could be changed
         * at runtime when we have dynamic LNet configuration, then we
         * need to take care of this.
         */
        for (i = 1; i < info->ksi_nthreads; i++) {
                if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
                        sched = &info->ksi_scheds[i];
        }

        return sched;
}

int
ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
{
        ksock_net_t *net = ni->ni_data;
        int          i;
        int          nip;

        read_lock(&ksocknal_data.ksnd_global_lock);

        nip = net->ksnn_ninterfaces;
        LASSERT (nip <= LNET_MAX_INTERFACES);

        /* Only offer interfaces for additional connections if I have
         * more than one. */
        if (nip < 2) {
                read_unlock(&ksocknal_data.ksnd_global_lock);
                return 0;
        }

        for (i = 0; i < nip; i++) {
                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
                LASSERT (ipaddrs[i] != 0);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
        return nip;
}

int
ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
{
        int   best_netmatch = 0;
        int   best_xor      = 0;
        int   best          = -1;
        int   this_xor;
        int   this_netmatch;
        int   i;
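
        /* Ranking: an address on the same subnet (XOR masked by
         * ksni_netmask is zero) always beats one that isn't; among equals
         * the numerically closest address (smallest XOR) wins.  E.g. for
         * iface 192.168.1.10/24, peer IP 192.168.1.77 is a netmatch while
         * 10.0.0.5 is not. */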

        for (i = 0; i < nips; i++) {
                if (ips[i] == 0)
                        continue;

                this_xor = (ips[i] ^ iface->ksni_ipaddr);
                this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;

                if (!(best < 0 ||
                      best_netmatch < this_netmatch ||
                      (best_netmatch == this_netmatch &&
                       best_xor > this_xor)))
                        continue;

                best = i;
                best_netmatch = this_netmatch;
                best_xor = this_xor;
        }

        LASSERT (best >= 0);
        return best;
}

int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
        rwlock_t          *global_lock = &ksocknal_data.ksnd_global_lock;
        ksock_net_t       *net = peer->ksnp_ni->ni_data;
        ksock_interface_t *iface;
        ksock_interface_t *best_iface;
        int                n_ips;
        int                i;
        int                j;
        int                k;
        __u32              ip;
        __u32              xor;
        int                this_netmatch;
        int                best_netmatch;
        int                best_npeers;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness shouldn't matter */

        /* Also note that I'm not going to return more than n_peerips
         * interfaces, even if I have more myself */

        write_lock_bh(global_lock);

        LASSERT (n_peerips <= LNET_MAX_INTERFACES);
        LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);

        /* Only match interfaces for additional connections
         * if I have > 1 interface */
        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
                MIN(n_peerips, net->ksnn_ninterfaces);

        for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
                /*              ^ yes really... */

                /* If we have any new interfaces, first tick off all the
                 * peer IPs that match old interfaces, then choose new
                 * interfaces to match the remaining peer IPs.
                 * We don't forget interfaces we've stopped using; we might
                 * start using them again... */

                if (i < peer->ksnp_n_passive_ips) {
                        /* Old interface. */
                        ip = peer->ksnp_passive_ips[i];
                        best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);

                        /* peer passive ips are kept up to date */
                        LASSERT(best_iface != NULL);
                } else {
                        /* choose a new interface */
                        LASSERT (i == peer->ksnp_n_passive_ips);

                        best_iface = NULL;
                        best_netmatch = 0;
                        best_npeers = 0;

                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
                                iface = &net->ksnn_interfaces[j];
                                ip = iface->ksni_ipaddr;

                                for (k = 0; k < peer->ksnp_n_passive_ips; k++)
                                        if (peer->ksnp_passive_ips[k] == ip)
                                                break;

                                if (k < peer->ksnp_n_passive_ips) /* using it already */
                                        continue;

                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
                                xor = (ip ^ peerips[k]);
                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

                                if (!(best_iface == NULL ||
                                      best_netmatch < this_netmatch ||
                                      (best_netmatch == this_netmatch &&
                                       best_npeers > iface->ksni_npeers)))
                                        continue;

                                best_iface = iface;
                                best_netmatch = this_netmatch;
                                best_npeers = iface->ksni_npeers;
                        }

                        best_iface->ksni_npeers++;
                        ip = best_iface->ksni_ipaddr;
                        peer->ksnp_passive_ips[i] = ip;
                        peer->ksnp_n_passive_ips = i+1;
                }

                LASSERT (best_iface != NULL);

                /* mark the best matching peer IP used */
                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
                peerips[j] = 0;
        }

        /* Overwrite input peer IP addresses */
        memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));

        write_unlock_bh(global_lock);

        return n_ips;
}

void
ksocknal_create_routes(ksock_peer_t *peer, int port,
                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
        ksock_route_t     *newroute = NULL;
        rwlock_t          *global_lock = &ksocknal_data.ksnd_global_lock;
        lnet_ni_t         *ni = peer->ksnp_ni;
        ksock_net_t       *net = ni->ni_data;
        cfs_list_t        *rtmp;
        ksock_route_t     *route;
        ksock_interface_t *iface;
        ksock_interface_t *best_iface;
        int                best_netmatch;
        int                this_netmatch;
        int                best_nroutes;
        int                i;
        int                j;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness here shouldn't matter */

        write_lock_bh(global_lock);

        if (net->ksnn_ninterfaces < 2) {
                /* Only create additional connections
                 * if I have > 1 interface */
                write_unlock_bh(global_lock);
                return;
        }

        LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);

        for (i = 0; i < npeer_ipaddrs; i++) {
                if (newroute != NULL) {
                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
                } else {
                        write_unlock_bh(global_lock);

                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                        if (newroute == NULL)
                                return;

                        write_lock_bh(global_lock);
                }

                if (peer->ksnp_closing) {
                        /* peer got closed under me */
                        break;
                }

                /* Already got a route? */
                route = NULL;
                cfs_list_for_each(rtmp, &peer->ksnp_routes) {
                        route = cfs_list_entry(rtmp, ksock_route_t, ksnr_list);

                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;

                        route = NULL;
                }
                if (route != NULL)
                        continue;

                best_iface = NULL;
                best_nroutes = 0;
                best_netmatch = 0;

                LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);

                /* Select interface to connect from */
                for (j = 0; j < net->ksnn_ninterfaces; j++) {
                        iface = &net->ksnn_interfaces[j];

                        /* Using this interface already? */
                        cfs_list_for_each(rtmp, &peer->ksnp_routes) {
                                route = cfs_list_entry(rtmp, ksock_route_t,
                                                       ksnr_list);

                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
                                        break;

                                route = NULL;
                        }
                        if (route != NULL)
                                continue;

                        this_netmatch = (((iface->ksni_ipaddr ^
                                           newroute->ksnr_ipaddr) &
                                           iface->ksni_netmask) == 0) ? 1 : 0;

                        if (!(best_iface == NULL ||
                              best_netmatch < this_netmatch ||
                              (best_netmatch == this_netmatch &&
                               best_nroutes > iface->ksni_nroutes)))
                                continue;

                        best_iface = iface;
                        best_netmatch = this_netmatch;
                        best_nroutes = iface->ksni_nroutes;
                }

                if (best_iface == NULL)
                        continue;

                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
                best_iface->ksni_nroutes++;

                ksocknal_add_route_locked(peer, newroute);
                newroute = NULL;
        }

        write_unlock_bh(global_lock);
        if (newroute != NULL)
                ksocknal_route_decref(newroute);
}

int
ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
{
        ksock_connreq_t *cr;
        int              rc;
        __u32            peer_ip;
        int              peer_port;

        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
        LASSERT (rc == 0);                      /* we succeeded before */

        LIBCFS_ALLOC(cr, sizeof(*cr));
        if (cr == NULL) {
                LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
                                   "%u.%u.%u.%u: memory exhausted\n",
                                   HIPQUAD(peer_ip));
                return -ENOMEM;
        }

        lnet_ni_addref(ni);
        cr->ksncr_ni   = ni;
        cr->ksncr_sock = sock;

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

        cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
        cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
        return 0;
}

int
ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
{
        ksock_route_t *route;

        cfs_list_for_each_entry_typed (route, &peer->ksnp_routes,
                                       ksock_route_t, ksnr_list) {
                if (route->ksnr_ipaddr == ipaddr)
                        return route->ksnr_connecting;
        }
        return 0;
}

int
ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                      cfs_socket_t *sock, int type)
{
        rwlock_t          *global_lock = &ksocknal_data.ksnd_global_lock;
        CFS_LIST_HEAD     (zombies);
        lnet_process_id_t  peerid;
        cfs_list_t        *tmp;
        __u64              incarnation;
        ksock_conn_t      *conn;
        ksock_conn_t      *conn2;
        ksock_peer_t      *peer = NULL;
        ksock_peer_t      *peer2;
        ksock_sched_t     *sched;
        ksock_hello_msg_t *hello;
        int                cpt;
        ksock_tx_t        *tx;
        ksock_tx_t        *txtmp;
        int                rc;
        int                active;
        char              *warn = NULL;

        active = (route != NULL);

        LASSERT (active == (type != SOCKLND_CONN_NONE));

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                rc = -ENOMEM;
                goto failed_0;
        }

        memset (conn, 0, sizeof (*conn));

        conn->ksnc_peer = NULL;
        conn->ksnc_route = NULL;
        conn->ksnc_sock = sock;
        /* 2 ref, 1 for conn, another extra ref prevents socket
         * being closed before establishment of connection */
        cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
        conn->ksnc_type = type;
        ksocknal_lib_save_callback(sock, conn);
        cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

        conn->ksnc_rx_ready = 0;
        conn->ksnc_rx_scheduled = 0;

        CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
        conn->ksnc_tx_ready = 0;
        conn->ksnc_tx_scheduled = 0;
        conn->ksnc_tx_carrier = NULL;
        cfs_atomic_set (&conn->ksnc_tx_nob, 0);

        LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
                                     kshm_ips[LNET_MAX_INTERFACES]));
        if (hello == NULL) {
                rc = -ENOMEM;
                goto failed_1;
        }

        /* stash conn's local and remote addrs */
        rc = ksocknal_lib_get_conn_addrs (conn);
        if (rc != 0)
                goto failed_1;

        /* Find out/confirm peer's NID and connection type and get the
         * vector of interfaces she's willing to let me connect to.
         * Passive connections use the listener timeout since the peer sends
         * eagerly */

        if (active) {
                peer = route->ksnr_peer;
                LASSERT(ni == peer->ksnp_ni);

                /* Active connection sends HELLO eagerly */
                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                peerid = peer->ksnp_id;

                write_lock_bh(global_lock);
                conn->ksnc_proto = peer->ksnp_proto;
                write_unlock_bh(global_lock);

                if (conn->ksnc_proto == NULL) {
                        conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                        if (*ksocknal_tunables.ksnd_protocol == 2)
                                conn->ksnc_proto = &ksocknal_protocol_v2x;
                        else if (*ksocknal_tunables.ksnd_protocol == 1)
                                conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                }

                rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
                if (rc != 0)
                        goto failed_1;
        } else {
                peerid.nid = LNET_NID_ANY;
                peerid.pid = LNET_PID_ANY;

                /* Passive, get protocol from peer */
                conn->ksnc_proto = NULL;
        }

        rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
        if (rc < 0)
                goto failed_1;

        LASSERT (rc == 0 || active);
        LASSERT (conn->ksnc_proto != NULL);
        LASSERT (peerid.nid != LNET_NID_ANY);

        cpt = lnet_cpt_of_nid(peerid.nid);

        if (active) {
                ksocknal_peer_addref(peer);
                write_lock_bh(global_lock);
        } else {
                rc = ksocknal_create_peer(&peer, ni, peerid);
                if (rc != 0)
                        goto failed_1;

                write_lock_bh(global_lock);

                /* called with a ref on ni, so shutdown can't have started */
                LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
                        /* NB this puts an "empty" peer in the peer
                         * table (which takes my ref) */
                        cfs_list_add_tail(&peer->ksnp_list,
                                          ksocknal_nid2peerlist(peerid.nid));
                } else {
                        ksocknal_peer_decref(peer);
                        peer = peer2;
                }

                /* +1 ref for me */
                ksocknal_peer_addref(peer);
                peer->ksnp_accepting++;

                /* Am I already connecting to this guy?  Resolve in
                 * favour of higher NID... */
                if (peerid.nid < ni->ni_nid &&
                    ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
                        rc = EALREADY;
                        warn = "connection race resolution";
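                        /* NB both sides may try to connect to each other at
                         * the same time; comparing NIDs breaks the tie
                         * deterministically, so exactly one side (here, the
                         * lower NID) abandons its active attempt and keeps
                         * the passively accepted connection instead. */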
                        goto failed_2;
                }
        }

        if (peer->ksnp_closing ||
            (active && route->ksnr_deleted)) {
                /* peer/route got closed under me */
                rc = -ESTALE;
                warn = "peer/route removed";
                goto failed_2;
        }

        if (peer->ksnp_proto == NULL) {
                /* Never connected before.
                 * NB recv_hello may have returned EPROTO to signal my peer
                 * wants a different protocol than the one I asked for.
                 */
                LASSERT (cfs_list_empty(&peer->ksnp_conns));

                peer->ksnp_proto = conn->ksnc_proto;
                peer->ksnp_incarnation = incarnation;
        }

        if (peer->ksnp_proto != conn->ksnc_proto ||
            peer->ksnp_incarnation != incarnation) {
                /* Peer rebooted or I've got the wrong protocol version */
                ksocknal_close_peer_conns_locked(peer, 0, 0);

                peer->ksnp_proto = NULL;
                rc = ESTALE;
                warn = peer->ksnp_incarnation != incarnation ?
                       "peer rebooted" :
                       "wrong proto version";
                goto failed_2;
        }

        switch (rc) {
        default:
                LBUG();
        case 0:
                break;
        case EALREADY:
                warn = "lost conn race";
                goto failed_2;
        case EPROTO:
                warn = "retry with different protocol version";
                goto failed_2;
        }

        /* Refuse to duplicate an existing connection, unless this is a
         * loopback connection */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
                cfs_list_for_each(tmp, &peer->ksnp_conns) {
                        conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);

                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
                            conn2->ksnc_type != conn->ksnc_type)
                                continue;

                        /* Reply on a passive connection attempt so the peer
                         * realises we're connected. */
                        LASSERT (rc == 0);
                        if (!active)
                                rc = EALREADY;

                        warn = "duplicate";
                        goto failed_2;
                }
        }

        /* If the connection created by this route didn't bind to the IP
         * address the route connected to, the connection/route matching
         * code below probably isn't going to work. */
        if (active &&
            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
                CERROR("Route %s %u.%u.%u.%u connected to %u.%u.%u.%u\n",
                       libcfs_id2str(peer->ksnp_id),
                       HIPQUAD(route->ksnr_ipaddr),
                       HIPQUAD(conn->ksnc_ipaddr));
        }

        /* Search for a route corresponding to the new connection and
         * create an association.  This allows incoming connections created
         * by routes in my peer to match my own route entries so I don't
         * continually create duplicate routes. */
        cfs_list_for_each (tmp, &peer->ksnp_routes) {
                route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);

                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                break;
        }

        conn->ksnc_peer = peer;                 /* conn takes my ref on peer */
        peer->ksnp_last_alive = cfs_time_current();
        peer->ksnp_send_keepalive = 0;
        peer->ksnp_error = 0;

        sched = ksocknal_choose_scheduler_locked(cpt);
        sched->kss_nconns++;
        conn->ksnc_scheduler = sched;

        conn->ksnc_tx_last_post = cfs_time_current();
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
        conn->ksnc_tx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
        cfs_mb();       /* order with adding to peer's conn list */

        cfs_list_add (&conn->ksnc_list, &peer->ksnp_conns);
        ksocknal_conn_addref(conn);

        ksocknal_new_packet(conn, 0);

        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

        /* Take packets blocking for this connection. */
        cfs_list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
                        continue;

                cfs_list_del (&tx->tx_list);
                ksocknal_queue_tx_locked (tx, conn);
        }

        write_unlock_bh(global_lock);

        /* We've now got a new connection.  Any errors from here on are just
         * like "normal" comms errors and we close the connection normally.
         * NB (a) we still have to send the reply HELLO for passive
         *        connections,
         *    (b) normal I/O on the conn is blocked until I setup and call the
         *        socket callbacks.
         */

        CDEBUG(D_NET, "New conn %s p %d.x %u.%u.%u.%u -> %u.%u.%u.%u/%d"
               " incarnation:"LPD64" sched[%d:%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               HIPQUAD(conn->ksnc_myipaddr), HIPQUAD(conn->ksnc_ipaddr),
               conn->ksnc_port, incarnation, cpt,
               (int)(sched - &sched->kss_info->ksi_scheds[0]));

        if (active) {
                /* additional routes after interface exchange? */
                ksocknal_create_routes(peer, conn->ksnc_port,
                                       hello->kshm_ips, hello->kshm_nips);
        } else {
                hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
                                                       hello->kshm_nips);
                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
        }

        LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
                                    kshm_ips[LNET_MAX_INTERFACES]));

        /* setup the socket AFTER I've received hello (it disables
         * SO_LINGER).  I might call back to the acceptor who may want
         * to send a protocol version response and then close the
         * socket; this ensures the socket only tears down after the
         * response has been sent. */
        if (rc == 0)
                rc = ksocknal_lib_setup_sock(sock);

        write_lock_bh(global_lock);

        /* NB my callbacks block while I hold ksnd_global_lock */
        ksocknal_lib_set_callback(sock, conn);

        if (!active)
                peer->ksnp_accepting--;

        write_unlock_bh(global_lock);

        if (rc != 0) {
                write_lock_bh(global_lock);
                if (!conn->ksnc_closing) {
                        /* could be closed by another thread */
                        ksocknal_close_conn_locked(conn, rc);
                }
                write_unlock_bh(global_lock);
        } else if (ksocknal_connsock_addref(conn) == 0) {
                /* Allow I/O to proceed. */
                ksocknal_read_callback(conn);
                ksocknal_write_callback(conn);
                ksocknal_connsock_decref(conn);
        }

        ksocknal_connsock_decref(conn);
        ksocknal_conn_decref(conn);
        return rc;

failed_2:
        if (!peer->ksnp_closing &&
            cfs_list_empty (&peer->ksnp_conns) &&
            cfs_list_empty (&peer->ksnp_routes)) {
                cfs_list_add(&zombies, &peer->ksnp_tx_queue);
                cfs_list_del_init(&peer->ksnp_tx_queue);
                ksocknal_unlink_peer_locked(peer);
        }

        write_unlock_bh(global_lock);

        if (warn != NULL) {
                if (rc < 0)
                        CERROR("Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
                else
                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
        }

        if (!active) {
                if (rc > 0) {
                        /* Request retry by replying with CONN_NONE
                         * ksnc_proto has been set already */
                        conn->ksnc_type = SOCKLND_CONN_NONE;
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
                }

                write_lock_bh(global_lock);
                peer->ksnp_accepting--;
                write_unlock_bh(global_lock);
        }

        ksocknal_txlist_done(ni, &zombies, 1);
        ksocknal_peer_decref(peer);

failed_1:
        if (hello != NULL)
                LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
                                            kshm_ips[LNET_MAX_INTERFACES]));

        LIBCFS_FREE (conn, sizeof(*conn));

failed_0:
        libcfs_sock_release(sock);
        return rc;
}

void
ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
{
        /* This just does the immediate housekeeping, and queues the
         * connection for the reaper to terminate.
         * Caller holds ksnd_global_lock exclusively in irq context */
        ksock_peer_t  *peer = conn->ksnc_peer;
        ksock_route_t *route;
        ksock_conn_t  *conn2;
        cfs_list_t    *tmp;

        LASSERT (peer->ksnp_error == 0);
        LASSERT (!conn->ksnc_closing);
        conn->ksnc_closing = 1;

        /* ksnd_deathrow_conns takes over peer's ref */
        cfs_list_del (&conn->ksnc_list);

        route = conn->ksnc_route;
        if (route != NULL) {
                /* dissociate conn from route... */
                LASSERT (!route->ksnr_deleted);
                LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

                conn2 = NULL;
                cfs_list_for_each(tmp, &peer->ksnp_conns) {
                        conn2 = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);

                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
                                break;

                        conn2 = NULL;
                }
                if (conn2 == NULL)
                        route->ksnr_connected &= ~(1 << conn->ksnc_type);

                conn->ksnc_route = NULL;

#if 0           /* irrelevant with only eager routes */
                /* make route least favourite */
                cfs_list_del (&route->ksnr_list);
                cfs_list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
#endif
                ksocknal_route_decref(route);   /* drop conn's ref on route */
        }

        if (cfs_list_empty (&peer->ksnp_conns)) {
                /* No more connections to this peer */

                if (!cfs_list_empty(&peer->ksnp_tx_queue)) {
                        ksock_tx_t *tx;

                        LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);

                        /* throw them to the last connection...,
                         * these TXs will be sent to /dev/null by the
                         * scheduler */
                        cfs_list_for_each_entry(tx, &peer->ksnp_tx_queue,
                                                tx_list)
                                ksocknal_tx_prep(conn, tx);

                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
                        cfs_list_splice_init(&peer->ksnp_tx_queue,
                                             &conn->ksnc_tx_queue);
                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                }

                peer->ksnp_proto = NULL;        /* renegotiate protocol version */
                peer->ksnp_error = error;       /* stash last conn close reason */

                if (cfs_list_empty (&peer->ksnp_routes)) {
                        /* I've just closed last conn belonging to a
                         * peer with no routes to it */
                        ksocknal_unlink_peer_locked (peer);
                }
        }

        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        cfs_list_add_tail(&conn->ksnc_list,
                          &ksocknal_data.ksnd_deathrow_conns);
        cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}

void
ksocknal_peer_failed (ksock_peer_t *peer)
{
        int        notify = 0;
        cfs_time_t last_alive = 0;

        /* There has been a connection failure or comms error; but I'll only
         * tell LNET I think the peer is dead if it's to another kernel and
         * there are no connections or connection attempts in existence. */

        read_lock(&ksocknal_data.ksnd_global_lock);

        if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
            cfs_list_empty(&peer->ksnp_conns) &&
            peer->ksnp_accepting == 0 &&
            ksocknal_find_connecting_route_locked(peer) == NULL) {
                notify = 1;
                last_alive = peer->ksnp_last_alive;
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);

        if (notify)
                lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
                             last_alive);
}

void
ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
        ksock_peer_t  *peer = conn->ksnc_peer;
        ksock_tx_t    *tx;
        ksock_tx_t    *tmp;
        CFS_LIST_HEAD (zlist);

        /* NB safe to finalize TXs because closing of socket will
         * abort all buffered data */
        LASSERT (conn->ksnc_sock == NULL);

        spin_lock(&peer->ksnp_lock);

        cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
                                           ksock_tx_t, tx_zc_list) {
                if (tx->tx_conn != conn)
                        continue;

                LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);

                tx->tx_msg.ksm_zc_cookies[0] = 0;
                tx->tx_zc_aborted = 1;  /* mark it as not-acked */
                cfs_list_del(&tx->tx_zc_list);
                cfs_list_add(&tx->tx_zc_list, &zlist);
        }

        spin_unlock(&peer->ksnp_lock);

        while (!cfs_list_empty(&zlist)) {
                tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);

                cfs_list_del(&tx->tx_zc_list);
                ksocknal_tx_decref(tx);
        }
}

void
ksocknal_terminate_conn (ksock_conn_t *conn)
{
        /* This gets called by the reaper (guaranteed thread context) to
         * disengage the socket from its callbacks and close it.
         * ksnc_refcount will eventually hit zero, and then the reaper will
         * destroy it. */
        ksock_peer_t  *peer = conn->ksnc_peer;
        ksock_sched_t *sched = conn->ksnc_scheduler;
        int            failed = 0;

        LASSERT(conn->ksnc_closing);

        /* wake up the scheduler to "send" all remaining packets to /dev/null */
        spin_lock_bh(&sched->kss_lock);

        /* a closing conn is always ready to tx */
        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled &&
            !cfs_list_empty(&conn->ksnc_tx_queue)) {
                cfs_list_add_tail (&conn->ksnc_tx_list,
                                   &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                cfs_waitq_signal (&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);

        /* serialise with callbacks */
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

        /* OK, so this conn may not be completely disengaged from its
         * scheduler yet, but it _has_ committed to terminate... */
        conn->ksnc_scheduler->kss_nconns--;

        if (peer->ksnp_error != 0) {
                /* peer's last conn closed in error */
                LASSERT (cfs_list_empty (&peer->ksnp_conns));
                failed = 1;
                peer->ksnp_error = 0;   /* avoid multiple notifications */
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        if (failed)
                ksocknal_peer_failed(peer);

        /* The socket is closed on the final put; either here, or in
         * ksocknal_{send,recv}msg().  Since we set up the linger2 option
         * when the connection was established, this will close the socket
         * immediately, aborting anything buffered in it.  Any hung
         * zero-copy transmits will therefore complete in finite time. */
        ksocknal_connsock_decref(conn);
}

void
ksocknal_queue_zombie_conn (ksock_conn_t *conn)
{
        /* Queue the conn for the reaper to destroy */

        LASSERT(cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
        cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}

void
ksocknal_destroy_conn (ksock_conn_t *conn)
{
        cfs_time_t last_rcv;

        /* Final coup-de-grace of the reaper */
        CDEBUG (D_NET, "connection %p\n", conn);

        LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
        LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
        LASSERT (conn->ksnc_sock == NULL);
        LASSERT (conn->ksnc_route == NULL);
        LASSERT (!conn->ksnc_tx_scheduled);
        LASSERT (!conn->ksnc_rx_scheduled);
        LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));

        /* complete current receive if any */
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_LNET_PAYLOAD:
                last_rcv = conn->ksnc_rx_deadline -
                           cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
                CERROR("Completing partial receive from %s[%d]"
                       ", ip %d.%d.%d.%d:%d, with error, wanted: %d, left: %d, "
                       "last alive is %ld secs ago\n",
                       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
                       HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port,
                       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
                       cfs_duration_sec(cfs_time_sub(cfs_time_current(),
                                                     last_rcv)));
                lnet_finalize (conn->ksnc_peer->ksnp_ni,
                               conn->ksnc_cookie, -EIO);
                break;
        case SOCKNAL_RX_LNET_HEADER:
                if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of lnet header from %s"
                               ", ip %d.%d.%d.%d:%d, with error, protocol: %d.x.\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port,
                               conn->ksnc_proto->pro_version);
                break;
        case SOCKNAL_RX_KSM_HEADER:
                if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of ksock message from %s"
                               ", ip %d.%d.%d.%d:%d, with error, protocol: %d.x.\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port,
                               conn->ksnc_proto->pro_version);
                break;
        case SOCKNAL_RX_SLOP:
                if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of slops from %s"
                               ", ip %d.%d.%d.%d:%d, with error\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
                break;
        default:
                LBUG ();
                break;
        }

        ksocknal_peer_decref(conn->ksnc_peer);

        LIBCFS_FREE (conn, sizeof (*conn));
}

int
ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
{
        ksock_conn_t *conn;
        cfs_list_t   *ctmp;
        cfs_list_t   *cnxt;
        int           count = 0;

        cfs_list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
                conn = cfs_list_entry (ctmp, ksock_conn_t, ksnc_list);

                if (ipaddr == 0 ||
                    conn->ksnc_ipaddr == ipaddr) {
                        count++;
                        ksocknal_close_conn_locked (conn, why);
                }
        }

        return count;
}

int
ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
{
        ksock_peer_t *peer = conn->ksnc_peer;
        __u32         ipaddr = conn->ksnc_ipaddr;
        int           count;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return count;
}

int
ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
{
        ksock_peer_t *peer;
        cfs_list_t   *ptmp;
        cfs_list_t   *pnxt;
        int           lo;
        int           hi;
        int           i;
        int           count = 0;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        if (id.nid != LNET_NID_ANY)
                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
                                ksocknal_data.ksnd_peers);
        else {
                lo = 0;
                hi = ksocknal_data.ksnd_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                cfs_list_for_each_safe (ptmp, pnxt,
                                        &ksocknal_data.ksnd_peers[i]) {
                        peer = cfs_list_entry (ptmp, ksock_peer_t, ksnp_list);

                        if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
                              (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
                                continue;

                        count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0);
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        /* wildcards always succeed */
        if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
                return 0;

        return (count == 0 ? -ENOENT : 0);
}

void
ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
{
        /* The router is telling me she's been notified of a change in
         * gateway state.... */
        lnet_process_id_t id = {0};

        id.nid = gw_nid;
        id.pid = LNET_PID_ANY;

        CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
                alive ? "up" : "down");

        if (!alive) {
                /* If the gateway crashed, close all open connections... */
                ksocknal_close_matching_conns (id, 0);
                return;
        }

        /* ...otherwise do nothing.  We can only establish new connections
         * if we have autoroutes, and these connect on demand. */
}

void
ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
        int                connect = 1;
        cfs_time_t         last_alive = 0;
        cfs_time_t         now = cfs_time_current();
        ksock_peer_t      *peer = NULL;
        rwlock_t          *glock = &ksocknal_data.ksnd_global_lock;
        lnet_process_id_t  id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};

        read_lock(glock);

        peer = ksocknal_find_peer_locked(ni, id);
        if (peer != NULL) {
                cfs_list_t   *tmp;
                ksock_conn_t *conn;
                int           bufnob;

                cfs_list_for_each (tmp, &peer->ksnp_conns) {
                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
                        bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);

                        if (bufnob < conn->ksnc_tx_bufnob) {
                                /* something got ACKed */
                                conn->ksnc_tx_deadline =
                                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                                peer->ksnp_last_alive = now;
                                conn->ksnc_tx_bufnob = bufnob;
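
                                /* NB the socket's send buffer shrinking below
                                 * the level recorded at the last check means
                                 * the peer's TCP stack ACKed some of our
                                 * bytes: proof of life needing no
                                 * SOCKLND-level traffic, so the tx deadline
                                 * can be pushed out. */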
                        }
                }

                last_alive = peer->ksnp_last_alive;
                if (ksocknal_find_connectable_route_locked(peer) == NULL)
                        connect = 0;
        }

        read_unlock(glock);

        if (last_alive != 0)
                *when = last_alive;

        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
               libcfs_nid2str(nid), peer,
               last_alive ? cfs_duration_sec(now - last_alive) : -1,
               connect);

        if (!connect)
                return;

        ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());

        write_lock_bh(glock);

        peer = ksocknal_find_peer_locked(ni, id);
        if (peer != NULL)
                ksocknal_launch_all_connections_locked(peer);

        write_unlock_bh(glock);
}

void
ksocknal_push_peer (ksock_peer_t *peer)
{
        int           index;
        int           i;
        cfs_list_t   *tmp;
        ksock_conn_t *conn;

        for (index = 0; ; index++) {
                read_lock(&ksocknal_data.ksnd_global_lock);

                i = 0;
                conn = NULL;

                cfs_list_for_each (tmp, &peer->ksnp_conns) {
                        if (i++ == index) {
                                conn = cfs_list_entry (tmp, ksock_conn_t,
                                                       ksnc_list);
                                ksocknal_conn_addref(conn);
                                break;
                        }
                }

                read_unlock(&ksocknal_data.ksnd_global_lock);

                if (conn == NULL)
                        break;

                ksocknal_lib_push_conn (conn);
                ksocknal_conn_decref(conn);
        }
}

int
ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
{
        ksock_peer_t *peer;
        cfs_list_t   *tmp;
        int           index;
        int           i;
        int           j;
        int           rc = -ENOENT;

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                for (j = 0; ; j++) {
                        read_lock(&ksocknal_data.ksnd_global_lock);

                        index = 0;
                        peer = NULL;

                        cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
                                peer = cfs_list_entry(tmp, ksock_peer_t,
                                                      ksnp_list);

                                if (!((id.nid == LNET_NID_ANY ||
                                       id.nid == peer->ksnp_id.nid) &&
                                      (id.pid == LNET_PID_ANY ||
                                       id.pid == peer->ksnp_id.pid))) {
                                        peer = NULL;
                                        continue;
                                }

                                if (index++ == j) {
                                        ksocknal_peer_addref(peer);
                                        break;
                                }

                                peer = NULL;
                        }

                        read_unlock(&ksocknal_data.ksnd_global_lock);

                        if (peer == NULL)
                                break;

                        rc = 0;
                        ksocknal_push_peer (peer);
                        ksocknal_peer_decref(peer);
                }
        }

        return rc;
}

int
ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
{
        ksock_net_t       *net = ni->ni_data;
        ksock_interface_t *iface;
        int                rc;
        int                i;
        int                j;
        cfs_list_t        *ptmp;
        ksock_peer_t      *peer;
        cfs_list_t        *rtmp;
        ksock_route_t     *route;

        if (ipaddress == 0 ||
            netmask == 0)
                return -EINVAL;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        iface = ksocknal_ip2iface(ni, ipaddress);
        if (iface != NULL) {
                /* silently ignore dups */
                rc = 0;
        } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
                rc = -ENOSPC;
        } else {
                iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];

                iface->ksni_ipaddr = ipaddress;
                iface->ksni_netmask = netmask;
                iface->ksni_nroutes = 0;
                iface->ksni_npeers = 0;

                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                        cfs_list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
                                peer = cfs_list_entry(ptmp, ksock_peer_t,
                                                      ksnp_list);

                                for (j = 0; j < peer->ksnp_n_passive_ips; j++)
                                        if (peer->ksnp_passive_ips[j] == ipaddress)
                                                iface->ksni_npeers++;

                                cfs_list_for_each(rtmp, &peer->ksnp_routes) {
                                        route = cfs_list_entry(rtmp,
                                                               ksock_route_t,
                                                               ksnr_list);

                                        if (route->ksnr_myipaddr == ipaddress)
                                                iface->ksni_nroutes++;
                                }
                        }
                }

                rc = 0;
                /* NB only new connections will pay attention to the new interface! */
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return rc;
}

void
ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
{
        int            i;
        int            j;
        cfs_list_t    *tmp;
        cfs_list_t    *nxt;
        ksock_route_t *route;
        ksock_conn_t  *conn;

        for (i = 0; i < peer->ksnp_n_passive_ips; i++)
                if (peer->ksnp_passive_ips[i] == ipaddr) {
                        for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
                                peer->ksnp_passive_ips[j-1] =
                                        peer->ksnp_passive_ips[j];
                        peer->ksnp_n_passive_ips--;
                        break;
                }

        cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
                route = cfs_list_entry (tmp, ksock_route_t, ksnr_list);

                if (route->ksnr_myipaddr != ipaddr)
                        continue;

                if (route->ksnr_share_count != 0) {
                        /* Manually created; keep, but unbind */
                        route->ksnr_myipaddr = 0;
                } else {
                        ksocknal_del_route_locked(route);
                }
        }

        cfs_list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
                conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_myipaddr == ipaddr)
                        ksocknal_close_conn_locked (conn, 0);
        }
}

int
ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
{
        ksock_net_t  *net = ni->ni_data;
        int           rc = -ENOENT;
        cfs_list_t   *tmp;
        cfs_list_t   *nxt;
        ksock_peer_t *peer;
        __u32         this_ip;
        int           i;
        int           j;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                this_ip = net->ksnn_interfaces[i].ksni_ipaddr;

                if (!(ipaddress == 0 ||
                      ipaddress == this_ip))
                        continue;

                rc = 0;

                for (j = i+1; j < net->ksnn_ninterfaces; j++)
                        net->ksnn_interfaces[j-1] =
                                net->ksnn_interfaces[j];

                net->ksnn_ninterfaces--;

                for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
                        cfs_list_for_each_safe(tmp, nxt,
                                               &ksocknal_data.ksnd_peers[j]) {
                                peer = cfs_list_entry(tmp, ksock_peer_t,
                                                      ksnp_list);

                                if (peer->ksnp_ni != ni)
                                        continue;

                                ksocknal_peer_del_interface_locked(peer, this_ip);
                        }
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return rc;
}

int
ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        lnet_process_id_t id = {0};
        struct libcfs_ioctl_data *data = arg;
        int rc;

        switch(cmd) {
        case IOC_LIBCFS_GET_INTERFACE: {
                ksock_net_t       *net = ni->ni_data;
                ksock_interface_t *iface;

                read_lock(&ksocknal_data.ksnd_global_lock);

                if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
                        rc = -ENOENT;
                } else {
                        rc = 0;
                        iface = &net->ksnn_interfaces[data->ioc_count];

                        data->ioc_u32[0] = iface->ksni_ipaddr;
                        data->ioc_u32[1] = iface->ksni_netmask;
                        data->ioc_u32[2] = iface->ksni_npeers;
                        data->ioc_u32[3] = iface->ksni_nroutes;
                }

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return rc;
        }

        case IOC_LIBCFS_ADD_INTERFACE:
                return ksocknal_add_interface(ni,
                                              data->ioc_u32[0], /* IP address */
                                              data->ioc_u32[1]); /* net mask */

        case IOC_LIBCFS_DEL_INTERFACE:
                return ksocknal_del_interface(ni,
                                              data->ioc_u32[0]); /* IP address */

        case IOC_LIBCFS_GET_PEER: {
                __u32 myip = 0;
                __u32 ip = 0;
                int   port = 0;
                int   conn_count = 0;
                int   share_count = 0;

                rc = ksocknal_get_peer_info(ni, data->ioc_count,
                                            &id, &myip, &ip, &port,
                                            &conn_count, &share_count);
                if (rc != 0)
                        return rc;

                data->ioc_nid    = id.nid;
                data->ioc_count  = share_count;
                data->ioc_u32[0] = ip;
                data->ioc_u32[1] = port;
                data->ioc_u32[2] = myip;
                data->ioc_u32[3] = conn_count;
                data->ioc_u32[4] = id.pid;
                return 0;
        }

        case IOC_LIBCFS_ADD_PEER:
                id.nid = data->ioc_nid;
                id.pid = LUSTRE_SRV_LNET_PID;
                return ksocknal_add_peer (ni, id,
                                          data->ioc_u32[0], /* IP */
                                          data->ioc_u32[1]); /* port */

        case IOC_LIBCFS_DEL_PEER:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_ANY;
                return ksocknal_del_peer (ni, id,
                                          data->ioc_u32[0]); /* IP */

        case IOC_LIBCFS_GET_CONN: {
                int           txmem;
                int           rxmem;
                int           nagle;
                ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);

                if (conn == NULL)
                        return -ENOENT;

                ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);

                data->ioc_count  = txmem;
                data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
                data->ioc_flags  = nagle;
                data->ioc_u32[0] = conn->ksnc_ipaddr;
                data->ioc_u32[1] = conn->ksnc_port;
                data->ioc_u32[2] = conn->ksnc_myipaddr;
                data->ioc_u32[3] = conn->ksnc_type;
                data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
                data->ioc_u32[5] = rxmem;
                data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
                ksocknal_conn_decref(conn);
                return 0;
        }

        case IOC_LIBCFS_CLOSE_CONNECTION:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_ANY;
                return ksocknal_close_matching_conns (id,
                                                      data->ioc_u32[0]);

        case IOC_LIBCFS_REGISTER_MYNID:
                /* Ignore if this is a noop */
                if (data->ioc_nid == ni->ni_nid)
                        return 0;

                CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
                       libcfs_nid2str(data->ioc_nid),
                       libcfs_nid2str(ni->ni_nid));
                return -EINVAL;

        case IOC_LIBCFS_PUSH_CONNECTION:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_ANY;
                return ksocknal_push(ni, id);

        default:
                return -EINVAL;
        }
        /* not reached */
}

void
ksocknal_free_buffers (void)
{
        LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);

        if (ksocknal_data.ksnd_sched_info != NULL) {
                struct ksock_sched_info *info;
                int                      i;

                cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
                        if (info->ksi_scheds != NULL) {
                                LIBCFS_FREE(info->ksi_scheds,
                                            info->ksi_nthreads_max *
                                            sizeof(info->ksi_scheds[0]));
                        }
                }
                cfs_percpt_free(ksocknal_data.ksnd_sched_info);
        }

        LIBCFS_FREE (ksocknal_data.ksnd_peers,
                     sizeof (cfs_list_t) *
                     ksocknal_data.ksnd_peer_hash_size);

        spin_lock(&ksocknal_data.ksnd_tx_lock);

        if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                cfs_list_t  zlist;
                ksock_tx_t *tx;

                cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
                cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
                spin_unlock(&ksocknal_data.ksnd_tx_lock);

                while (!cfs_list_empty(&zlist)) {
                        tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
                        cfs_list_del(&tx->tx_list);
                        LIBCFS_FREE(tx, tx->tx_desc_size);
                }
        } else {
                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        }
}

void
ksocknal_base_shutdown(void)
{
        struct ksock_sched_info *info;
        ksock_sched_t           *sched;
        int                      i;
        int                      j;

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               cfs_atomic_read (&libcfs_kmemory));
        LASSERT (ksocknal_data.ksnd_nnets == 0);

        switch (ksocknal_data.ksnd_init) {
        default:
                LASSERT (0);

        case SOCKNAL_INIT_ALL:
        case SOCKNAL_INIT_DATA:
                LASSERT (ksocknal_data.ksnd_peers != NULL);
                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                        LASSERT (cfs_list_empty (&ksocknal_data.ksnd_peers[i]));
                }

                LASSERT(cfs_list_empty(&ksocknal_data.ksnd_nets));
                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_enomem_conns));
                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_zombie_conns));
                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_connreqs));
                LASSERT (cfs_list_empty (&ksocknal_data.ksnd_connd_routes));

                if (ksocknal_data.ksnd_sched_info != NULL) {
                        cfs_percpt_for_each(info, i,
                                            ksocknal_data.ksnd_sched_info) {
                                if (info->ksi_scheds == NULL)
                                        continue;

                                for (j = 0; j < info->ksi_nthreads_max; j++) {
                                        sched = &info->ksi_scheds[j];
                                        LASSERT(cfs_list_empty(&sched->kss_tx_conns));
                                        LASSERT(cfs_list_empty(&sched->kss_rx_conns));
                                        LASSERT(cfs_list_empty(&sched->kss_zombie_noop_txs));
                                        LASSERT(sched->kss_nconns == 0);
                                }
                        }
                }

                /* flag threads to terminate; wake and wait for them to die */
                ksocknal_data.ksnd_shuttingdown = 1;
                cfs_waitq_broadcast(&ksocknal_data.ksnd_connd_waitq);
                cfs_waitq_broadcast(&ksocknal_data.ksnd_reaper_waitq);

                if (ksocknal_data.ksnd_sched_info != NULL) {
                        cfs_percpt_for_each(info, i,
                                            ksocknal_data.ksnd_sched_info) {
                                if (info->ksi_scheds == NULL)
                                        continue;

                                for (j = 0; j < info->ksi_nthreads_max; j++) {
                                        sched = &info->ksi_scheds[j];
                                        cfs_waitq_broadcast(&sched->kss_waitq);
                                }
                        }
                }

                i = 4;
                read_lock(&ksocknal_data.ksnd_global_lock);
                while (ksocknal_data.ksnd_nthreads != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d threads to terminate\n",
                               ksocknal_data.ksnd_nthreads);
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        cfs_pause(cfs_time_seconds(1));
                        read_lock(&ksocknal_data.ksnd_global_lock);
                }
                read_unlock(&ksocknal_data.ksnd_global_lock);
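
                /* NB (i & (-i)) == i is true exactly when i is a power of 2
                 * (here i starts above 4, so 8, 16, 32, ...); the "waiting
                 * for threads" warning is therefore emitted with roughly
                 * exponential backoff rather than every second. */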

                ksocknal_free_buffers();

                ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
                break;
        }

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               cfs_atomic_read (&libcfs_kmemory));

        PORTAL_MODULE_UNUSE;
}

__u64
ksocknal_new_incarnation (void)
{
        struct timeval tv;

        /* The incarnation number is the time this module loaded and it
         * identifies this particular instance of the socknal.  Hopefully
         * we won't be able to reboot more frequently than 1MHz for the
         * foreseeable future :) */

        cfs_gettimeofday(&tv);

        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
}
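
/* E.g. a module loaded at 1234567890.654321 s since the epoch gets
 * incarnation 1234567890654321; a peer that later sees a different
 * incarnation for the same NID (see ksocknal_create_conn() above) knows the
 * node rebooted or reloaded the module, and drops its stale conns. */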

int
ksocknal_base_startup(void)
{
        struct ksock_sched_info *info;
        int                      rc;
        int                      i;

        LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
        LASSERT (ksocknal_data.ksnd_nnets == 0);

        memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */

        ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
        LIBCFS_ALLOC (ksocknal_data.ksnd_peers,
                      sizeof (cfs_list_t) *
                      ksocknal_data.ksnd_peer_hash_size);
        if (ksocknal_data.ksnd_peers == NULL)
                return -ENOMEM;

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
                CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);

        rwlock_init(&ksocknal_data.ksnd_global_lock);
        CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);

        spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
        cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);

        spin_lock_init(&ksocknal_data.ksnd_connd_lock);
        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
        cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);

        spin_lock_init(&ksocknal_data.ksnd_tx_lock);
        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);

        /* NB memset above zeros whole of ksocknal_data */

        /* flag lists/ptrs/locks initialised */
        ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
        PORTAL_MODULE_USE;

        ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
                                                         sizeof(*info));
        if (ksocknal_data.ksnd_sched_info == NULL)
                goto failed;

        cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
                ksock_sched_t *sched;
                int            nthrs;

                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
                        nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
                } else {
                        /* max to half of CPUs, assume another half should be
                         * reserved for upper layer modules */
                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
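                        /* e.g. a CPT with 8 CPUs and the tunable unset gets
                         * min(max(SOCKNAL_NSCHEDS, 4), 8) scheduler threads,
                         * i.e. 4 assuming SOCKNAL_NSCHEDS <= 4, leaving the
                         * other half of the CPUs for upper layers */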
                }

                info->ksi_nthreads_max = nthrs;
                info->ksi_cpt = i;

                LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
                                 info->ksi_nthreads_max * sizeof(*sched));
                if (info->ksi_scheds == NULL)
                        goto failed;

                for (; nthrs > 0; nthrs--) {
                        sched = &info->ksi_scheds[nthrs - 1];

                        sched->kss_info = info;
                        spin_lock_init(&sched->kss_lock);
                        CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
                        cfs_waitq_init(&sched->kss_waitq);
                }
        }

        ksocknal_data.ksnd_connd_starting       = 0;
        ksocknal_data.ksnd_connd_failed_stamp   = 0;
        ksocknal_data.ksnd_connd_starting_stamp = cfs_time_current_sec();
        /* must have at least 2 connds to remain responsive to accepts while
         * connecting */
        if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
                *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;

        if (*ksocknal_tunables.ksnd_nconnds_max <
            *ksocknal_tunables.ksnd_nconnds) {
                ksocknal_tunables.ksnd_nconnds_max =
                        ksocknal_tunables.ksnd_nconnds;
        }

        for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
                spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
                ksocknal_data.ksnd_connd_starting++;
                spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

                rc = ksocknal_thread_start(ksocknal_connd,
                                           (void *)((ulong_ptr_t)i));
                if (rc != 0) {
                        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
                        ksocknal_data.ksnd_connd_starting--;
                        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
                        CERROR("Can't spawn socknal connd: %d\n", rc);
                        goto failed;
                }
        }

        rc = ksocknal_thread_start (ksocknal_reaper, NULL);
        if (rc != 0) {
                CERROR ("Can't spawn socknal reaper: %d\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;

        return 0;

failed:
        ksocknal_base_shutdown();
        return -ENETDOWN;
}
void
ksocknal_debug_peerhash (lnet_ni_t *ni)
{
        ksock_peer_t *peer = NULL;
        cfs_list_t   *tmp;
        int           i;

        read_lock(&ksocknal_data.ksnd_global_lock);
        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
                        peer = cfs_list_entry (tmp, ksock_peer_t, ksnp_list);

                        if (peer->ksnp_ni == ni)
                                break;

                        peer = NULL;
                }
        }

        if (peer != NULL) {
                ksock_route_t *route;
                ksock_conn_t  *conn;
                CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
                       "closing %d, accepting %d, err %d, zcookie "LPU64", "
                       "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
                       cfs_atomic_read(&peer->ksnp_refcount),
                       peer->ksnp_sharecount, peer->ksnp_closing,
                       peer->ksnp_accepting, peer->ksnp_error,
                       peer->ksnp_zc_next_cookie,
                       !cfs_list_empty(&peer->ksnp_tx_queue),
                       !cfs_list_empty(&peer->ksnp_zc_req_list));
                cfs_list_for_each (tmp, &peer->ksnp_routes) {
                        route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
                        CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
                               "del %d\n",
                               cfs_atomic_read(&route->ksnr_refcount),
                               route->ksnr_scheduled, route->ksnr_connecting,
                               route->ksnr_connected, route->ksnr_deleted);
                }
                cfs_list_for_each (tmp, &peer->ksnp_conns) {
                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
                        CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
                               cfs_atomic_read(&conn->ksnc_conn_refcount),
                               cfs_atomic_read(&conn->ksnc_sock_refcount),
                               conn->ksnc_type, conn->ksnc_closing);
                }
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
        return;
}
void
ksocknal_shutdown (lnet_ni_t *ni)
{
        ksock_net_t      *net = ni->ni_data;
        int               i;
        lnet_process_id_t anyid = {0};

        anyid.nid = LNET_NID_ANY;
        anyid.pid = LNET_PID_ANY;
        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
        LASSERT(ksocknal_data.ksnd_nnets > 0);

        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_shutdown = 1;                 /* prevent new peers */
        spin_unlock_bh(&net->ksnn_lock);

        /* Delete all peers */
        ksocknal_del_peer(ni, anyid, 0);
        /* Wait for all peer state to clean up */
        i = 2;
        spin_lock_bh(&net->ksnn_lock);
        while (net->ksnn_npeers != 0) {
                spin_unlock_bh(&net->ksnn_lock);
                i++;
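                /* (i & -i) isolates the lowest set bit, so it equals i only
                 * when i is a power of two: the "still waiting" message is
                 * warned at 2, 4, 8, ... seconds and logged quietly at
                 * D_NET in between */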
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "waiting for %d peers to disconnect\n",
                       net->ksnn_npeers);
                cfs_pause(cfs_time_seconds(1));

                ksocknal_debug_peerhash(ni);

                spin_lock_bh(&net->ksnn_lock);
        }
        spin_unlock_bh(&net->ksnn_lock);
        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
                LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
        }
        cfs_list_del(&net->ksnn_list);
        LIBCFS_FREE(net, sizeof(*net));

        ksocknal_data.ksnd_nnets--;
        if (ksocknal_data.ksnd_nnets == 0)
                ksocknal_base_shutdown();
}
int
ksocknal_enumerate_interfaces(ksock_net_t *net)
{
        char **names;
        int    i;
        int    j;
        int    rc;
        int    n;

        n = libcfs_ipif_enumerate(&names);
        if (n <= 0) {
                CERROR("Can't enumerate interfaces: %d\n", n);
                return n;
        }
        for (i = j = 0; i < n; i++) {
                int   up;
                __u32 ip;
                __u32 mask;

                if (!strcmp(names[i], "lo")) /* skip the loopback IF */
                        continue;

                rc = libcfs_ipif_query(names[i], &up, &ip, &mask);
                if (rc != 0) {
                        CWARN("Can't get interface %s info: %d\n",
                              names[i], rc);
                        continue;
                }

                if (!up) {
                        CWARN("Ignoring interface %s (down)\n",
                              names[i]);
                        continue;
                }

                if (j == LNET_MAX_INTERFACES) {
                        CWARN("Ignoring interface %s (too many interfaces)\n",
                              names[i]);
                        continue;
                }
                net->ksnn_interfaces[j].ksni_ipaddr = ip;
                net->ksnn_interfaces[j].ksni_netmask = mask;
                strncpy(&net->ksnn_interfaces[j].ksni_name[0],
                        names[i], IFNAMSIZ);
                j++;
        }
        libcfs_ipif_free_enumeration(names, n);

        if (j == 0)
                CERROR("Can't find any usable interfaces\n");

        return j;
}
static int
ksocknal_search_new_ipif(ksock_net_t *net)
{
        int new_ipif = 0;
        int i;

        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                char        *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
                char        *colon = strchr(ifnam, ':');
                int          found = 0;
                ksock_net_t *tmp;
                int          j;
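                /* Linux IP alias devices are named like "eth0:1"; blanking
                 * the colon below compares only the base device name, so an
                 * alias of an interface another net already uses does not
                 * count as a new physical interface */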
                if (colon != NULL) /* ignore alias device */
                        *colon = 0;

                cfs_list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
                                        ksnn_list) {
                        for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
                                char *ifnam2 =
                                        &tmp->ksnn_interfaces[j].ksni_name[0];
                                char *colon2 = strchr(ifnam2, ':');

                                if (colon2 != NULL)
                                        *colon2 = 0;

                                found = strcmp(ifnam, ifnam2) == 0;
                                if (colon2 != NULL)
                                        *colon2 = ':';
                        }
                        if (found)
                                break;
                }

                new_ipif += !found;
                if (colon != NULL)
                        *colon = ':';
        }

        return new_ipif;
}
int
ksocknal_start_schedulers(struct ksock_sched_info *info)
{
        int nthrs;
        int rc = 0;
        int i;

        if (info->ksi_nthreads == 0) {
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
                        nthrs = info->ksi_nthreads_max;
                } else {
                        nthrs = cfs_cpt_weight(lnet_cpt_table(),
                                               info->ksi_cpt);
                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
                        nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
                }
                nthrs = min(nthrs, info->ksi_nthreads_max);
        } else {
                LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
                /* start up to two more threads if there is a new interface */
                nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
        }
        for (i = 0; i < nthrs; i++) {
                long id;
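                /* KSOCK_THREAD_ID packs the CPT number and the per-CPT
                 * thread index into a single long so the scheduler thread
                 * can recover both from its startup argument (see its
                 * definition in socklnd.h) */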
                id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
                rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id);
                if (rc == 0)
                        continue;

                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
                       info->ksi_cpt, info->ksi_nthreads + i, rc);
                break;
        }

        info->ksi_nthreads += i;
        return rc;
}
int
ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
{
        int newif = ksocknal_search_new_ipif(net);
        int rc;
        int i;

        LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
        for (i = 0; i < ncpts; i++) {
                struct ksock_sched_info *info;
                int cpt = (cpts == NULL) ? i : cpts[i];

                LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
                info = ksocknal_data.ksnd_sched_info[cpt];

                if (!newif && info->ksi_nthreads > 0)
                        continue;

                rc = ksocknal_start_schedulers(info);
                if (rc != 0)
                        return rc;
        }
        return 0;
}
int
ksocknal_startup (lnet_ni_t *ni)
{
        ksock_net_t *net;
        int          rc;
        int          i;

        LASSERT (ni->ni_lnd == &the_ksocklnd);

        if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
                rc = ksocknal_base_startup();
                if (rc != 0)
                        return rc;
        }
        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL)
                goto fail_0;

        spin_lock_init(&net->ksnn_lock);
        net->ksnn_incarnation = ksocknal_new_incarnation();
        ni->ni_data = net;
        ni->ni_peertimeout    = *ksocknal_tunables.ksnd_peertimeout;
        ni->ni_maxtxcredits   = *ksocknal_tunables.ksnd_credits;
        ni->ni_peertxcredits  = *ksocknal_tunables.ksnd_peertxcredits;
        ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
        if (ni->ni_interfaces[0] == NULL) {
                rc = ksocknal_enumerate_interfaces(net);
                if (rc <= 0)
                        goto fail_1;

                net->ksnn_ninterfaces = 1;
        } else {
                for (i = 0; i < LNET_MAX_INTERFACES; i++) {
                        int up;

                        if (ni->ni_interfaces[i] == NULL)
                                break;

                        rc = libcfs_ipif_query(
                                ni->ni_interfaces[i], &up,
                                &net->ksnn_interfaces[i].ksni_ipaddr,
                                &net->ksnn_interfaces[i].ksni_netmask);

                        if (rc != 0) {
                                CERROR("Can't get interface %s info: %d\n",
                                       ni->ni_interfaces[i], rc);
                                goto fail_1;
                        }

                        if (!up) {
                                CERROR("Interface %s is down\n",
                                       ni->ni_interfaces[i]);
                                goto fail_1;
                        }

                        strncpy(&net->ksnn_interfaces[i].ksni_name[0],
                                ni->ni_interfaces[i], IFNAMSIZ);
                }
                net->ksnn_ninterfaces = i;
        }

        /* start the scheduler threads before adding this net to
         * ksocknal_data.ksnd_nets */
        rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
        if (rc != 0)
                goto fail_1;
        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
                                net->ksnn_interfaces[0].ksni_ipaddr);
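        /* the NID keeps the configured net number but takes its host part
         * from the first interface's IPv4 address, giving NIDs of the
         * familiar "192.168.0.1@tcp0" form */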
        cfs_list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

        ksocknal_data.ksnd_nnets++;

        return 0;
 fail_1:
        LIBCFS_FREE(net, sizeof(*net));
 fail_0:
        if (ksocknal_data.ksnd_nnets == 0)
                ksocknal_base_shutdown();

        return -ENETDOWN;
}
void
ksocknal_module_fini (void)
{
        lnet_unregister_lnd(&the_ksocklnd);
        ksocknal_tunables_fini();
}
int
ksocknal_module_init (void)
{
        int rc;

        /* check that the ksnr_connected/connecting fields are large enough */
        CLASSERT (SOCKLND_CONN_NTYPES <= 4);
        CLASSERT (SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
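        /* CLASSERT fails the build when its condition is false: the route's
         * connection-state fields are 4-bit-wide bitmaps (as declared in
         * socklnd.h), so every connection type must fit in 4 bits, and ACK
         * is expected to share BULK_IN's value since an ACK connection is
         * the bulk-in half of a peer's bulk traffic */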
        /* initialize the_ksocklnd */
        the_ksocklnd.lnd_type     = SOCKLND;
        the_ksocklnd.lnd_startup  = ksocknal_startup;
        the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
        the_ksocklnd.lnd_ctl      = ksocknal_ctl;
        the_ksocklnd.lnd_send     = ksocknal_send;
        the_ksocklnd.lnd_recv     = ksocknal_recv;
        the_ksocklnd.lnd_notify   = ksocknal_notify;
        the_ksocklnd.lnd_query    = ksocknal_query;
        the_ksocklnd.lnd_accept   = ksocknal_accept;
        rc = ksocknal_tunables_init();
        if (rc != 0)
                return rc;

        lnet_register_lnd(&the_ksocklnd);

        return 0;
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
MODULE_LICENSE("GPL");

cfs_module(ksocknal, "3.0.0", ksocknal_module_init, ksocknal_module_fini);