/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include <linux/pci.h>
#include "socklnd.h"

static struct lnet_lnd the_ksocklnd;
ksock_nal_data_t ksocknal_data;
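/* Find the interface with address 'ip' among those configured on LNet
 * interface 'ni'; returns NULL if 'ip' is not one of ours. */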
static ksock_interface_t *
ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
	ksock_net_t *net = ni->ni_data;
	int i;
	ksock_interface_t *iface;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(i < LNET_MAX_INTERFACES);
		iface = &net->ksnn_interfaces[i];

		if (iface->ksni_ipaddr == ip)
			return iface;
	}

	return NULL;
}
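/* Allocate and initialise a route to 'ipaddr':'port'.  The caller owns the
 * initial reference. */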
static ksock_route_t *
ksocknal_create_route(__u32 ipaddr, int port)
{
	ksock_route_t *route;

	LIBCFS_ALLOC(route, sizeof(*route));
	if (route == NULL)
		return NULL;

	atomic_set(&route->ksnr_refcount, 1);
	route->ksnr_peer = NULL;
	route->ksnr_retry_interval = 0;		/* OK to connect at any time */
	route->ksnr_ipaddr = ipaddr;
	route->ksnr_port = port;
	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;
	route->ksnr_connected = 0;
	route->ksnr_deleted = 0;
	route->ksnr_conn_count = 0;
	route->ksnr_share_count = 0;

	return route;
}
void
ksocknal_destroy_route(ksock_route_t *route)
{
	LASSERT(atomic_read(&route->ksnr_refcount) == 0);

	if (route->ksnr_peer != NULL)
		ksocknal_peer_decref(route->ksnr_peer);

	LIBCFS_FREE(route, sizeof(*route));
}
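/* Allocate a new peer_ni for 'id' with a single reference for the caller;
 * fails with -ESHUTDOWN if the network is already stopping. */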
static int
ksocknal_create_peer(ksock_peer_ni_t **peerp, struct lnet_ni *ni,
		     struct lnet_process_id id)
{
	int cpt = lnet_cpt_of_nid(id.nid, ni);
	ksock_net_t *net = ni->ni_data;
	ksock_peer_ni_t *peer_ni;

	LASSERT(id.nid != LNET_NID_ANY);
	LASSERT(id.pid != LNET_PID_ANY);
	LASSERT(!in_interrupt());

	LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
	if (peer_ni == NULL)
		return -ENOMEM;

	peer_ni->ksnp_ni = ni;
	peer_ni->ksnp_id = id;
	atomic_set(&peer_ni->ksnp_refcount, 1);	/* 1 ref for caller */
	peer_ni->ksnp_closing = 0;
	peer_ni->ksnp_accepting = 0;
	peer_ni->ksnp_proto = NULL;
	peer_ni->ksnp_last_alive = 0;
	peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	INIT_LIST_HEAD(&peer_ni->ksnp_conns);
	INIT_LIST_HEAD(&peer_ni->ksnp_routes);
	INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
	INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
	spin_lock_init(&peer_ni->ksnp_lock);

	spin_lock_bh(&net->ksnn_lock);

	if (net->ksnn_shutdown) {
		spin_unlock_bh(&net->ksnn_lock);

		LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
		CERROR("Can't create peer_ni: network shutdown\n");
		return -ESHUTDOWN;
	}

	net->ksnn_npeers++;

	spin_unlock_bh(&net->ksnn_lock);

	*peerp = peer_ni;
	return 0;
}
void
ksocknal_destroy_peer(ksock_peer_ni_t *peer_ni)
{
	ksock_net_t *net = peer_ni->ksnp_ni->ni_data;

	CDEBUG(D_NET, "peer_ni %s %p deleted\n",
	       libcfs_id2str(peer_ni->ksnp_id), peer_ni);

	LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
	LASSERT(peer_ni->ksnp_accepting == 0);
	LASSERT(list_empty(&peer_ni->ksnp_conns));
	LASSERT(list_empty(&peer_ni->ksnp_routes));
	LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
	LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));

	LIBCFS_FREE(peer_ni, sizeof(*peer_ni));

	/* NB a peer_ni's connections and routes keep a reference on their
	 * peer_ni until they are destroyed, so we can be assured that _all_
	 * state to do with this peer_ni has been cleaned up when its
	 * refcount drops to zero. */
	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_npeers--;
	spin_unlock_bh(&net->ksnn_lock);
}
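/* Look up the peer_ni for 'id' in the global peer_ni hash.  No reference is
 * taken; the caller holds ksnd_global_lock. */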
ksock_peer_ni_t *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
	struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
	struct list_head *tmp;
	ksock_peer_ni_t *peer_ni;

	list_for_each(tmp, peer_list) {
		peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);

		LASSERT(!peer_ni->ksnp_closing);

		if (peer_ni->ksnp_ni != ni)
			continue;

		if (peer_ni->ksnp_id.nid != id.nid ||
		    peer_ni->ksnp_id.pid != id.pid)
			continue;

		CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
		       peer_ni, libcfs_id2str(id),
		       atomic_read(&peer_ni->ksnp_refcount));

		return peer_ni;
	}

	return NULL;
}
ksock_peer_ni_t *
ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
	ksock_peer_ni_t *peer_ni;

	read_lock(&ksocknal_data.ksnd_global_lock);
	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL)			/* +1 ref for caller? */
		ksocknal_peer_addref(peer_ni);
	read_unlock(&ksocknal_data.ksnd_global_lock);

	return peer_ni;
}
static void
ksocknal_unlink_peer_locked(ksock_peer_ni_t *peer_ni)
{
	int i;
	__u32 ip;
	ksock_interface_t *iface;

	for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
		LASSERT(i < LNET_MAX_INTERFACES);
		ip = peer_ni->ksnp_passive_ips[i];

		iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
		/* All IPs in peer_ni->ksnp_passive_ips[] come from the
		 * interface list, therefore the call must succeed. */
		LASSERT(iface != NULL);

		CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
		       peer_ni, iface, iface->ksni_nroutes);
		iface->ksni_npeers--;
	}

	LASSERT(list_empty(&peer_ni->ksnp_conns));
	LASSERT(list_empty(&peer_ni->ksnp_routes));
	LASSERT(!peer_ni->ksnp_closing);
	peer_ni->ksnp_closing = 1;
	list_del(&peer_ni->ksnp_list);
	/* lose peerlist's ref */
	ksocknal_peer_decref(peer_ni);
}
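/* Return info about the index'th peer_ni entry (bare peer_ni, passive IP or
 * route) for the IOC_LIBCFS_GET_PEER ioctl. */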
static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
		       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
		       int *port, int *conn_count, int *share_count)
{
	ksock_peer_ni_t *peer_ni;
	struct list_head *ptmp;
	ksock_route_t *route;
	struct list_head *rtmp;
	int i;
	int j;
	int rc = -ENOENT;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni != ni)
				continue;

			if (peer_ni->ksnp_n_passive_ips == 0 &&
			    list_empty(&peer_ni->ksnp_routes)) {
				if (index-- > 0)
					continue;

				*id = peer_ni->ksnp_id;
				*myip = 0;
				*peer_ip = 0;
				*port = 0;
				*conn_count = 0;
				*share_count = 0;
				rc = 0;
				goto out;
			}

			for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
				if (index-- > 0)
					continue;

				*id = peer_ni->ksnp_id;
				*myip = peer_ni->ksnp_passive_ips[j];
				*peer_ip = 0;
				*port = 0;
				*conn_count = 0;
				*share_count = 0;
				rc = 0;
				goto out;
			}

			list_for_each(rtmp, &peer_ni->ksnp_routes) {
				if (index-- > 0)
					continue;

				route = list_entry(rtmp, ksock_route_t,
						   ksnr_list);

				*id = peer_ni->ksnp_id;
				*myip = route->ksnr_myipaddr;
				*peer_ip = route->ksnr_ipaddr;
				*port = route->ksnr_port;
				*conn_count = route->ksnr_conn_count;
				*share_count = route->ksnr_share_count;
				rc = 0;
				goto out;
			}
		}
	}
out:
	read_unlock(&ksocknal_data.ksnd_global_lock);
	return rc;
}
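/* Associate 'conn' with 'route': bind the route to the local IP the
 * connection actually used, fix up the per-interface route counts and
 * record the connection type as established. */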
static void
ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
{
	ksock_peer_ni_t *peer_ni = route->ksnr_peer;
	int type = conn->ksnc_type;
	ksock_interface_t *iface;

	conn->ksnc_route = route;
	ksocknal_route_addref(route);

	if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
		if (route->ksnr_myipaddr == 0) {
			/* route wasn't bound locally yet (the initial route) */
			CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
			       libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr,
			       &conn->ksnc_myipaddr);
		} else {
			CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
			       "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr,
			       &route->ksnr_myipaddr,
			       &conn->ksnc_myipaddr);

			iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
						  route->ksnr_myipaddr);
			if (iface != NULL)
				iface->ksni_nroutes--;
		}
		route->ksnr_myipaddr = conn->ksnc_myipaddr;
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface != NULL)
			iface->ksni_nroutes++;
	}

	route->ksnr_connected |= (1<<type);
	route->ksnr_conn_count++;

	/* Successful connection => further attempts can
	 * proceed immediately */
	route->ksnr_retry_interval = 0;
}
static void
ksocknal_add_route_locked(ksock_peer_ni_t *peer_ni, ksock_route_t *route)
{
	struct list_head *tmp;
	ksock_conn_t *conn;
	ksock_route_t *route2;

	LASSERT(!peer_ni->ksnp_closing);
	LASSERT(route->ksnr_peer == NULL);
	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT(route->ksnr_connected == 0);

	/* LASSERT(unique) */
	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route2 = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
			CERROR("Duplicate route %s %pI4h\n",
			       libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr);
			LBUG();
		}
	}

	route->ksnr_peer = peer_ni;
	ksocknal_peer_addref(peer_ni);
	/* peer_ni's routelist takes over my ref on 'route' */
	list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);

	list_for_each(tmp, &peer_ni->ksnp_conns) {
		conn = list_entry(tmp, ksock_conn_t, ksnc_list);

		if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
			continue;

		ksocknal_associate_route_conn_locked(route, conn);
		/* keep going (typed routes) */
	}
}
static void
ksocknal_del_route_locked(ksock_route_t *route)
{
	ksock_peer_ni_t *peer_ni = route->ksnr_peer;
	ksock_interface_t *iface;
	ksock_conn_t *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;

	LASSERT(!route->ksnr_deleted);

	/* Close associated conns */
	list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

		if (conn->ksnc_route != route)
			continue;

		ksocknal_close_conn_locked(conn, 0);
	}

	if (route->ksnr_myipaddr != 0) {
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface != NULL)
			iface->ksni_nroutes--;
	}

	route->ksnr_deleted = 1;
	list_del(&route->ksnr_list);
	ksocknal_route_decref(route);		/* drop peer_ni's ref */

	if (list_empty(&peer_ni->ksnp_routes) &&
	    list_empty(&peer_ni->ksnp_conns)) {
		/* I've just removed the last route to a peer_ni with no
		 * active connections */
		ksocknal_unlink_peer_locked(peer_ni);
	}
}
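/* Add an explicit route to 'ipaddr':'port' for peer_ni 'id', creating the
 * peer_ni if it doesn't exist yet; a duplicate route just gains a share
 * count. */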
int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
		  int port)
{
	struct list_head *tmp;
	ksock_peer_ni_t *peer_ni;
	ksock_peer_ni_t *peer2;
	ksock_route_t *route;
	ksock_route_t *route2;
	int rc;

	if (id.nid == LNET_NID_ANY ||
	    id.pid == LNET_PID_ANY)
		return -EINVAL;

	/* Have a brand new peer_ni ready... */
	rc = ksocknal_create_peer(&peer_ni, ni, id);
	if (rc != 0)
		return rc;

	route = ksocknal_create_route(ipaddr, port);
	if (route == NULL) {
		ksocknal_peer_decref(peer_ni);
		return -ENOMEM;
	}

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	/* always called with a ref on ni, so shutdown can't have started */
	LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

	peer2 = ksocknal_find_peer_locked(ni, id);
	if (peer2 != NULL) {
		ksocknal_peer_decref(peer_ni);
		peer_ni = peer2;
	} else {
		/* peer_ni table takes my ref on peer_ni */
		list_add_tail(&peer_ni->ksnp_list,
			      ksocknal_nid2peerlist(id.nid));
	}

	route2 = NULL;
	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route2 = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route2->ksnr_ipaddr == ipaddr)
			break;

		route2 = NULL;
	}
	if (route2 == NULL) {
		ksocknal_add_route_locked(peer_ni, route);
		route->ksnr_share_count++;
	} else {
		ksocknal_route_decref(route);
		route2->ksnr_share_count++;
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return 0;
}
static void
ksocknal_del_peer_locked(ksock_peer_ni_t *peer_ni, __u32 ip)
{
	ksock_conn_t *conn;
	ksock_route_t *route;
	struct list_head *tmp;
	struct list_head *nxt;
	int nshared;

	LASSERT(!peer_ni->ksnp_closing);

	/* Extra ref prevents peer_ni disappearing until I'm done with it */
	ksocknal_peer_addref(peer_ni);

	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		/* no match */
		if (!(ip == 0 || route->ksnr_ipaddr == ip))
			continue;

		route->ksnr_share_count = 0;
		/* This deletes associated conns too */
		ksocknal_del_route_locked(route);
	}

	nshared = 0;
	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);
		nshared += route->ksnr_share_count;
	}

	if (nshared == 0) {
		/* remove everything else if there are no explicit entries
		 * left */
		list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
			route = list_entry(tmp, ksock_route_t, ksnr_list);

			/* we should only be removing auto-entries */
			LASSERT(route->ksnr_share_count == 0);
			ksocknal_del_route_locked(route);
		}

		list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);

			ksocknal_close_conn_locked(conn, 0);
		}
	}

	ksocknal_peer_decref(peer_ni);
	/* NB peer_ni unlinks itself when last conn/route is removed */
}
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct list_head *ptmp;
	struct list_head *pnxt;
	ksock_peer_ni_t *peer_ni;
	int lo;
	int hi;
	int i;
	int rc = -ENOENT;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	if (id.nid != LNET_NID_ANY) {
		hi = (int)(ksocknal_nid2peerlist(id.nid) -
			   ksocknal_data.ksnd_peers);
		lo = hi;
	} else {
		lo = 0;
		hi = ksocknal_data.ksnd_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt,
				   &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni != ni)
				continue;

			if (!((id.nid == LNET_NID_ANY ||
			       peer_ni->ksnp_id.nid == id.nid) &&
			      (id.pid == LNET_PID_ANY ||
			       peer_ni->ksnp_id.pid == id.pid)))
				continue;

			ksocknal_peer_addref(peer_ni);	/* a ref for me... */

			ksocknal_del_peer_locked(peer_ni, ip);

			if (peer_ni->ksnp_closing &&
			    !list_empty(&peer_ni->ksnp_tx_queue)) {
				LASSERT(list_empty(&peer_ni->ksnp_conns));
				LASSERT(list_empty(&peer_ni->ksnp_routes));

				list_splice_init(&peer_ni->ksnp_tx_queue,
						 &zombies);
			}

			ksocknal_peer_decref(peer_ni);	/* ...till here */

			rc = 0;				/* matched! */
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(ni, &zombies, -ENETDOWN);

	return rc;
}
static ksock_conn_t *
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
	ksock_peer_ni_t *peer_ni;
	struct list_head *ptmp;
	ksock_conn_t *conn;
	struct list_head *ctmp;
	int i;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			LASSERT(!peer_ni->ksnp_closing);

			if (peer_ni->ksnp_ni != ni)
				continue;

			list_for_each(ctmp, &peer_ni->ksnp_conns) {
				if (index-- > 0)
					continue;

				conn = list_entry(ctmp, ksock_conn_t,
						  ksnc_list);
				ksocknal_conn_addref(conn);
				read_unlock(&ksocknal_data.ksnd_global_lock);
				return conn;
			}
		}
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	return NULL;
}
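/* Return the scheduler in this CPT with the fewest connections. */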
static ksock_sched_t *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
	struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
	ksock_sched_t *sched;
	int i;

	LASSERT(info->ksi_nthreads > 0);

	sched = &info->ksi_scheds[0];
	/*
	 * NB: it's safe so far, but info->ksi_nthreads could be changed
	 * at runtime when we have dynamic LNet configuration, then we
	 * need to take care of this.
	 */
	for (i = 1; i < info->ksi_nthreads; i++) {
		if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
			sched = &info->ksi_scheds[i];
	}

	return sched;
}
static int
ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
	ksock_net_t *net = ni->ni_data;
	int i;
	int nip;

	read_lock(&ksocknal_data.ksnd_global_lock);

	nip = net->ksnn_ninterfaces;
	LASSERT(nip <= LNET_MAX_INTERFACES);

	/* Only offer interfaces for additional connections if I have
	 * more than one. */
	if (nip < 2) {
		read_unlock(&ksocknal_data.ksnd_global_lock);
		return 0;
	}

	for (i = 0; i < nip; i++) {
		ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
		LASSERT(ipaddrs[i] != 0);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
	return nip;
}
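/* Return the index of the peer_ni IP that best matches 'iface': prefer a
 * subnet match, then the smallest address XOR. */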
static int
ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
{
	int best_netmatch = 0;
	int best_xor = 0;
	int best = -1;
	int this_xor;
	int this_netmatch;
	int i;

	for (i = 0; i < nips; i++) {
		if (ips[i] == 0)
			continue;

		this_xor = (ips[i] ^ iface->ksni_ipaddr);
		this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;

		if (!(best < 0 ||
		      best_netmatch < this_netmatch ||
		      (best_netmatch == this_netmatch &&
		       best_xor > this_xor)))
			continue;

		best = i;
		best_netmatch = this_netmatch;
		best_xor = this_xor;
	}

	LASSERT(best >= 0);
	return best;
}
static int
ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
{
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
	ksock_interface_t *iface;
	ksock_interface_t *best_iface;
	int n_ips;
	int i;
	int j;
	int k;
	__u32 ip;
	__u32 xor;
	int this_netmatch;
	int best_netmatch;
	int best_npeers;

	/* CAVEAT EMPTOR: We do all our interface matching with an
	 * exclusive hold of global lock at IRQ priority. We're only
	 * expecting to be dealing with small numbers of interfaces, so the
	 * O(n**3)-ness shouldn't matter */

	/* Also note that I'm not going to return more than n_peerips
	 * interfaces, even if I have more myself */

	write_lock_bh(global_lock);

	LASSERT(n_peerips <= LNET_MAX_INTERFACES);
	LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);

	/* Only match interfaces for additional connections
	 * if I have > 1 interface */
	n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
		MIN(n_peerips, net->ksnn_ninterfaces);

	for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
		/*		^ yes really... */

		/* If we have any new interfaces, first tick off all the
		 * peer_ni IPs that match old interfaces, then choose new
		 * interfaces to match the remaining peer_ni IPS.
		 * We don't forget interfaces we've stopped using; we might
		 * start using them again... */

		if (i < peer_ni->ksnp_n_passive_ips) {
			/* Old interface. */
			ip = peer_ni->ksnp_passive_ips[i];
			best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);

			/* peer_ni passive ips are kept up to date */
			LASSERT(best_iface != NULL);
		} else {
			/* choose a new interface */
			LASSERT(i == peer_ni->ksnp_n_passive_ips);

			best_iface = NULL;
			best_netmatch = 0;
			best_npeers = 0;

			for (j = 0; j < net->ksnn_ninterfaces; j++) {
				iface = &net->ksnn_interfaces[j];
				ip = iface->ksni_ipaddr;

				for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
					if (peer_ni->ksnp_passive_ips[k] == ip)
						break;

				if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
					continue;

				k = ksocknal_match_peerip(iface, peerips, n_peerips);
				xor = (ip ^ peerips[k]);
				this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

				if (!(best_iface == NULL ||
				      best_netmatch < this_netmatch ||
				      (best_netmatch == this_netmatch &&
				       best_npeers > iface->ksni_npeers)))
					continue;

				best_iface = iface;
				best_netmatch = this_netmatch;
				best_npeers = iface->ksni_npeers;
			}

			LASSERT(best_iface != NULL);

			best_iface->ksni_npeers++;
			ip = best_iface->ksni_ipaddr;
			peer_ni->ksnp_passive_ips[i] = ip;
			peer_ni->ksnp_n_passive_ips = i+1;
		}

		/* mark the best matching peer_ni IP used */
		j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
		peerips[j] = 0;
	}

	/* Overwrite input peer_ni IP addresses */
	memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));

	write_unlock_bh(global_lock);

	return n_ips;
}
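/* Create routes for the peer_ni IPs advertised in the interface exchange,
 * binding each new route to the best local interface (subnet match first,
 * then fewest existing routes). */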
static void
ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
		       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
	ksock_route_t *newroute = NULL;
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	struct lnet_ni *ni = peer_ni->ksnp_ni;
	ksock_net_t *net = ni->ni_data;
	struct list_head *rtmp;
	ksock_route_t *route;
	ksock_interface_t *iface;
	ksock_interface_t *best_iface;
	int best_netmatch;
	int this_netmatch;
	int best_nroutes;
	int i;
	int j;

	/* CAVEAT EMPTOR: We do all our interface matching with an
	 * exclusive hold of global lock at IRQ priority. We're only
	 * expecting to be dealing with small numbers of interfaces, so the
	 * O(n**3)-ness here shouldn't matter */

	write_lock_bh(global_lock);

	if (net->ksnn_ninterfaces < 2) {
		/* Only create additional connections
		 * if I have > 1 interface */
		write_unlock_bh(global_lock);
		return;
	}

	LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);

	for (i = 0; i < npeer_ipaddrs; i++) {
		if (newroute != NULL) {
			newroute->ksnr_ipaddr = peer_ipaddrs[i];
		} else {
			write_unlock_bh(global_lock);

			newroute = ksocknal_create_route(peer_ipaddrs[i], port);
			if (newroute == NULL)
				return;

			write_lock_bh(global_lock);
		}

		if (peer_ni->ksnp_closing) {
			/* peer_ni got closed under me */
			break;
		}

		/* Already got a route? */
		route = NULL;
		list_for_each(rtmp, &peer_ni->ksnp_routes) {
			route = list_entry(rtmp, ksock_route_t, ksnr_list);

			if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
				break;

			route = NULL;
		}
		if (route != NULL)
			continue;

		best_iface = NULL;
		best_nroutes = 0;
		best_netmatch = 0;

		LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);

		/* Select interface to connect from */
		for (j = 0; j < net->ksnn_ninterfaces; j++) {
			iface = &net->ksnn_interfaces[j];

			/* Using this interface already? */
			list_for_each(rtmp, &peer_ni->ksnp_routes) {
				route = list_entry(rtmp, ksock_route_t,
						   ksnr_list);

				if (route->ksnr_myipaddr == iface->ksni_ipaddr)
					break;

				route = NULL;
			}
			if (route != NULL)
				continue;

			this_netmatch = (((iface->ksni_ipaddr ^
					   newroute->ksnr_ipaddr) &
					  iface->ksni_netmask) == 0) ? 1 : 0;

			if (!(best_iface == NULL ||
			      best_netmatch < this_netmatch ||
			      (best_netmatch == this_netmatch &&
			       best_nroutes > iface->ksni_nroutes)))
				continue;

			best_iface = iface;
			best_netmatch = this_netmatch;
			best_nroutes = iface->ksni_nroutes;
		}

		if (best_iface == NULL)
			continue;

		newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
		best_iface->ksni_nroutes++;

		ksocknal_add_route_locked(peer_ni, newroute);
		newroute = NULL;
	}

	write_unlock_bh(global_lock);
	if (newroute != NULL)
		ksocknal_route_decref(newroute);
}
int
ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
	ksock_connreq_t *cr;
	int rc;
	__u32 peer_ip;
	int peer_port;

	rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
	LASSERT(rc == 0);			/* we succeeded before */

	LIBCFS_ALLOC(cr, sizeof(*cr));
	if (cr == NULL) {
		LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
				   "%pI4h: memory exhausted\n", &peer_ip);
		return -ENOMEM;
	}

	lnet_ni_addref(ni);
	cr->ksncr_ni = ni;
	cr->ksncr_sock = sock;

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
	return 0;
}
static int
ksocknal_connecting(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
{
	ksock_route_t *route;

	list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
		if (route->ksnr_ipaddr == ipaddr)
			return route->ksnr_connecting;
	}
	return 0;
}
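/* Complete the setup of a connection on 'sock': exchange HELLOs, find or
 * create the peer_ni, resolve connection races, match the conn against the
 * peer_ni's routes and hand it to a scheduler.  'route' is non-NULL iff this
 * is an active connect. */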
int
ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
		     struct socket *sock, int type)
{
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct lnet_process_id peerid;
	struct list_head *tmp;
	__u64 incarnation;
	ksock_conn_t *conn;
	ksock_conn_t *conn2;
	ksock_peer_ni_t *peer_ni = NULL;
	ksock_peer_ni_t *peer2;
	ksock_sched_t *sched;
	struct ksock_hello_msg *hello;
	int cpt;
	ksock_tx_t *tx;
	ksock_tx_t *txtmp;
	int rc;
	int rc2;
	int active;
	char *warn = NULL;

	active = (route != NULL);

	LASSERT(active == (type != SOCKLND_CONN_NONE));

	LIBCFS_ALLOC(conn, sizeof(*conn));
	if (conn == NULL) {
		rc = -ENOMEM;
		goto failed_0;
	}

	conn->ksnc_peer = NULL;
	conn->ksnc_route = NULL;
	conn->ksnc_sock = sock;
	/* 2 ref, 1 for conn, another extra ref prevents socket
	 * being closed before establishment of connection */
	atomic_set(&conn->ksnc_sock_refcount, 2);
	conn->ksnc_type = type;
	ksocknal_lib_save_callback(sock, conn);
	atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

	conn->ksnc_rx_ready = 0;
	conn->ksnc_rx_scheduled = 0;

	INIT_LIST_HEAD(&conn->ksnc_tx_queue);
	conn->ksnc_tx_ready = 0;
	conn->ksnc_tx_scheduled = 0;
	conn->ksnc_tx_carrier = NULL;
	atomic_set(&conn->ksnc_tx_nob, 0);

	LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
				     kshm_ips[LNET_MAX_INTERFACES]));
	if (hello == NULL) {
		rc = -ENOMEM;
		goto failed_1;
	}

	/* stash conn's local and remote addrs */
	rc = ksocknal_lib_get_conn_addrs(conn);
	if (rc != 0)
		goto failed_1;

	/* Find out/confirm peer_ni's NID and connection type and get the
	 * vector of interfaces she's willing to let me connect to.
	 * Passive connections use the listener timeout since the peer_ni
	 * sends eagerly */

	if (active) {
		peer_ni = route->ksnr_peer;
		LASSERT(ni == peer_ni->ksnp_ni);

		/* Active connection sends HELLO eagerly */
		hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
		peerid = peer_ni->ksnp_id;

		write_lock_bh(global_lock);
		conn->ksnc_proto = peer_ni->ksnp_proto;
		write_unlock_bh(global_lock);

		if (conn->ksnc_proto == NULL) {
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
		}

		rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
		if (rc != 0)
			goto failed_1;
	} else {
		peerid.nid = LNET_NID_ANY;
		peerid.pid = LNET_PID_ANY;

		/* Passive, get protocol from peer_ni */
		conn->ksnc_proto = NULL;
	}

	rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
	if (rc < 0)
		goto failed_1;

	LASSERT(rc == 0 || active);
	LASSERT(conn->ksnc_proto != NULL);
	LASSERT(peerid.nid != LNET_NID_ANY);

	cpt = lnet_cpt_of_nid(peerid.nid, ni);

	if (active) {
		ksocknal_peer_addref(peer_ni);
		write_lock_bh(global_lock);
	} else {
		rc = ksocknal_create_peer(&peer_ni, ni, peerid);
		if (rc != 0)
			goto failed_1;

		write_lock_bh(global_lock);

		/* called with a ref on ni, so shutdown can't have started */
		LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

		peer2 = ksocknal_find_peer_locked(ni, peerid);
		if (peer2 == NULL) {
			/* NB this puts an "empty" peer_ni in the peer_ni
			 * table (which takes my ref) */
			list_add_tail(&peer_ni->ksnp_list,
				      ksocknal_nid2peerlist(peerid.nid));
		} else {
			ksocknal_peer_decref(peer_ni);
			peer_ni = peer2;
		}

		/* +1 ref for me */
		ksocknal_peer_addref(peer_ni);
		peer_ni->ksnp_accepting++;

		/* Am I already connecting to this guy?  Resolve in
		 * favour of higher NID... */
		if (peerid.nid < ni->ni_nid &&
		    ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
			rc = EALREADY;
			warn = "connection race resolution";
			goto failed_2;
		}
	}

	if (peer_ni->ksnp_closing ||
	    (active && route->ksnr_deleted)) {
		/* peer_ni/route got closed under me */
		rc = -ESTALE;
		warn = "peer_ni/route removed";
		goto failed_2;
	}

	if (peer_ni->ksnp_proto == NULL) {
		/* Never connected before.
		 * NB recv_hello may have returned EPROTO to signal my peer_ni
		 * wants a different protocol than the one I asked for.
		 */
		LASSERT(list_empty(&peer_ni->ksnp_conns));

		peer_ni->ksnp_proto = conn->ksnc_proto;
		peer_ni->ksnp_incarnation = incarnation;
	}

	if (peer_ni->ksnp_proto != conn->ksnc_proto ||
	    peer_ni->ksnp_incarnation != incarnation) {
		/* peer_ni rebooted or I've got the wrong protocol version */
		ksocknal_close_peer_conns_locked(peer_ni, 0, 0);

		peer_ni->ksnp_proto = NULL;
		rc = ESTALE;
		warn = peer_ni->ksnp_incarnation != incarnation ?
		       "peer_ni rebooted" :
		       "wrong proto version";
		goto failed_2;
	}

	switch (rc) {
	default:
		LBUG();
	case 0:
		break;
	case EALREADY:
		warn = "lost conn race";
		goto failed_2;
	case EPROTO:
		warn = "retry with different protocol version";
		goto failed_2;
	}

	/* Refuse to duplicate an existing connection, unless this is a
	 * loopback connection */
	if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

			if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
			    conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
			    conn2->ksnc_type != conn->ksnc_type)
				continue;

			/* Reply on a passive connection attempt so the peer_ni
			 * realises we're connected. */
			rc = EALREADY;
			if (!active)
				rc = 0;

			goto failed_2;
		}
	}

	/* If the connection created by this route didn't bind to the IP
	 * address the route connected to, the connection/route matching
	 * code below probably isn't going to work. */
	if (active &&
	    route->ksnr_ipaddr != conn->ksnc_ipaddr) {
		CERROR("Route %s %pI4h connected to %pI4h\n",
		       libcfs_id2str(peer_ni->ksnp_id),
		       &route->ksnr_ipaddr,
		       &conn->ksnc_ipaddr);
	}

	/* Search for a route corresponding to the new connection and
	 * create an association.  This allows incoming connections created
	 * by routes in my peer_ni to match my own route entries so I don't
	 * continually create duplicate routes. */
	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
			continue;

		ksocknal_associate_route_conn_locked(route, conn);
		break;
	}

	conn->ksnc_peer = peer_ni;	/* conn takes my ref on peer_ni */
	peer_ni->ksnp_last_alive = ktime_get_real_seconds();
	peer_ni->ksnp_send_keepalive = 0;
	peer_ni->ksnp_error = 0;

	sched = ksocknal_choose_scheduler_locked(cpt);
	sched->kss_nconns++;
	conn->ksnc_scheduler = sched;

	conn->ksnc_tx_last_post = ktime_get_real_seconds();
	/* Set the deadline for the outgoing HELLO to drain */
	conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
	conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	smp_mb();   /* order with adding to peer_ni's conn list */

	list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
	ksocknal_conn_addref(conn);

	ksocknal_new_packet(conn, 0);

	conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

	/* Take packets blocking for this connection. */
	list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
		    SOCKNAL_MATCH_NO)
			continue;

		list_del(&tx->tx_list);
		ksocknal_queue_tx_locked(tx, conn);
	}

	write_unlock_bh(global_lock);

	/* We've now got a new connection.  Any errors from here on are just
	 * like "normal" comms errors and we close the connection normally.
	 * NB (a) we still have to send the reply HELLO for passive
	 *	  connections,
	 *    (b) normal I/O on the conn is blocked until I setup and call the
	 *	  socket callbacks.
	 */

	CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
	       " incarnation:%lld sched[%d:%d]\n",
	       libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
	       &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
	       conn->ksnc_port, incarnation, cpt,
	       (int)(sched - &sched->kss_info->ksi_scheds[0]));

	if (active) {
		/* additional routes after interface exchange? */
		ksocknal_create_routes(peer_ni, conn->ksnc_port,
				       hello->kshm_ips, hello->kshm_nips);
	} else {
		hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
						       hello->kshm_nips);
		rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
	}

	LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
				    kshm_ips[LNET_MAX_INTERFACES]));

	/* setup the socket AFTER I've received hello (it disables
	 * SO_LINGER).  I might call back to the acceptor who may want
	 * to send a protocol version response and then close the
	 * socket; this ensures the socket only tears down after the
	 * response has been sent. */
	if (rc == 0)
		rc = ksocknal_lib_setup_sock(sock);

	write_lock_bh(global_lock);

	/* NB my callbacks block while I hold ksnd_global_lock */
	ksocknal_lib_set_callback(sock, conn);

	if (!active)
		peer_ni->ksnp_accepting--;

	write_unlock_bh(global_lock);

	if (rc != 0) {
		write_lock_bh(global_lock);
		if (!conn->ksnc_closing) {
			/* could be closed by another thread */
			ksocknal_close_conn_locked(conn, rc);
		}
		write_unlock_bh(global_lock);
	} else if (ksocknal_connsock_addref(conn) == 0) {
		/* Allow I/O to proceed. */
		ksocknal_read_callback(conn);
		ksocknal_write_callback(conn);
		ksocknal_connsock_decref(conn);
	}

	ksocknal_connsock_decref(conn);
	ksocknal_conn_decref(conn);
	return rc;

failed_2:
	if (!peer_ni->ksnp_closing &&
	    list_empty(&peer_ni->ksnp_conns) &&
	    list_empty(&peer_ni->ksnp_routes)) {
		list_add(&zombies, &peer_ni->ksnp_tx_queue);
		list_del_init(&peer_ni->ksnp_tx_queue);
		ksocknal_unlink_peer_locked(peer_ni);
	}

	write_unlock_bh(global_lock);

	if (warn != NULL) {
		if (rc < 0)
			CERROR("Not creating conn %s type %d: %s\n",
			       libcfs_id2str(peerid), conn->ksnc_type, warn);
		else
			CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
			       libcfs_id2str(peerid), conn->ksnc_type, warn);
	}

	if (!active) {
		if (rc > 0) {
			/* Request retry by replying with CONN_NONE
			 * ksnc_proto has been set already */
			conn->ksnc_type = SOCKLND_CONN_NONE;
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, peerid.nid, hello);
		}

		write_lock_bh(global_lock);
		peer_ni->ksnp_accepting--;
		write_unlock_bh(global_lock);
	}

	/*
	 * If we get here without an error code, just use -EALREADY.
	 * Depending on how we got here, the error may be positive
	 * or negative. Normalize the value for ksocknal_txlist_done().
	 */
	rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
	ksocknal_txlist_done(ni, &zombies, rc2);
	ksocknal_peer_decref(peer_ni);

failed_1:
	if (hello != NULL)
		LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
					    kshm_ips[LNET_MAX_INTERFACES]));

	LIBCFS_FREE(conn, sizeof(*conn));

failed_0:
	sock_release(sock);
	return rc;
}
void
ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
{
	/* This just does the immediate housekeeping, and queues the
	 * connection for the reaper to terminate.
	 * Caller holds ksnd_global_lock exclusively in irq context */
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	ksock_route_t *route;
	ksock_conn_t *conn2;
	struct list_head *tmp;

	LASSERT(peer_ni->ksnp_error == 0);
	LASSERT(!conn->ksnc_closing);
	conn->ksnc_closing = 1;

	/* ksnd_deathrow_conns takes over peer_ni's ref */
	list_del(&conn->ksnc_list);

	route = conn->ksnc_route;
	if (route != NULL) {
		/* dissociate conn from route... */
		LASSERT(!route->ksnr_deleted);
		LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

		conn2 = NULL;
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

			if (conn2->ksnc_route == route &&
			    conn2->ksnc_type == conn->ksnc_type)
				break;

			conn2 = NULL;
		}
		if (conn2 == NULL)
			route->ksnr_connected &= ~(1 << conn->ksnc_type);

		conn->ksnc_route = NULL;

		ksocknal_route_decref(route);	/* drop conn's ref on route */
	}

	if (list_empty(&peer_ni->ksnp_conns)) {
		/* No more connections to this peer_ni */

		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			ksock_tx_t *tx;

			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

			/* throw them to the last connection...,
			 * these TXs will be sent to /dev/null by scheduler */
			list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
					    tx_list)
				ksocknal_tx_prep(conn, tx);

			spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
			list_splice_init(&peer_ni->ksnp_tx_queue,
					 &conn->ksnc_tx_queue);
			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
		}

		/* renegotiate protocol version */
		peer_ni->ksnp_proto = NULL;
		/* stash last conn close reason */
		peer_ni->ksnp_error = error;

		if (list_empty(&peer_ni->ksnp_routes)) {
			/* I've just closed last conn belonging to a
			 * peer_ni with no routes to it */
			ksocknal_unlink_peer_locked(peer_ni);
		}
	}

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	list_add_tail(&conn->ksnc_list,
		      &ksocknal_data.ksnd_deathrow_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
static void
ksocknal_peer_failed(ksock_peer_ni_t *peer_ni)
{
	int notify = 0;
	cfs_time_t last_alive = 0;

	/* There has been a connection failure or comms error; but I'll only
	 * tell LNET I think the peer_ni is dead if it's to another kernel and
	 * there are no connections or connection attempts in existence. */

	read_lock(&ksocknal_data.ksnd_global_lock);

	if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
	    list_empty(&peer_ni->ksnp_conns) &&
	    peer_ni->ksnp_accepting == 0 &&
	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
		notify = 1;
		last_alive = peer_ni->ksnp_last_alive;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	if (notify)
		lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
			    last_alive);
}
static void
ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	ksock_tx_t *tx;
	ksock_tx_t *tmp;
	struct list_head zlist = LIST_HEAD_INIT(zlist);

	/* NB safe to finalize TXs because closing of socket will
	 * abort all buffered data */
	LASSERT(conn->ksnc_sock == NULL);

	spin_lock(&peer_ni->ksnp_lock);

	list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
				 tx_zc_list) {
		if (tx->tx_conn != conn)
			continue;

		LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);

		tx->tx_msg.ksm_zc_cookies[0] = 0;
		tx->tx_zc_aborted = 1;	/* mark it as not-acked */
		list_del(&tx->tx_zc_list);
		list_add(&tx->tx_zc_list, &zlist);
	}

	spin_unlock(&peer_ni->ksnp_lock);

	while (!list_empty(&zlist)) {
		tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);

		list_del(&tx->tx_zc_list);
		ksocknal_tx_decref(tx);
	}
}
void
ksocknal_terminate_conn(ksock_conn_t *conn)
{
	/* This gets called by the reaper (guaranteed thread context) to
	 * disengage the socket from its callbacks and close it.
	 * ksnc_refcount will eventually hit zero, and then the reaper will
	 * destroy it. */
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	ksock_sched_t *sched = conn->ksnc_scheduler;
	int failed = 0;

	LASSERT(conn->ksnc_closing);

	/* wake up the scheduler to "send" all remaining packets to /dev/null */
	spin_lock_bh(&sched->kss_lock);

	/* a closing conn is always ready to tx */
	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&
	    !list_empty(&conn->ksnc_tx_queue)) {
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);

	/* serialise with callbacks */
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

	/* OK, so this conn may not be completely disengaged from its
	 * scheduler yet, but it _has_ committed to terminate... */
	conn->ksnc_scheduler->kss_nconns--;

	if (peer_ni->ksnp_error != 0) {
		/* peer_ni's last conn closed in error */
		LASSERT(list_empty(&peer_ni->ksnp_conns));
		failed = 1;
		peer_ni->ksnp_error = 0;	/* avoid multiple notifications */
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	if (failed)
		ksocknal_peer_failed(peer_ni);

	/* The socket is closed on the final put; either here, or in
	 * ksocknal_{send,recv}msg().  Since we set up the linger2 option
	 * when the connection was established, this will close the socket
	 * immediately, aborting anything buffered in it. Any hung
	 * zero-copy transmits will therefore complete in finite time. */
	ksocknal_connsock_decref(conn);
}
void
ksocknal_queue_zombie_conn(ksock_conn_t *conn)
{
	/* Queue the conn for the reaper to destroy */

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
ksocknal_destroy_conn(ksock_conn_t *conn)
{
	cfs_time_t last_rcv;

	/* Final coup-de-grace of the reaper */
	CDEBUG(D_NET, "connection %p\n", conn);

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
	LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
	LASSERT(conn->ksnc_sock == NULL);
	LASSERT(conn->ksnc_route == NULL);
	LASSERT(!conn->ksnc_tx_scheduled);
	LASSERT(!conn->ksnc_rx_scheduled);
	LASSERT(list_empty(&conn->ksnc_tx_queue));

	/* complete current receive if any */
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_LNET_PAYLOAD:
		last_rcv = conn->ksnc_rx_deadline -
			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
		CERROR("Completing partial receive from %s[%d], "
		       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
		       "last alive is %ld secs ago\n",
		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
		       &conn->ksnc_ipaddr, conn->ksnc_port,
		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
		       cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
						     last_rcv)));
		lnet_finalize(conn->ksnc_cookie, -EIO);
		break;
	case SOCKNAL_RX_LNET_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of lnet header from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of ksock message from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_SLOP:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of slops from %s, "
			       "ip %pI4h:%d, with error\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port);
		break;
	default:
		LBUG();
		break;
	}

	ksocknal_peer_decref(conn->ksnc_peer);

	LIBCFS_FREE(conn, sizeof(*conn));
}
int
ksocknal_close_peer_conns_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
{
	ksock_conn_t *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;
	int count = 0;

	list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

		if (ipaddr == 0 ||
		    conn->ksnc_ipaddr == ipaddr) {
			count++;
			ksocknal_close_conn_locked(conn, why);
		}
	}

	return count;
}
int
ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
{
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	__u32 ipaddr = conn->ksnc_ipaddr;
	int count;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	count = ksocknal_close_peer_conns_locked(peer_ni, ipaddr, why);

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return count;
}
int
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
	ksock_peer_ni_t *peer_ni;
	struct list_head *ptmp;
	struct list_head *pnxt;
	int lo;
	int hi;
	int i;
	int count = 0;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	if (id.nid != LNET_NID_ANY) {
		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
				ksocknal_data.ksnd_peers);
	} else {
		lo = 0;
		hi = ksocknal_data.ksnd_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {

			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			if (!((id.nid == LNET_NID_ANY ||
			       id.nid == peer_ni->ksnp_id.nid) &&
			      (id.pid == LNET_PID_ANY ||
			       id.pid == peer_ni->ksnp_id.pid)))
				continue;

			count += ksocknal_close_peer_conns_locked(peer_ni,
								  ipaddr, 0);
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	/* wildcards always succeed */
	if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
		return 0;

	return (count == 0 ? -ENOENT : 0);
}
void
ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
{
	/* The router is telling me she's been notified of a change in
	 * gateway state.... */
	struct lnet_process_id id = {
		.nid = gw_nid,
		.pid = LNET_PID_ANY,
	};

	CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
	       alive ? "up" : "down");

	if (!alive) {
		/* If the gateway crashed, close all open connections... */
		ksocknal_close_matching_conns(id, 0);
		return;
	}

	/* ...otherwise do nothing.  We can only establish new connections
	 * if we have autoroutes, and these connect on demand. */
}
void
ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
{
	int connect = 1;
	time64_t last_alive = 0;
	time64_t now = ktime_get_real_seconds();
	ksock_peer_ni_t *peer_ni = NULL;
	rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
	struct lnet_process_id id = {
		.nid = nid,
		.pid = LNET_PID_LUSTRE,
	};

	read_lock(glock);

	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL) {
		struct list_head *tmp;
		ksock_conn_t *conn;
		int bufnob;

		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
			bufnob = conn->ksnc_sock->sk->sk_wmem_queued;

			if (bufnob < conn->ksnc_tx_bufnob) {
				/* something got ACKed */
				conn->ksnc_tx_deadline =
					cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
				peer_ni->ksnp_last_alive = now;
				conn->ksnc_tx_bufnob = bufnob;
			}
		}

		last_alive = peer_ni->ksnp_last_alive;
		if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
			connect = 0;
	}

	read_unlock(glock);

	if (last_alive != 0)
		*when = last_alive;

	CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
	       libcfs_nid2str(nid), peer_ni,
	       last_alive ? cfs_duration_sec(now - last_alive) : -1,
	       connect);

	if (!connect)
		return;

	ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());

	write_lock_bh(glock);

	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL)
		ksocknal_launch_all_connections_locked(peer_ni);

	write_unlock_bh(glock);
}
static void
ksocknal_push_peer(ksock_peer_ni_t *peer_ni)
{
	int index;
	int i;
	struct list_head *tmp;
	ksock_conn_t *conn;

	for (index = 0; ; index++) {
		read_lock(&ksocknal_data.ksnd_global_lock);

		i = 0;
		conn = NULL;

		list_for_each(tmp, &peer_ni->ksnp_conns) {
			if (i++ == index) {
				conn = list_entry(tmp, ksock_conn_t,
						  ksnc_list);
				ksocknal_conn_addref(conn);
				break;
			}
		}

		read_unlock(&ksocknal_data.ksnd_global_lock);

		if (conn == NULL)
			break;

		ksocknal_lib_push_conn(conn);
		ksocknal_conn_decref(conn);
	}
}
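/* Handle IOC_LIBCFS_PUSH_CONNECTION: push every connection of every peer_ni
 * matching 'id'. */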
static int
ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
{
	struct list_head *start;
	struct list_head *end;
	struct list_head *tmp;
	int rc = -ENOENT;
	unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;

	if (id.nid == LNET_NID_ANY) {
		start = &ksocknal_data.ksnd_peers[0];
		end = &ksocknal_data.ksnd_peers[hsize - 1];
	} else {
		start = end = ksocknal_nid2peerlist(id.nid);
	}

	for (tmp = start; tmp <= end; tmp++) {
		int peer_off; /* searching offset in peer_ni hash table */

		for (peer_off = 0; ; peer_off++) {
			ksock_peer_ni_t *peer_ni;
			int i = 0;

			read_lock(&ksocknal_data.ksnd_global_lock);
			list_for_each_entry(peer_ni, tmp, ksnp_list) {
				if (!((id.nid == LNET_NID_ANY ||
				       id.nid == peer_ni->ksnp_id.nid) &&
				      (id.pid == LNET_PID_ANY ||
				       id.pid == peer_ni->ksnp_id.pid)))
					continue;

				if (i++ == peer_off) {
					ksocknal_peer_addref(peer_ni);
					break;
				}
			}
			read_unlock(&ksocknal_data.ksnd_global_lock);

			if (i == 0) /* no match */
				break;

			rc = 0;
			ksocknal_push_peer(peer_ni);
			ksocknal_peer_decref(peer_ni);
		}
	}

	return rc;
}
static int
ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
	ksock_net_t *net = ni->ni_data;
	ksock_interface_t *iface;
	int rc;
	int i;
	int j;
	struct list_head *ptmp;
	ksock_peer_ni_t *peer_ni;
	struct list_head *rtmp;
	ksock_route_t *route;

	if (ipaddress == 0 ||
	    netmask == 0)
		return -EINVAL;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	iface = ksocknal_ip2iface(ni, ipaddress);
	if (iface != NULL) {
		/* silently ignore dups */
		rc = 0;
	} else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
		rc = -ENOSPC;
	} else {
		iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];

		iface->ksni_ipaddr = ipaddress;
		iface->ksni_netmask = netmask;
		iface->ksni_nroutes = 0;
		iface->ksni_npeers = 0;

		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
			list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
				peer_ni = list_entry(ptmp, ksock_peer_ni_t,
						     ksnp_list);

				for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
					if (peer_ni->ksnp_passive_ips[j] == ipaddress)
						iface->ksni_npeers++;

				list_for_each(rtmp, &peer_ni->ksnp_routes) {
					route = list_entry(rtmp,
							   ksock_route_t,
							   ksnr_list);

					if (route->ksnr_myipaddr == ipaddress)
						iface->ksni_nroutes++;
				}
			}
		}

		rc = 0;
		/* NB only new connections will pay attention to the new
		 * interface! */
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return rc;
}
static void
ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
{
	struct list_head *tmp;
	struct list_head *nxt;
	ksock_route_t *route;
	ksock_conn_t *conn;
	int i;
	int j;

	for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
		if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
			for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
				peer_ni->ksnp_passive_ips[j-1] =
					peer_ni->ksnp_passive_ips[j];
			peer_ni->ksnp_n_passive_ips--;
			break;
		}

	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route->ksnr_myipaddr != ipaddr)
			continue;

		if (route->ksnr_share_count != 0) {
			/* Manually created; keep, but unbind */
			route->ksnr_myipaddr = 0;
		} else {
			ksocknal_del_route_locked(route);
		}
	}

	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
		conn = list_entry(tmp, ksock_conn_t, ksnc_list);

		if (conn->ksnc_myipaddr == ipaddr)
			ksocknal_close_conn_locked(conn, 0);
	}
}
static int
ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
	ksock_net_t *net = ni->ni_data;
	int rc = -ENOENT;
	struct list_head *tmp;
	struct list_head *nxt;
	ksock_peer_ni_t *peer_ni;
	__u32 this_ip;
	int i;
	int j;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		this_ip = net->ksnn_interfaces[i].ksni_ipaddr;

		if (!(ipaddress == 0 ||
		      ipaddress == this_ip))
			continue;

		rc = 0;

		for (j = i+1; j < net->ksnn_ninterfaces; j++)
			net->ksnn_interfaces[j-1] =
				net->ksnn_interfaces[j];

		net->ksnn_ninterfaces--;

		for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
			list_for_each_safe(tmp, nxt,
					   &ksocknal_data.ksnd_peers[j]) {
				peer_ni = list_entry(tmp, ksock_peer_ni_t,
						     ksnp_list);

				if (peer_ni->ksnp_ni != ni)
					continue;

				ksocknal_peer_del_interface_locked(peer_ni,
								   this_ip);
			}
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return rc;
}
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
	struct lnet_process_id id = {0};
	struct libcfs_ioctl_data *data = arg;
	int rc;

	switch(cmd) {
	case IOC_LIBCFS_GET_INTERFACE: {
		ksock_net_t *net = ni->ni_data;
		ksock_interface_t *iface;

		read_lock(&ksocknal_data.ksnd_global_lock);

		if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
			rc = -ENOENT;
		} else {
			rc = 0;
			iface = &net->ksnn_interfaces[data->ioc_count];

			data->ioc_u32[0] = iface->ksni_ipaddr;
			data->ioc_u32[1] = iface->ksni_netmask;
			data->ioc_u32[2] = iface->ksni_npeers;
			data->ioc_u32[3] = iface->ksni_nroutes;
		}

		read_unlock(&ksocknal_data.ksnd_global_lock);
		return rc;
	}

	case IOC_LIBCFS_ADD_INTERFACE:
		return ksocknal_add_interface(ni,
					      data->ioc_u32[0], /* IP address */
					      data->ioc_u32[1]); /* net mask */

	case IOC_LIBCFS_DEL_INTERFACE:
		return ksocknal_del_interface(ni,
					      data->ioc_u32[0]); /* IP address */

	case IOC_LIBCFS_GET_PEER: {
		__u32 myip = 0;
		__u32 ip = 0;
		int port = 0;
		int conn_count = 0;
		int share_count = 0;

		rc = ksocknal_get_peer_info(ni, data->ioc_count,
					    &id, &myip, &ip, &port,
					    &conn_count, &share_count);
		if (rc != 0)
			return rc;

		data->ioc_nid = id.nid;
		data->ioc_count = share_count;
		data->ioc_u32[0] = ip;
		data->ioc_u32[1] = port;
		data->ioc_u32[2] = myip;
		data->ioc_u32[3] = conn_count;
		data->ioc_u32[4] = id.pid;
		return 0;
	}

	case IOC_LIBCFS_ADD_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_LUSTRE;
		return ksocknal_add_peer(ni, id,
					 data->ioc_u32[0], /* IP */
					 data->ioc_u32[1]); /* port */

	case IOC_LIBCFS_DEL_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_del_peer(ni, id,
					 data->ioc_u32[0]); /* IP */

	case IOC_LIBCFS_GET_CONN: {
		int txmem;
		int rxmem;
		int nagle;
		ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);

		if (conn == NULL)
			return -ENOENT;

		ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);

		data->ioc_count = txmem;
		data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
		data->ioc_flags = nagle;
		data->ioc_u32[0] = conn->ksnc_ipaddr;
		data->ioc_u32[1] = conn->ksnc_port;
		data->ioc_u32[2] = conn->ksnc_myipaddr;
		data->ioc_u32[3] = conn->ksnc_type;
		data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
		data->ioc_u32[5] = rxmem;
		data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
		ksocknal_conn_decref(conn);
		return 0;
	}

	case IOC_LIBCFS_CLOSE_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_close_matching_conns(id,
						     data->ioc_u32[0]);

	case IOC_LIBCFS_REGISTER_MYNID:
		/* Ignore if this is a noop */
		if (data->ioc_nid == ni->ni_nid)
			return 0;

		CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
		       libcfs_nid2str(data->ioc_nid),
		       libcfs_nid2str(ni->ni_nid));
		return -EINVAL;

	case IOC_LIBCFS_PUSH_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_push(ni, id);

	default:
		return -EINVAL;
	}
	/* not reached */
}
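/* Free the peer_ni hash table, scheduler info and idle noop txs; only called
 * once all threads have quit. */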
static void
ksocknal_free_buffers(void)
{
	LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);

	if (ksocknal_data.ksnd_sched_info != NULL) {
		struct ksock_sched_info *info;
		int i;

		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
			if (info->ksi_scheds != NULL) {
				LIBCFS_FREE(info->ksi_scheds,
					    info->ksi_nthreads_max *
					    sizeof(info->ksi_scheds[0]));
			}
		}
		cfs_percpt_free(ksocknal_data.ksnd_sched_info);
	}

	LIBCFS_FREE(ksocknal_data.ksnd_peers,
		    sizeof(struct list_head) *
		    ksocknal_data.ksnd_peer_hash_size);

	spin_lock(&ksocknal_data.ksnd_tx_lock);

	if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
		struct list_head zlist;
		ksock_tx_t *tx;

		list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
		list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
		spin_unlock(&ksocknal_data.ksnd_tx_lock);

		while (!list_empty(&zlist)) {
			tx = list_entry(zlist.next, ksock_tx_t, tx_list);
			list_del(&tx->tx_list);
			LIBCFS_FREE(tx, tx->tx_desc_size);
		}
	} else {
		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}
}
static void
ksocknal_base_shutdown(void)
{
	struct ksock_sched_info *info;
	ksock_sched_t *sched;
	int i;
	int j;

	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));
	LASSERT(ksocknal_data.ksnd_nnets == 0);

	switch (ksocknal_data.ksnd_init) {
	default:
		LASSERT(0);

	case SOCKNAL_INIT_ALL:
	case SOCKNAL_INIT_DATA:
		LASSERT(ksocknal_data.ksnd_peers != NULL);
		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
		}

		LASSERT(list_empty(&ksocknal_data.ksnd_nets));
		LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {

					sched = &info->ksi_scheds[j];
					LASSERT(list_empty(&sched->kss_tx_conns));
					LASSERT(list_empty(&sched->kss_rx_conns));
					LASSERT(list_empty(&sched->kss_zombie_noop_txs));
					LASSERT(sched->kss_nconns == 0);
				}
			}
		}

		/* flag threads to terminate; wake and wait for them to die */
		ksocknal_data.ksnd_shuttingdown = 1;
		wake_up_all(&ksocknal_data.ksnd_connd_waitq);
		wake_up_all(&ksocknal_data.ksnd_reaper_waitq);

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {
					sched = &info->ksi_scheds[j];
					wake_up_all(&sched->kss_waitq);
				}
			}
		}

		i = 4;
		read_lock(&ksocknal_data.ksnd_global_lock);
		while (ksocknal_data.ksnd_nthreads != 0) {
			i++;
			/* power of 2? */
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "waiting for %d threads to terminate\n",
			       ksocknal_data.ksnd_nthreads);
			read_unlock(&ksocknal_data.ksnd_global_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
			read_lock(&ksocknal_data.ksnd_global_lock);
		}
		read_unlock(&ksocknal_data.ksnd_global_lock);

		ksocknal_free_buffers();

		ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
		break;
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	module_put(THIS_MODULE);
}
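/* One-time initialisation of global socklnd state: peer_ni hash table,
 * locks, per-CPT schedulers, connection daemons and the reaper. */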
2359 ksocknal_base_startup(void)
2361 struct ksock_sched_info *info;
2365 LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2366 LASSERT (ksocknal_data.ksnd_nnets == 0);
2368 memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2370 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2371 LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2372 sizeof(struct list_head) *
2373 ksocknal_data.ksnd_peer_hash_size);
2374 if (ksocknal_data.ksnd_peers == NULL)
2377 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2378 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2380 rwlock_init(&ksocknal_data.ksnd_global_lock);
2381 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2383 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2384 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2385 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2386 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2387 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2389 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2390 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2391 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2392 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2394 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2395 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2397 /* NB memset above zeros whole of ksocknal_data */
2399 /* flag lists/ptrs/locks initialised */
2400 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2401 try_module_get(THIS_MODULE);
2403 ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2405 if (ksocknal_data.ksnd_sched_info == NULL)
	cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
		ksock_sched_t	*sched;
		int		nthrs;

		nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
		} else {
			/* cap at half the CPUs; assume the other half is
			 * reserved for upper-layer modules */
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
		}

		info->ksi_nthreads_max = nthrs;
		info->ksi_cpt = i;

		LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
				 info->ksi_nthreads_max * sizeof(*sched));
		if (info->ksi_scheds == NULL)
			goto failed;

		for (; nthrs > 0; nthrs--) {
			sched = &info->ksi_scheds[nthrs - 1];

			sched->kss_info = info;
			spin_lock_init(&sched->kss_lock);
			INIT_LIST_HEAD(&sched->kss_rx_conns);
			INIT_LIST_HEAD(&sched->kss_tx_conns);
			INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
			init_waitqueue_head(&sched->kss_waitq);
		}
	}
	ksocknal_data.ksnd_connd_starting = 0;
	ksocknal_data.ksnd_connd_failed_stamp = 0;
	ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
	/* must have at least 2 connds to remain responsive to accepts while
	 * connecting */
	if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
		*ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;

	if (*ksocknal_tunables.ksnd_nconnds_max <
	    *ksocknal_tunables.ksnd_nconnds) {
		ksocknal_tunables.ksnd_nconnds_max =
			ksocknal_tunables.ksnd_nconnds;
	}
	for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
		char name[16];

		spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
		ksocknal_data.ksnd_connd_starting++;
		spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

		snprintf(name, sizeof(name), "socknal_cd%02d", i);
		rc = ksocknal_thread_start(ksocknal_connd,
					   (void *)((uintptr_t)i), name);
		if (rc != 0) {
			spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
			ksocknal_data.ksnd_connd_starting--;
			spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
			CERROR("Can't spawn socknal connd: %d\n", rc);
			goto failed;
		}
	}
	rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
	if (rc != 0) {
		CERROR("Can't spawn socknal reaper: %d\n", rc);
		goto failed;
	}

	/* flag everything initialised */
	ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;

	return 0;

 failed:
	ksocknal_base_shutdown();
	return -ENETDOWN;
}
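/*
 * Diagnostic helper: find a peer_ni still attached to @ni and dump its
 * refcounts, routes and conns to the console.  Called while a shutdown
 * is stalled waiting for peers to drain.
 */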
static void
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
	ksock_peer_ni_t		*peer_ni = NULL;
	struct list_head	*tmp;
	int			i;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni == ni)
				break;

			peer_ni = NULL;
		}
	}
	if (peer_ni != NULL) {
		ksock_route_t	*route;
		ksock_conn_t	*conn;

		CWARN("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
		      "closing %d, accepting %d, err %d, zcookie %llu, "
		      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
		      atomic_read(&peer_ni->ksnp_refcount),
		      peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
		      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
		      peer_ni->ksnp_zc_next_cookie,
		      !list_empty(&peer_ni->ksnp_tx_queue),
		      !list_empty(&peer_ni->ksnp_zc_req_list));
		list_for_each(tmp, &peer_ni->ksnp_routes) {
			route = list_entry(tmp, ksock_route_t, ksnr_list);
			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
			      "del %d\n", atomic_read(&route->ksnr_refcount),
			      route->ksnr_scheduled, route->ksnr_connecting,
			      route->ksnr_connected, route->ksnr_deleted);
		}
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
			CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
			      atomic_read(&conn->ksnc_conn_refcount),
			      atomic_read(&conn->ksnc_sock_refcount),
			      conn->ksnc_type, conn->ksnc_closing);
		}
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
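/*
 * LND shutdown hook for one network instance: flag the net as shutting
 * down so no new peers can be created, delete all peers, wait for the
 * peer count to drop to zero, then free the net and tear down the
 * shared state when the last net disappears.
 */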
void
ksocknal_shutdown(struct lnet_ni *ni)
{
	ksock_net_t *net = ni->ni_data;
	struct lnet_process_id anyid = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};
	int i;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);
	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_shutdown = 1;			/* prevent new peers */
	spin_unlock_bh(&net->ksnn_lock);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);

	/* Wait for all peer_ni state to clean up */
	i = 2;
	spin_lock_bh(&net->ksnn_lock);
	while (net->ksnn_npeers != 0) {
		spin_unlock_bh(&net->ksnn_lock);

		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "waiting for %d peers to disconnect\n",
		       net->ksnn_npeers);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));

		ksocknal_debug_peerhash(ni);

		spin_lock_bh(&net->ksnn_lock);
	}
	spin_unlock_bh(&net->ksnn_lock);
	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
	}

	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
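/*
 * Autoconfiguration path: enumerate the node's IP interfaces, skipping
 * loopback and downed ones, and record up to LNET_MAX_INTERFACES of
 * them in @net.  Returns the number of usable interfaces found.
 */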
static int
ksocknal_enumerate_interfaces(ksock_net_t *net)
{
	char	**names;
	int	i;
	int	j;
	int	rc;
	int	n;

	n = lnet_ipif_enumerate(&names);
	if (n <= 0) {
		CERROR("Can't enumerate interfaces: %d\n", n);
		return n;
	}

	for (i = j = 0; i < n; i++) {
		int	up;
		__u32	ip;
		__u32	mask;

		if (!strcmp(names[i], "lo")) /* skip the loopback IF */
			continue;

		rc = lnet_ipif_query(names[i], &up, &ip, &mask);
		if (rc != 0) {
			CWARN("Can't get interface %s info: %d\n",
			      names[i], rc);
			continue;
		}

		if (!up) {
			CWARN("Ignoring interface %s (down)\n",
			      names[i]);
			continue;
		}

		if (j == LNET_MAX_INTERFACES) {
			CWARN("Ignoring interface %s (too many interfaces)\n",
			      names[i]);
			continue;
		}

		net->ksnn_interfaces[j].ksni_ipaddr = ip;
		net->ksnn_interfaces[j].ksni_netmask = mask;
		strlcpy(net->ksnn_interfaces[j].ksni_name,
			names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
		j++;
	}

	lnet_ipif_free_enumeration(names, n);

	if (j == 0)
		CERROR("Can't find any usable interfaces\n");

	return j;
}
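/*
 * Count interfaces in @net that no already-configured net is using.
 * Alias suffixes (":n") are stripped for the comparison.  A non-zero
 * result tells ksocknal_net_start_threads() that additional scheduler
 * threads may be warranted.
 */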
static int
ksocknal_search_new_ipif(ksock_net_t *net)
{
	int	new_ipif = 0;
	int	i;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		char		*ifnam = &net->ksnn_interfaces[i].ksni_name[0];
		char		*colon = strchr(ifnam, ':');
		int		found = 0;
		ksock_net_t	*tmp;
		int		j;

		if (colon != NULL) /* ignore alias device */
			*colon = 0;

		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
				    ksnn_list) {
			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
				char *ifnam2 =
					&tmp->ksnn_interfaces[j].ksni_name[0];
				char *colon2 = strchr(ifnam2, ':');

				if (colon2 != NULL) /* ignore alias device */
					*colon2 = 0;

				found = strcmp(ifnam, ifnam2) == 0;
				if (colon2 != NULL)
					*colon2 = ':';
			}
		}

		new_ipif += !found;
		if (colon != NULL)
			*colon = ':';
	}

	return new_ipif;
}
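/*
 * Spawn scheduler threads for one CPT: the full complement on first
 * use, or at most two extra when a new interface shows up later.  Each
 * thread id packs the CPT together with the per-CPT slot (see
 * KSOCK_THREAD_ID / KSOCK_THREAD_SID in socklnd.h).
 */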
static int
ksocknal_start_schedulers(struct ksock_sched_info *info)
{
	int	nthrs;
	int	rc = 0;
	int	i;

	if (info->ksi_nthreads == 0) {
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = info->ksi_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       info->ksi_cpt);
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
		}
		nthrs = min(nthrs, info->ksi_nthreads_max);
	} else {
		LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
		/* spawn up to two more threads for a new interface */
		nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
	}
	for (i = 0; i < nthrs; i++) {
		long		id;
		char		name[20];
		ksock_sched_t	*sched;

		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
			 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));

		rc = ksocknal_thread_start(ksocknal_scheduler,
					   (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       info->ksi_cpt, info->ksi_nthreads + i, rc);
		break;
	}

	info->ksi_nthreads += i;
	return rc;
}
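/*
 * Ensure scheduler threads are running on every CPT this net is bound
 * to; CPTs that already have threads are skipped unless a new
 * interface was detected.
 */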
int
ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
{
	int	newif = ksocknal_search_new_ipif(net);
	int	rc;
	int	i;

	if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
		return -EINVAL;

	for (i = 0; i < ncpts; i++) {
		struct ksock_sched_info	*info;
		int cpt = (cpts == NULL) ? i : cpts[i];

		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
		info = ksocknal_data.ksnd_sched_info[cpt];

		if (!newif && info->ksi_nthreads > 0)
			continue;

		rc = ksocknal_start_schedulers(info);
		if (rc != 0)
			return rc;
	}

	return 0;
}
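/*
 * LND startup hook: bring up the shared state on first use, then
 * allocate and configure this net's ksock_net_t (tunables, interfaces,
 * NUMA hint, scheduler threads) and derive the NID from the first
 * interface's IP address.
 */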
int
ksocknal_startup(struct lnet_ni *ni)
{
	ksock_net_t		*net;
	struct net_device	*net_dev;
	int			node_id;
	int			rc;
	int			i;

	LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);

	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	spin_lock_init(&net->ksnn_lock);
	net->ksnn_incarnation = ktime_get_real_ns();
	ni->ni_data = net;
	if (!ni->ni_net->net_tunables_set) {
		ni->ni_net->net_tunables.lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;
		ni->ni_net->net_tunables.lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;
		ni->ni_net->net_tunables.lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;
		ni->ni_net->net_tunables.lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;
		ni->ni_net->net_tunables_set = true;
	}
	if (ni->ni_interfaces[0] == NULL) {
		rc = ksocknal_enumerate_interfaces(net);
		if (rc <= 0)
			goto fail_1;

		net->ksnn_ninterfaces = 1;
	} else {
		for (i = 0; i < LNET_MAX_INTERFACES; i++) {
			int up;

			if (ni->ni_interfaces[i] == NULL)
				break;

			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
				&net->ksnn_interfaces[i].ksni_ipaddr,
				&net->ksnn_interfaces[i].ksni_netmask);
			if (rc != 0) {
				CERROR("Can't get interface %s info: %d\n",
				       ni->ni_interfaces[i], rc);
				goto fail_1;
			}

			if (!up) {
				CERROR("Interface %s is down\n",
				       ni->ni_interfaces[i]);
				goto fail_1;
			}

			strlcpy(net->ksnn_interfaces[i].ksni_name,
				ni->ni_interfaces[i],
				sizeof(net->ksnn_interfaces[i].ksni_name));
		}
		net->ksnn_ninterfaces = i;
	}
	net_dev = dev_get_by_name(&init_net,
				  net->ksnn_interfaces[0].ksni_name);
	if (net_dev != NULL) {
		node_id = dev_to_node(&net_dev->dev);
		ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
		dev_put(net_dev);
	} else {
		ni->ni_dev_cpt = CFS_CPT_ANY;
	}
	/* start threads before adding the net to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
				net->ksnn_interfaces[0].ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

	ksocknal_data.ksnd_nnets++;

	return 0;

 fail_1:
	LIBCFS_FREE(net, sizeof(*net));
 fail_0:
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
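/*
 * Module plumbing: ksocklnd_init() fills in the LND callback table and
 * registers it with LNet; ksocklnd_exit() unregisters it when the
 * module unloads.
 */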
static void __exit ksocklnd_exit(void)
{
	lnet_unregister_lnd(&the_ksocklnd);
}

static int __init ksocklnd_init(void)
{
	int rc;

	/* check the ksnr_connected/connecting bitfields are large enough */
	CLASSERT(SOCKLND_CONN_NTYPES <= 4);
	CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
	/* initialize the_ksocklnd */
	the_ksocklnd.lnd_type = SOCKLND;
	the_ksocklnd.lnd_startup = ksocknal_startup;
	the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
	the_ksocklnd.lnd_ctl = ksocknal_ctl;
	the_ksocklnd.lnd_send = ksocknal_send;
	the_ksocklnd.lnd_recv = ksocknal_recv;
	the_ksocklnd.lnd_notify = ksocknal_notify;
	the_ksocklnd.lnd_query = ksocknal_query;
	the_ksocklnd.lnd_accept = ksocknal_accept;

	rc = ksocknal_tunables_init();
	if (rc != 0)
		return rc;

	lnet_register_lnd(&the_ksocklnd);

	return 0;
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);
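/*
 * Usage sketch (illustrative, not taken from this file): with LNet
 * configured for a TCP network, e.g.
 *
 *	options lnet networks=tcp0(eth0)
 *
 * in the modprobe configuration, bringing the network up with
 * "lctl network up" loads this module and reaches ksocknal_startup()
 * above through the lnd_startup callback registered here.
 */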