/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include <linux/inetdevice.h>
#include "socklnd.h"
static const struct lnet_lnd the_ksocklnd;
struct ksock_nal_data ksocknal_data;
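
/* Naming convention used throughout this file: helpers with a "_locked"
 * suffix expect the caller to already hold ksocknal_data.ksnd_global_lock
 * (read or write as appropriate); the unsuffixed variants take and release
 * the lock themselves. */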
static struct ksock_interface *
ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
        struct ksock_net *net = ni->ni_data;
        int i;
        struct ksock_interface *iface;

        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                LASSERT(i < LNET_INTERFACES_NUM);
                iface = &net->ksnn_interfaces[i];

                if (iface->ksni_ipaddr == ip)
                        return iface;
        }

        return NULL;
}
static struct ksock_route *
ksocknal_create_route(__u32 ipaddr, int port)
{
        struct ksock_route *route;

        LIBCFS_ALLOC(route, sizeof(*route));
        if (route == NULL)
                return NULL;

        atomic_set(&route->ksnr_refcount, 1);
        route->ksnr_peer = NULL;
        route->ksnr_retry_interval = 0;         /* OK to connect at any time */
        route->ksnr_ipaddr = ipaddr;
        route->ksnr_port = port;
        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;
        route->ksnr_connected = 0;
        route->ksnr_deleted = 0;
        route->ksnr_conn_count = 0;
        route->ksnr_share_count = 0;

        return route;
}
void
ksocknal_destroy_route(struct ksock_route *route)
{
        LASSERT(atomic_read(&route->ksnr_refcount) == 0);

        if (route->ksnr_peer != NULL)
                ksocknal_peer_decref(route->ksnr_peer);

        LIBCFS_FREE(route, sizeof(*route));
}
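
/* Allocate and initialise a new peer_ni on the CPT owning id.nid.  The
 * caller receives the single initial ref.  ksnn_npeers is incremented
 * before the allocation so that a shutdown in progress (which biases the
 * counter negative) refuses new peers with -ESHUTDOWN. */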
static struct ksock_peer_ni *
ksocknal_create_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
        int cpt = lnet_cpt_of_nid(id.nid, ni);
        struct ksock_net *net = ni->ni_data;
        struct ksock_peer_ni *peer_ni;

        LASSERT(id.nid != LNET_NID_ANY);
        LASSERT(id.pid != LNET_PID_ANY);
        LASSERT(!in_interrupt());

        if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
                CERROR("Can't create peer_ni: network shutdown\n");
                return ERR_PTR(-ESHUTDOWN);
        }

        LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
        if (peer_ni == NULL) {
                atomic_dec(&net->ksnn_npeers);
                return ERR_PTR(-ENOMEM);
        }

        peer_ni->ksnp_ni = ni;
        peer_ni->ksnp_id = id;
        atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
        peer_ni->ksnp_closing = 0;
        peer_ni->ksnp_accepting = 0;
        peer_ni->ksnp_proto = NULL;
        peer_ni->ksnp_last_alive = 0;
        peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        INIT_LIST_HEAD(&peer_ni->ksnp_conns);
        INIT_LIST_HEAD(&peer_ni->ksnp_routes);
        INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
        INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
        spin_lock_init(&peer_ni->ksnp_lock);

        return peer_ni;
}
void
ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
{
        struct ksock_net *net = peer_ni->ksnp_ni->ni_data;

        CDEBUG(D_NET, "peer_ni %s %p deleted\n",
               libcfs_id2str(peer_ni->ksnp_id), peer_ni);

        LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
        LASSERT(peer_ni->ksnp_accepting == 0);
        LASSERT(list_empty(&peer_ni->ksnp_conns));
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
        LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));

        LIBCFS_FREE(peer_ni, sizeof(*peer_ni));

        /* NB a peer_ni's connections and routes keep a reference on their
         * peer_ni until they are destroyed, so we can be assured that _all_
         * state to do with this peer_ni has been cleaned up when its refcount
         * drops to zero.
         */
        atomic_dec(&net->ksnn_npeers);
}
struct ksock_peer_ni *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
        struct ksock_peer_ni *peer_ni;

        hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
                               ksnp_list, id.nid) {
                LASSERT(!peer_ni->ksnp_closing);

                if (peer_ni->ksnp_ni != ni)
                        continue;

                if (peer_ni->ksnp_id.nid != id.nid ||
                    peer_ni->ksnp_id.pid != id.pid)
                        continue;

                CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
                       peer_ni, libcfs_id2str(id),
                       atomic_read(&peer_ni->ksnp_refcount));
                return peer_ni;
        }
        return NULL;
}
struct ksock_peer_ni *
ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
        struct ksock_peer_ni *peer_ni;

        read_lock(&ksocknal_data.ksnd_global_lock);
        peer_ni = ksocknal_find_peer_locked(ni, id);
        if (peer_ni != NULL)                    /* +1 ref for caller? */
                ksocknal_peer_addref(peer_ni);
        read_unlock(&ksocknal_data.ksnd_global_lock);

        return peer_ni;
}
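
/* Remove peer_ni from the global peer table; caller holds ksnd_global_lock
 * in write mode.  Interface passive-IP usage counts are released here and
 * the table's ref is dropped; the peer_ni itself is freed once the last
 * conn/route ref disappears. */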
static void
ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
{
        int i;
        __u32 ip;
        struct ksock_interface *iface;

        for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
                LASSERT(i < LNET_INTERFACES_NUM);
                ip = peer_ni->ksnp_passive_ips[i];

                iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
                /*
                 * All IPs in peer_ni->ksnp_passive_ips[] come from the
                 * interface list, therefore the call must succeed.
                 */
                LASSERT(iface != NULL);

                CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
                       peer_ni, iface, iface->ksni_nroutes);
                iface->ksni_npeers--;
        }

        LASSERT(list_empty(&peer_ni->ksnp_conns));
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(!peer_ni->ksnp_closing);
        peer_ni->ksnp_closing = 1;
        hlist_del(&peer_ni->ksnp_list);
        /* lose peerlist's ref */
        ksocknal_peer_decref(peer_ni);
}
static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
                       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
                       int *port, int *conn_count, int *share_count)
{
        struct ksock_peer_ni *peer_ni;
        struct ksock_route *route;
        struct list_head *rtmp;
        int i;
        int j;
        int rc = -ENOENT;

        read_lock(&ksocknal_data.ksnd_global_lock);

        hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {

                if (peer_ni->ksnp_ni != ni)
                        continue;

                if (peer_ni->ksnp_n_passive_ips == 0 &&
                    list_empty(&peer_ni->ksnp_routes)) {
                        if (index-- > 0)
                                continue;

                        *id = peer_ni->ksnp_id;
                        *myip = 0;
                        *peer_ip = 0;
                        *port = 0;
                        *conn_count = 0;
                        *share_count = 0;
                        rc = 0;
                        goto out;
                }

                for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
                        if (index-- > 0)
                                continue;

                        *id = peer_ni->ksnp_id;
                        *myip = peer_ni->ksnp_passive_ips[j];
                        *peer_ip = 0;
                        *port = 0;
                        *conn_count = 0;
                        *share_count = 0;
                        rc = 0;
                        goto out;
                }

                list_for_each(rtmp, &peer_ni->ksnp_routes) {
                        if (index-- > 0)
                                continue;

                        route = list_entry(rtmp, struct ksock_route,
                                           ksnr_list);

                        *id = peer_ni->ksnp_id;
                        *myip = route->ksnr_myipaddr;
                        *peer_ip = route->ksnr_ipaddr;
                        *port = route->ksnr_port;
                        *conn_count = route->ksnr_conn_count;
                        *share_count = route->ksnr_share_count;
                        rc = 0;
                        goto out;
                }
        }
out:
        read_unlock(&ksocknal_data.ksnd_global_lock);
        return rc;
}
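
/* A connection has been established on 'route'.  Record the local
 * interface the socket actually bound to (rebinding the route, and its
 * interface route counts, if it differs) and mark this connection type as
 * connected so further attempts of the same type aren't scheduled. */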
static void
ksocknal_associate_route_conn_locked(struct ksock_route *route,
                                     struct ksock_conn *conn)
{
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
        int type = conn->ksnc_type;
        struct ksock_interface *iface;

        conn->ksnc_route = route;
        ksocknal_route_addref(route);

        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
                if (route->ksnr_myipaddr == 0) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
                               libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &conn->ksnc_myipaddr);
                } else {
                        CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
                               "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &route->ksnr_myipaddr,
                               &conn->ksnc_myipaddr);

                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                                  route->ksnr_myipaddr);
                        if (iface != NULL)
                                iface->ksni_nroutes--;
                }
                route->ksnr_myipaddr = conn->ksnc_myipaddr;
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes++;
        }

        route->ksnr_connected |= (1 << type);
        route->ksnr_conn_count++;

        /* Successful connection => further attempts can
         * proceed immediately */
        route->ksnr_retry_interval = 0;
}
static void
ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni,
                          struct ksock_route *route)
{
        struct list_head *tmp;
        struct ksock_conn *conn;
        struct ksock_route *route2;

        LASSERT(!peer_ni->ksnp_closing);
        LASSERT(route->ksnr_peer == NULL);
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT(route->ksnr_connected == 0);

        /* LASSERT(unique) */
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route2 = list_entry(tmp, struct ksock_route, ksnr_list);

                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                        CERROR("Duplicate route %s %pI4h\n",
                               libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr);
                        LBUG();
                }
        }

        route->ksnr_peer = peer_ni;
        ksocknal_peer_addref(peer_ni);
        /* peer_ni's routelist takes over my ref on 'route' */
        list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);

        list_for_each(tmp, &peer_ni->ksnp_conns) {
                conn = list_entry(tmp, struct ksock_conn, ksnc_list);

                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                /* keep going (typed routes) */
        }
}
static void
ksocknal_del_route_locked(struct ksock_route *route)
{
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
        struct ksock_interface *iface;
        struct ksock_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;

        LASSERT(!route->ksnr_deleted);

        /* Close associated conns */
        list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
                conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

                if (conn->ksnc_route != route)
                        continue;

                ksocknal_close_conn_locked(conn, 0);
        }

        if (route->ksnr_myipaddr != 0) {
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes--;
        }

        route->ksnr_deleted = 1;
        list_del(&route->ksnr_list);
        ksocknal_route_decref(route);           /* drop peer_ni's ref */

        if (list_empty(&peer_ni->ksnp_routes) &&
            list_empty(&peer_ni->ksnp_conns)) {
                /* I've just removed the last route to a peer_ni with no active
                 * connections */
                ksocknal_unlink_peer_locked(peer_ni);
        }
}
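
/* Create or find the peer_ni for 'id' and add an explicitly-configured
 * route to ipaddr:port (called from the ADD_PEER ioctl and from
 * ksocknal_query()).  ksnr_share_count counts explicit additions, which
 * is how ksocknal_del_peer_locked() later tells manually-created routes
 * from auto-created ones. */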
int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
                  int port)
{
        struct list_head *tmp;
        struct ksock_peer_ni *peer_ni;
        struct ksock_peer_ni *peer2;
        struct ksock_route *route;
        struct ksock_route *route2;

        if (id.nid == LNET_NID_ANY ||
            id.pid == LNET_PID_ANY)
                return -EINVAL;

        /* Have a brand new peer_ni ready... */
        peer_ni = ksocknal_create_peer(ni, id);
        if (IS_ERR(peer_ni))
                return PTR_ERR(peer_ni);

        route = ksocknal_create_route(ipaddr, port);
        if (route == NULL) {
                ksocknal_peer_decref(peer_ni);
                return -ENOMEM;
        }

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* always called with a ref on ni, so shutdown can't have started */
        LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
                >= 0);

        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2 != NULL) {
                ksocknal_peer_decref(peer_ni);
                peer_ni = peer2;
        } else {
                /* peer_ni table takes my ref on peer_ni */
                hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid);
        }

        route2 = NULL;
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route2 = list_entry(tmp, struct ksock_route, ksnr_list);

                if (route2->ksnr_ipaddr == ipaddr)
                        break;

                route2 = NULL;
        }
        if (route2 == NULL) {
                ksocknal_add_route_locked(peer_ni, route);
                route->ksnr_share_count++;
        } else {
                ksocknal_route_decref(route);
                route2->ksnr_share_count++;
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return 0;
}
static void
ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
{
        struct ksock_conn *conn;
        struct ksock_route *route;
        struct list_head *tmp;
        struct list_head *nxt;
        int nshared;

        LASSERT(!peer_ni->ksnp_closing);

        /* Extra ref prevents peer_ni disappearing until I'm done with it */
        ksocknal_peer_addref(peer_ni);

        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                /* no match */
                if (!(ip == 0 || route->ksnr_ipaddr == ip))
                        continue;

                route->ksnr_share_count = 0;
                /* This deletes associated conns too */
                ksocknal_del_route_locked(route);
        }

        nshared = 0;
        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);
                nshared += route->ksnr_share_count;
        }

        if (nshared == 0) {
                /* remove everything else if there are no explicit entries
                 * left */
                list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                        route = list_entry(tmp, struct ksock_route, ksnr_list);

                        /* we should only be removing auto-entries */
                        LASSERT(route->ksnr_share_count == 0);
                        ksocknal_del_route_locked(route);
                }

                list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
                        conn = list_entry(tmp, struct ksock_conn, ksnc_list);

                        ksocknal_close_conn_locked(conn, 0);
                }
        }

        ksocknal_peer_decref(peer_ni);
        /* NB peer_ni unlinks itself when last conn/route is removed */
}
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
        LIST_HEAD(zombies);
        struct hlist_node *pnxt;
        struct ksock_peer_ni *peer_ni;
        int lo;
        int hi;
        int i;
        int rc = -ENOENT;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        if (id.nid != LNET_NID_ANY) {
                lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
                hi = lo;
        } else {
                lo = 0;
                hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
        }

        for (i = lo; i <= hi; i++) {
                hlist_for_each_entry_safe(peer_ni, pnxt,
                                          &ksocknal_data.ksnd_peers[i],
                                          ksnp_list) {
                        if (peer_ni->ksnp_ni != ni)
                                continue;

                        if (!((id.nid == LNET_NID_ANY ||
                               peer_ni->ksnp_id.nid == id.nid) &&
                              (id.pid == LNET_PID_ANY ||
                               peer_ni->ksnp_id.pid == id.pid)))
                                continue;

                        ksocknal_peer_addref(peer_ni);  /* a ref for me... */

                        ksocknal_del_peer_locked(peer_ni, ip);

                        if (peer_ni->ksnp_closing &&
                            !list_empty(&peer_ni->ksnp_tx_queue)) {
                                LASSERT(list_empty(&peer_ni->ksnp_conns));
                                LASSERT(list_empty(&peer_ni->ksnp_routes));

                                list_splice_init(&peer_ni->ksnp_tx_queue,
                                                 &zombies);
                        }

                        ksocknal_peer_decref(peer_ni);  /* ...till here */

                        rc = 0;                         /* matched! */
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_txlist_done(ni, &zombies, -ENETDOWN);

        return rc;
}
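
/* Enumerate connections for the GET_CONN ioctl: return the index'th conn
 * on 'ni' with a ref held, or NULL when the index runs off the end. */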
static struct ksock_conn *
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
        struct ksock_peer_ni *peer_ni;
        struct ksock_conn *conn;
        struct list_head *ctmp;
        int i;

        read_lock(&ksocknal_data.ksnd_global_lock);

        hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
                LASSERT(!peer_ni->ksnp_closing);

                if (peer_ni->ksnp_ni != ni)
                        continue;

                list_for_each(ctmp, &peer_ni->ksnp_conns) {
                        if (index-- > 0)
                                continue;

                        conn = list_entry(ctmp, struct ksock_conn,
                                          ksnc_list);
                        ksocknal_conn_addref(conn);
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        return conn;
                }
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
        return NULL;
}
static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
        struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
        int i;

        if (sched->kss_nthreads == 0) {
                cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
                        if (sched->kss_nthreads > 0) {
                                CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
                                       cpt, sched->kss_cpt);
                                return sched;
                        }
                }
                return NULL;
        }

        return sched;
}
static int
ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
        struct ksock_net *net = ni->ni_data;
        int i;
        int nip;

        read_lock(&ksocknal_data.ksnd_global_lock);

        nip = net->ksnn_ninterfaces;
        LASSERT(nip <= LNET_INTERFACES_NUM);

        /*
         * Only offer interfaces for additional connections if I have
         * more than one.
         */
        if (nip < 2) {
                read_unlock(&ksocknal_data.ksnd_global_lock);
                return 0;
        }

        for (i = 0; i < nip; i++) {
                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
                LASSERT(ipaddrs[i] != 0);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
        return nip;
}
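
/* Choose the best of the peer_ni's IPs for 'iface': prefer an address on
 * the same subnet (netmask match), then minimise the XOR distance, i.e.
 * favour the longest matching address prefix.  Entries already consumed
 * by the caller have been zeroed and are skipped. */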
static int
ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
{
        int best_netmatch = 0;
        int best_xor = 0;
        int best = -1;
        int this_xor;
        int this_netmatch;
        int i;

        for (i = 0; i < nips; i++) {
                if (ips[i] == 0)
                        continue;

                this_xor = (ips[i] ^ iface->ksni_ipaddr);
                this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;

                if (!(best < 0 ||
                      best_netmatch < this_netmatch ||
                      (best_netmatch == this_netmatch &&
                       best_xor > this_xor)))
                        continue;

                best = i;
                best_netmatch = this_netmatch;
                best_xor = this_xor;
        }

        LASSERT(best >= 0);
        return best;
}
static int
ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
{
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
        struct ksock_interface *iface;
        struct ksock_interface *best_iface;
        int n_ips;
        int i;
        int j;
        int k;
        u32 ip;
        u32 xor;
        int this_netmatch;
        int best_netmatch;
        int best_npeers;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness shouldn't matter */

        /* Also note that I'm not going to return more than n_peerips
         * interfaces, even if I have more myself */

        write_lock_bh(global_lock);

        LASSERT(n_peerips <= LNET_INTERFACES_NUM);
        LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);

        /* Only match interfaces for additional connections
         * if I have > 1 interface */
        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
                MIN(n_peerips, net->ksnn_ninterfaces);

        for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
                /*              ^ yes really... */

                /* If we have any new interfaces, first tick off all the
                 * peer_ni IPs that match old interfaces, then choose new
                 * interfaces to match the remaining peer_ni IPS.
                 * We don't forget interfaces we've stopped using; we might
                 * start using them again... */

                if (i < peer_ni->ksnp_n_passive_ips) {
                        /* Old interface. */
                        ip = peer_ni->ksnp_passive_ips[i];
                        best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);

                        /* peer_ni passive ips are kept up to date */
                        LASSERT(best_iface != NULL);
                } else {
                        /* choose a new interface */
                        LASSERT(i == peer_ni->ksnp_n_passive_ips);

                        best_iface = NULL;
                        best_netmatch = 0;
                        best_npeers = 0;

                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
                                iface = &net->ksnn_interfaces[j];
                                ip = iface->ksni_ipaddr;

                                for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
                                        if (peer_ni->ksnp_passive_ips[k] == ip)
                                                break;

                                if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
                                        continue;

                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
                                xor = (ip ^ peerips[k]);
                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

                                if (!(best_iface == NULL ||
                                      best_netmatch < this_netmatch ||
                                      (best_netmatch == this_netmatch &&
                                       best_npeers > iface->ksni_npeers)))
                                        continue;

                                best_iface = iface;
                                best_netmatch = this_netmatch;
                                best_npeers = iface->ksni_npeers;
                        }

                        LASSERT(best_iface != NULL);

                        best_iface->ksni_npeers++;
                        ip = best_iface->ksni_ipaddr;
                        peer_ni->ksnp_passive_ips[i] = ip;
                        peer_ni->ksnp_n_passive_ips = i+1;
                }

                /* mark the best matching peer_ni IP used */
                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
                peerips[j] = 0;
        }

        /* Overwrite input peer_ni IP addresses */
        memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));

        write_unlock_bh(global_lock);

        return n_ips;
}
static void
ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
        struct ksock_route *newroute = NULL;
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        struct lnet_ni *ni = peer_ni->ksnp_ni;
        struct ksock_net *net = ni->ni_data;
        struct list_head *rtmp;
        struct ksock_route *route;
        struct ksock_interface *iface;
        struct ksock_interface *best_iface;
        int best_netmatch;
        int this_netmatch;
        int best_nroutes;
        int i;
        int j;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness here shouldn't matter */

        write_lock_bh(global_lock);

        if (net->ksnn_ninterfaces < 2) {
                /* Only create additional connections
                 * if I have > 1 interface */
                write_unlock_bh(global_lock);
                return;
        }

        LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);

        for (i = 0; i < npeer_ipaddrs; i++) {
                if (newroute != NULL) {
                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
                } else {
                        write_unlock_bh(global_lock);

                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                        if (newroute == NULL)
                                return;

                        write_lock_bh(global_lock);
                }

                if (peer_ni->ksnp_closing) {
                        /* peer_ni got closed under me */
                        break;
                }

                /* Already got a route? */
                route = NULL;
                list_for_each(rtmp, &peer_ni->ksnp_routes) {
                        route = list_entry(rtmp, struct ksock_route, ksnr_list);

                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;

                        route = NULL;
                }
                if (route != NULL)
                        continue;

                best_iface = NULL;
                best_nroutes = 0;
                best_netmatch = 0;

                LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);

                /* Select interface to connect from */
                for (j = 0; j < net->ksnn_ninterfaces; j++) {
                        iface = &net->ksnn_interfaces[j];

                        /* Using this interface already? */
                        list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);

                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
                                        break;

                                route = NULL;
                        }
                        if (route != NULL)
                                continue;

                        this_netmatch = (((iface->ksni_ipaddr ^
                                           newroute->ksnr_ipaddr) &
                                          iface->ksni_netmask) == 0) ? 1 : 0;

                        if (!(best_iface == NULL ||
                              best_netmatch < this_netmatch ||
                              (best_netmatch == this_netmatch &&
                               best_nroutes > iface->ksni_nroutes)))
                                continue;

                        best_iface = iface;
                        best_netmatch = this_netmatch;
                        best_nroutes = iface->ksni_nroutes;
                }

                if (best_iface == NULL)
                        continue;

                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
                best_iface->ksni_nroutes++;

                ksocknal_add_route_locked(peer_ni, newroute);
                newroute = NULL;
        }

        write_unlock_bh(global_lock);
        if (newroute != NULL)
                ksocknal_route_decref(newroute);
}
int
ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
        struct ksock_connreq *cr;
        int rc;
        __u32 peer_ip;
        int peer_port;

        rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
        LASSERT(rc == 0);                       /* we succeeded before */

        LIBCFS_ALLOC(cr, sizeof(*cr));
        if (cr == NULL) {
                LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
                                   "%pI4h: memory exhausted\n", &peer_ip);
                return -ENOMEM;
        }

        lnet_ni_addref(ni);
        cr->ksncr_ni = ni;
        cr->ksncr_sock = sock;

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

        list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
        wake_up(&ksocknal_data.ksnd_connd_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
        return 0;
}
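
/* Return the connecting flag of the peer_ni's route to 'ipaddr' (0 if
 * there is no such route); used below to resolve simultaneous-connect
 * races in favour of the higher NID. */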
static int
ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
{
        struct ksock_route *route;

        list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
                if (route->ksnr_ipaddr == ipaddr)
                        return route->ksnr_connecting;
        }
        return 0;
}
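
/* Bring up a new connection on 'sock'.  Active connections (route != NULL)
 * send their HELLO eagerly and learn the peer_ni's interface list from the
 * reply; passive connections discover the peer_ni's identity, protocol and
 * interfaces from the HELLO they receive.  Either way the conn only
 * becomes visible on the peer_ni's conn list once the duplicate, protocol
 * and incarnation checks below have all passed. */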
int
ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
                     struct socket *sock, int type)
{
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        LIST_HEAD(zombies);
        struct lnet_process_id peerid;
        struct list_head *tmp;
        u64 incarnation;
        struct ksock_conn *conn;
        struct ksock_conn *conn2;
        struct ksock_peer_ni *peer_ni = NULL;
        struct ksock_peer_ni *peer2;
        struct ksock_sched *sched;
        struct ksock_hello_msg *hello;
        int cpt;
        struct ksock_tx *tx;
        struct ksock_tx *txtmp;
        int rc;
        int rc2;
        int active;
        char *warn = NULL;

        active = (route != NULL);

        LASSERT(active == (type != SOCKLND_CONN_NONE));

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                rc = -ENOMEM;
                goto failed_0;
        }

        conn->ksnc_peer = NULL;
        conn->ksnc_route = NULL;
        conn->ksnc_sock = sock;
        /* 2 ref, 1 for conn, another extra ref prevents socket
         * being closed before establishment of connection */
        atomic_set(&conn->ksnc_sock_refcount, 2);
        conn->ksnc_type = type;
        ksocknal_lib_save_callback(sock, conn);
        atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

        conn->ksnc_rx_ready = 0;
        conn->ksnc_rx_scheduled = 0;

        INIT_LIST_HEAD(&conn->ksnc_tx_queue);
        conn->ksnc_tx_ready = 0;
        conn->ksnc_tx_scheduled = 0;
        conn->ksnc_tx_carrier = NULL;
        atomic_set(&conn->ksnc_tx_nob, 0);

        LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
                                     kshm_ips[LNET_INTERFACES_NUM]));
        if (hello == NULL) {
                rc = -ENOMEM;
                goto failed_1;
        }

        /* stash conn's local and remote addrs */
        rc = ksocknal_lib_get_conn_addrs(conn);
        if (rc != 0)
                goto failed_1;

        /* Find out/confirm peer_ni's NID and connection type and get the
         * vector of interfaces she's willing to let me connect to.
         * Passive connections use the listener timeout since the peer_ni sends
         * eagerly */

        if (active) {
                peer_ni = route->ksnr_peer;
                LASSERT(ni == peer_ni->ksnp_ni);

                /* Active connection sends HELLO eagerly */
                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                peerid = peer_ni->ksnp_id;

                write_lock_bh(global_lock);
                conn->ksnc_proto = peer_ni->ksnp_proto;
                write_unlock_bh(global_lock);

                if (conn->ksnc_proto == NULL) {
                        conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                        if (*ksocknal_tunables.ksnd_protocol == 2)
                                conn->ksnc_proto = &ksocknal_protocol_v2x;
                        else if (*ksocknal_tunables.ksnd_protocol == 1)
                                conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                }

                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
                if (rc != 0)
                        goto failed_1;
        } else {
                peerid.nid = LNET_NID_ANY;
                peerid.pid = LNET_PID_ANY;

                /* Passive, get protocol from peer_ni */
                conn->ksnc_proto = NULL;
        }

        rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
        if (rc < 0)
                goto failed_1;

        LASSERT(rc == 0 || active);
        LASSERT(conn->ksnc_proto != NULL);
        LASSERT(peerid.nid != LNET_NID_ANY);

        cpt = lnet_cpt_of_nid(peerid.nid, ni);

        if (active) {
                ksocknal_peer_addref(peer_ni);
                write_lock_bh(global_lock);
        } else {
                peer_ni = ksocknal_create_peer(ni, peerid);
                if (IS_ERR(peer_ni)) {
                        rc = PTR_ERR(peer_ni);
                        goto failed_1;
                }

                write_lock_bh(global_lock);

                /* called with a ref on ni, so shutdown can't have started */
                LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);

                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
                        /* NB this puts an "empty" peer_ni in the peer_ni
                         * table (which takes my ref) */
                        hash_add(ksocknal_data.ksnd_peers,
                                 &peer_ni->ksnp_list, peerid.nid);
                } else {
                        ksocknal_peer_decref(peer_ni);
                        peer_ni = peer2;
                }

                /* +1 ref for me */
                ksocknal_peer_addref(peer_ni);
                peer_ni->ksnp_accepting++;

                /* Am I already connecting to this guy?  Resolve in
                 * favour of higher NID... */
                if (peerid.nid < ni->ni_nid &&
                    ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
                        rc = EALREADY;
                        warn = "connection race resolution";
                        goto failed_2;
                }
        }

        if (peer_ni->ksnp_closing ||
            (active && route->ksnr_deleted)) {
                /* peer_ni/route got closed under me */
                rc = -ESTALE;
                warn = "peer_ni/route removed";
                goto failed_2;
        }

        if (peer_ni->ksnp_proto == NULL) {
                /* Never connected before.
                 * NB recv_hello may have returned EPROTO to signal my peer_ni
                 * wants a different protocol than the one I asked for.
                 */
                LASSERT(list_empty(&peer_ni->ksnp_conns));

                peer_ni->ksnp_proto = conn->ksnc_proto;
                peer_ni->ksnp_incarnation = incarnation;
        }

        if (peer_ni->ksnp_proto != conn->ksnc_proto ||
            peer_ni->ksnp_incarnation != incarnation) {
                /* peer_ni rebooted or I've got the wrong protocol version */
                ksocknal_close_peer_conns_locked(peer_ni, 0, 0);

                peer_ni->ksnp_proto = NULL;
                rc = ESTALE;
                warn = peer_ni->ksnp_incarnation != incarnation ?
                       "peer_ni rebooted" :
                       "wrong proto version";
                goto failed_2;
        }

        switch (rc) {
        default:
                LBUG();
        case 0:
                break;
        case EALREADY:
                warn = "lost conn race";
                goto failed_2;
        case EPROTO:
                warn = "retry with different protocol version";
                goto failed_2;
        }

        /* Refuse to duplicate an existing connection, unless this is a
         * loopback connection */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);

                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
                            conn2->ksnc_type != conn->ksnc_type)
                                continue;

                        /* Reply on a passive connection attempt so the peer_ni
                         * realises we're connected. */
                        rc = EALREADY;
                        if (!active)
                                rc = 0;

                        goto failed_2;
                }
        }

        /* If the connection created by this route didn't bind to the IP
         * address the route connected to, the connection/route matching
         * code below probably isn't going to work. */
        if (active &&
            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
                CERROR("Route %s %pI4h connected to %pI4h\n",
                       libcfs_id2str(peer_ni->ksnp_id),
                       &route->ksnr_ipaddr,
                       &conn->ksnc_ipaddr);
        }

        /* Search for a route corresponding to the new connection and
         * create an association.  This allows incoming connections created
         * by routes in my peer_ni to match my own route entries so I don't
         * continually create duplicate routes. */
        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                break;
        }

        conn->ksnc_peer = peer_ni;      /* conn takes my ref on peer_ni */
        peer_ni->ksnp_last_alive = ktime_get_seconds();
        peer_ni->ksnp_send_keepalive = 0;
        peer_ni->ksnp_error = 0;

        sched = ksocknal_choose_scheduler_locked(cpt);
        if (!sched) {
                CERROR("no schedulers available. node is unhealthy\n");
                goto failed_2;
        }
        /*
         * The cpt might have changed if we ended up selecting a non cpt
         * native scheduler. So use the scheduler's cpt instead.
         */
        cpt = sched->kss_cpt;
        sched->kss_nconns++;
        conn->ksnc_scheduler = sched;

        conn->ksnc_tx_last_post = ktime_get_seconds();
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
        conn->ksnc_tx_deadline = ktime_get_seconds() +
                                 lnet_get_lnd_timeout();
        smp_mb();   /* order with adding to peer_ni's conn list */

        list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
        ksocknal_conn_addref(conn);

        ksocknal_new_packet(conn, 0);

        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

        /* Take packets blocking for this connection. */
        list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
                    SOCKNAL_MATCH_NO)
                        continue;

                list_del(&tx->tx_list);
                ksocknal_queue_tx_locked(tx, conn);
        }

        write_unlock_bh(global_lock);

        /* We've now got a new connection.  Any errors from here on are just
         * like "normal" comms errors and we close the connection normally.
         * NB (a) we still have to send the reply HELLO for passive
         *        connections,
         *    (b) normal I/O on the conn is blocked until I setup and call the
         *        socket callbacks.
         */

        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
               " incarnation:%lld sched[%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
               conn->ksnc_port, incarnation, cpt);

        if (active) {
                /* additional routes after interface exchange? */
                ksocknal_create_routes(peer_ni, conn->ksnc_port,
                                       hello->kshm_ips, hello->kshm_nips);
        } else {
                hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
                                                       hello->kshm_nips);
                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
        }

        LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
                                    kshm_ips[LNET_INTERFACES_NUM]));

        /* setup the socket AFTER I've received hello (it disables
         * SO_LINGER).  I might call back to the acceptor who may want
         * to send a protocol version response and then close the
         * socket; this ensures the socket only tears down after the
         * response has been sent. */
        if (rc == 0)
                rc = ksocknal_lib_setup_sock(sock);

        write_lock_bh(global_lock);

        /* NB my callbacks block while I hold ksnd_global_lock */
        ksocknal_lib_set_callback(sock, conn);

        if (!active)
                peer_ni->ksnp_accepting--;

        write_unlock_bh(global_lock);

        if (rc != 0) {
                write_lock_bh(global_lock);
                if (!conn->ksnc_closing) {
                        /* could be closed by another thread */
                        ksocknal_close_conn_locked(conn, rc);
                }
                write_unlock_bh(global_lock);
        } else if (ksocknal_connsock_addref(conn) == 0) {
                /* Allow I/O to proceed. */
                ksocknal_read_callback(conn);
                ksocknal_write_callback(conn);
                ksocknal_connsock_decref(conn);
        }

        ksocknal_connsock_decref(conn);
        ksocknal_conn_decref(conn);
        return rc;

failed_2:
        if (!peer_ni->ksnp_closing &&
            list_empty(&peer_ni->ksnp_conns) &&
            list_empty(&peer_ni->ksnp_routes)) {
                list_add(&zombies, &peer_ni->ksnp_tx_queue);
                list_del_init(&peer_ni->ksnp_tx_queue);
                ksocknal_unlink_peer_locked(peer_ni);
        }

        write_unlock_bh(global_lock);

        if (warn != NULL) {
                if (rc < 0)
                        CERROR("Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
                else
                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
        }

        if (!active) {
                if (rc > 0) {
                        /* Request retry by replying with CONN_NONE
                         * ksnc_proto has been set already */
                        conn->ksnc_type = SOCKLND_CONN_NONE;
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
                }

                write_lock_bh(global_lock);
                peer_ni->ksnp_accepting--;
                write_unlock_bh(global_lock);
        }

        /*
         * If we get here without an error code, just use -EALREADY.
         * Depending on how we got here, the error may be positive
         * or negative. Normalize the value for ksocknal_txlist_done().
         */
        rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
        ksocknal_txlist_done(ni, &zombies, rc2);
        ksocknal_peer_decref(peer_ni);

failed_1:
        if (hello != NULL)
                LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
                                            kshm_ips[LNET_INTERFACES_NUM]));

        LIBCFS_FREE(conn, sizeof(*conn));

failed_0:
        sock_release(sock);
        return rc;
}
void
ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
        /* This just does the immediate housekeeping, and queues the
         * connection for the reaper to terminate.
         * Caller holds ksnd_global_lock exclusively in irq context */
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_route *route;
        struct ksock_conn *conn2;
        struct list_head *tmp;

        LASSERT(peer_ni->ksnp_error == 0);
        LASSERT(!conn->ksnc_closing);
        conn->ksnc_closing = 1;

        /* ksnd_deathrow_conns takes over peer_ni's ref */
        list_del(&conn->ksnc_list);

        route = conn->ksnc_route;
        if (route != NULL) {
                /* dissociate conn from route... */
                LASSERT(!route->ksnr_deleted);
                LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

                conn2 = NULL;
                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);

                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
                                break;

                        conn2 = NULL;
                }
                if (conn2 == NULL)
                        route->ksnr_connected &= ~(1 << conn->ksnc_type);

                conn->ksnc_route = NULL;

                ksocknal_route_decref(route);   /* drop conn's ref on route */
        }

        if (list_empty(&peer_ni->ksnp_conns)) {
                /* No more connections to this peer_ni */

                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
                        struct ksock_tx *tx;

                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

                        /* throw them to the last connection...,
                         * these TXs will be send to /dev/null by scheduler */
                        list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
                                            tx_list)
                                ksocknal_tx_prep(conn, tx);

                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
                        list_splice_init(&peer_ni->ksnp_tx_queue,
                                         &conn->ksnc_tx_queue);
                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                }

                /* renegotiate protocol version */
                peer_ni->ksnp_proto = NULL;
                /* stash last conn close reason */
                peer_ni->ksnp_error = error;

                if (list_empty(&peer_ni->ksnp_routes)) {
                        /* I've just closed last conn belonging to a
                         * peer_ni with no routes to it */
                        ksocknal_unlink_peer_locked(peer_ni);
                }
        }

        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
        wake_up(&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
{
        int notify = 0;
        time64_t last_alive = 0;

        /* There has been a connection failure or comms error; but I'll only
         * tell LNET I think the peer_ni is dead if it's to another kernel and
         * there are no connections or connection attempts in existence. */
        read_lock(&ksocknal_data.ksnd_global_lock);

        if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
            list_empty(&peer_ni->ksnp_conns) &&
            peer_ni->ksnp_accepting == 0 &&
            ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
                notify = 1;
                last_alive = peer_ni->ksnp_last_alive;
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);

        if (notify)
                lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
                            false, false, last_alive);
}
void
ksocknal_finalize_zcreq(struct ksock_conn *conn)
{
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_tx *tx;
        struct ksock_tx *tmp;
        LIST_HEAD(zlist);

        /* NB safe to finalize TXs because closing of socket will
         * abort all buffered data */
        LASSERT(conn->ksnc_sock == NULL);

        spin_lock(&peer_ni->ksnp_lock);

        list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
                if (tx->tx_conn != conn)
                        continue;

                LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);

                tx->tx_msg.ksm_zc_cookies[0] = 0;
                tx->tx_zc_aborted = 1;  /* mark it as not-acked */
                list_move(&tx->tx_zc_list, &zlist);
        }

        spin_unlock(&peer_ni->ksnp_lock);

        while (!list_empty(&zlist)) {
                tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);

                list_del(&tx->tx_zc_list);
                ksocknal_tx_decref(tx);
        }
}
void
ksocknal_terminate_conn(struct ksock_conn *conn)
{
        /* This gets called by the reaper (guaranteed thread context) to
         * disengage the socket from its callbacks and close it.
         * ksnc_refcount will eventually hit zero, and then the reaper will
         * destroy it. */
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_sched *sched = conn->ksnc_scheduler;
        int failed = 0;

        LASSERT(conn->ksnc_closing);

        /* wake up the scheduler to "send" all remaining packets to /dev/null */
        spin_lock_bh(&sched->kss_lock);

        /* a closing conn is always ready to tx */
        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled &&
            !list_empty(&conn->ksnc_tx_queue)) {
                list_add_tail(&conn->ksnc_tx_list,
                              &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up(&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);

        /* serialise with callbacks */
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

        /* OK, so this conn may not be completely disengaged from its
         * scheduler yet, but it _has_ committed to terminate... */
        conn->ksnc_scheduler->kss_nconns--;

        if (peer_ni->ksnp_error != 0) {
                /* peer_ni's last conn closed in error */
                LASSERT(list_empty(&peer_ni->ksnp_conns));
                failed = 1;
                peer_ni->ksnp_error = 0;        /* avoid multiple notifications */
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        if (failed)
                ksocknal_peer_failed(peer_ni);

        /* The socket is closed on the final put; either here, or in
         * ksocknal_{send,recv}msg().  Since we set up the linger2 option
         * when the connection was established, this will close the socket
         * immediately, aborting anything buffered in it. Any hung
         * zero-copy transmits will therefore complete in finite time. */
        ksocknal_connsock_decref(conn);
}
void
ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
        /* Queue the conn for the reaper to destroy */
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
        wake_up(&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
ksocknal_destroy_conn(struct ksock_conn *conn)
{
        time64_t last_rcv;

        /* Final coup-de-grace of the reaper */
        CDEBUG(D_NET, "connection %p\n", conn);

        LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
        LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
        LASSERT(conn->ksnc_sock == NULL);
        LASSERT(conn->ksnc_route == NULL);
        LASSERT(!conn->ksnc_tx_scheduled);
        LASSERT(!conn->ksnc_rx_scheduled);
        LASSERT(list_empty(&conn->ksnc_tx_queue));

        /* complete current receive if any */
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_LNET_PAYLOAD:
                last_rcv = conn->ksnc_rx_deadline -
                           lnet_get_lnd_timeout();
                CERROR("Completing partial receive from %s[%d], "
                       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
                       "last alive is %lld secs ago\n",
                       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
                       &conn->ksnc_ipaddr, conn->ksnc_port,
                       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
                       ktime_get_seconds() - last_rcv);
                if (conn->ksnc_lnet_msg)
                        conn->ksnc_lnet_msg->msg_health_status =
                                LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(conn->ksnc_lnet_msg, -EIO);
                break;
        case SOCKNAL_RX_LNET_HEADER:
                if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of lnet header from %s, "
                               "ip %pI4h:%d, with error, protocol: %d.x.\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               &conn->ksnc_ipaddr, conn->ksnc_port,
                               conn->ksnc_proto->pro_version);
                break;
        case SOCKNAL_RX_KSM_HEADER:
                if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of ksock message from %s, "
                               "ip %pI4h:%d, with error, protocol: %d.x.\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               &conn->ksnc_ipaddr, conn->ksnc_port,
                               conn->ksnc_proto->pro_version);
                break;
        case SOCKNAL_RX_SLOP:
                if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of slops from %s, "
                               "ip %pI4h:%d, with error\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               &conn->ksnc_ipaddr, conn->ksnc_port);
                break;
        default:
                LBUG();
                break;
        }

        ksocknal_peer_decref(conn->ksnc_peer);

        LIBCFS_FREE(conn, sizeof(*conn));
}
int
ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni,
                                 __u32 ipaddr, int why)
{
        struct ksock_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int count = 0;

        list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
                conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

                if (ipaddr == 0 ||
                    conn->ksnc_ipaddr == ipaddr) {
                        count++;
                        ksocknal_close_conn_locked(conn, why);
                }
        }

        return count;
}
int
ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
{
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        u32 ipaddr = conn->ksnc_ipaddr;
        int count;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        count = ksocknal_close_peer_conns_locked(peer_ni, ipaddr, why);

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return count;
}
int
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
        struct ksock_peer_ni *peer_ni;
        struct hlist_node *pnxt;
        int lo;
        int hi;
        int i;
        int count = 0;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        if (id.nid != LNET_NID_ANY) {
                lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
                hi = lo;
        } else {
                lo = 0;
                hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
        }

        for (i = lo; i <= hi; i++) {
                hlist_for_each_entry_safe(peer_ni, pnxt,
                                          &ksocknal_data.ksnd_peers[i],
                                          ksnp_list) {
                        if (!((id.nid == LNET_NID_ANY ||
                               id.nid == peer_ni->ksnp_id.nid) &&
                              (id.pid == LNET_PID_ANY ||
                               id.pid == peer_ni->ksnp_id.pid)))
                                continue;

                        count += ksocknal_close_peer_conns_locked(peer_ni,
                                                                  ipaddr, 0);
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        /* wildcards always succeed */
        if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
                return 0;

        return (count == 0 ? -ENOENT : 0);
}
static void
ksocknal_notify_gw_down(lnet_nid_t gw_nid)
{
        /* The router is telling me she's been notified of a change in
         * gateway state....
         */
        struct lnet_process_id id = {
                .nid    = gw_nid,
                .pid    = LNET_PID_ANY,
        };

        CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));

        /* If the gateway crashed, close all open connections... */
        ksocknal_close_matching_conns(id, 0);
        return;

        /* We can only establish new connections
         * if we have autoroutes, and these connect on demand. */
}
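
/* LND query handler: report when 'nid' was last known alive and start a
 * connection attempt if one looks worthwhile.  A socket send queue that
 * has shrunk since the last check is taken as evidence that the peer_ni
 * ACKed something, i.e. was alive just now. */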
static void
ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
        int connect = 1;
        time64_t last_alive = 0;
        time64_t now = ktime_get_seconds();
        struct ksock_peer_ni *peer_ni = NULL;
        rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
        struct lnet_process_id id = {
                .nid = nid,
                .pid = LNET_PID_LUSTRE,
        };

        read_lock(glock);

        peer_ni = ksocknal_find_peer_locked(ni, id);
        if (peer_ni != NULL) {
                struct list_head *tmp;
                struct ksock_conn *conn;
                int bufnob;

                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        conn = list_entry(tmp, struct ksock_conn, ksnc_list);
                        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;

                        if (bufnob < conn->ksnc_tx_bufnob) {
                                /* something got ACKed */
                                conn->ksnc_tx_deadline = ktime_get_seconds() +
                                                         lnet_get_lnd_timeout();
                                peer_ni->ksnp_last_alive = now;
                                conn->ksnc_tx_bufnob = bufnob;
                        }
                }

                last_alive = peer_ni->ksnp_last_alive;
                if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
                        connect = 0;
        }

        read_unlock(glock);

        if (last_alive != 0)
                *when = last_alive;

        CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
               libcfs_nid2str(nid), peer_ni,
               last_alive ? now - last_alive : -1,
               connect);

        if (!connect)
                return;

        ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());

        write_lock_bh(glock);

        peer_ni = ksocknal_find_peer_locked(ni, id);
        if (peer_ni != NULL)
                ksocknal_launch_all_connections_locked(peer_ni);

        write_unlock_bh(glock);
}
static void
ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
{
        int index;
        int i;
        struct list_head *tmp;
        struct ksock_conn *conn;

        for (index = 0; ; index++) {
                read_lock(&ksocknal_data.ksnd_global_lock);

                i = 0;
                conn = NULL;

                list_for_each(tmp, &peer_ni->ksnp_conns) {
                        if (i++ == index) {
                                conn = list_entry(tmp, struct ksock_conn,
                                                  ksnc_list);
                                ksocknal_conn_addref(conn);
                                break;
                        }
                }

                read_unlock(&ksocknal_data.ksnd_global_lock);

                if (conn == NULL)
                        break;

                ksocknal_lib_push_conn(conn);
                ksocknal_conn_decref(conn);
        }
}
static int
ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
{
        int lo;
        int hi;
        int bkt;
        int rc = -ENOENT;

        if (id.nid != LNET_NID_ANY) {
                lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
                hi = lo;
        } else {
                lo = 0;
                hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
        }

        for (bkt = lo; bkt <= hi; bkt++) {
                int peer_off; /* searching offset in peer_ni hash table */

                for (peer_off = 0; ; peer_off++) {
                        struct ksock_peer_ni *peer_ni;
                        int i = 0;

                        read_lock(&ksocknal_data.ksnd_global_lock);
                        hlist_for_each_entry(peer_ni,
                                             &ksocknal_data.ksnd_peers[bkt],
                                             ksnp_list) {
                                if (!((id.nid == LNET_NID_ANY ||
                                       id.nid == peer_ni->ksnp_id.nid) &&
                                      (id.pid == LNET_PID_ANY ||
                                       id.pid == peer_ni->ksnp_id.pid)))
                                        continue;

                                if (i++ == peer_off) {
                                        ksocknal_peer_addref(peer_ni);
                                        break;
                                }
                        }
                        read_unlock(&ksocknal_data.ksnd_global_lock);

                        if (i <= peer_off) /* no match */
                                break;

                        rc = 0;
                        ksocknal_push_peer(peer_ni);
                        ksocknal_peer_decref(peer_ni);
                }
        }
        return rc;
}
static int
ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
        struct ksock_net *net = ni->ni_data;
        struct ksock_interface *iface;
        int rc;
        int i;
        int j;
        struct ksock_peer_ni *peer_ni;
        struct list_head *rtmp;
        struct ksock_route *route;

        if (ipaddress == 0 ||
            netmask == 0)
                return -EINVAL;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        iface = ksocknal_ip2iface(ni, ipaddress);
        if (iface != NULL) {
                /* silently ignore dups */
                rc = 0;
        } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
                rc = -ENOSPC;
        } else {
                iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];

                iface->ksni_ipaddr = ipaddress;
                iface->ksni_netmask = netmask;
                iface->ksni_nroutes = 0;
                iface->ksni_npeers = 0;

                hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
                        for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
                                if (peer_ni->ksnp_passive_ips[j] == ipaddress)
                                        iface->ksni_npeers++;

                        list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                route = list_entry(rtmp,
                                                   struct ksock_route,
                                                   ksnr_list);

                                if (route->ksnr_myipaddr == ipaddress)
                                        iface->ksni_nroutes++;
                        }
                }

                rc = 0;
                /* NB only new connections will pay attention to the new
                 * interface! */
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return rc;
}
static void
ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
{
        struct list_head *tmp;
        struct list_head *nxt;
        struct ksock_route *route;
        struct ksock_conn *conn;
        int i;
        int j;

        for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
                if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
                        for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
                                peer_ni->ksnp_passive_ips[j-1] =
                                        peer_ni->ksnp_passive_ips[j];
                        peer_ni->ksnp_n_passive_ips--;
                        break;
                }

        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                if (route->ksnr_myipaddr != ipaddr)
                        continue;

                if (route->ksnr_share_count != 0) {
                        /* Manually created; keep, but unbind */
                        route->ksnr_myipaddr = 0;
                } else {
                        ksocknal_del_route_locked(route);
                }
        }

        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
                conn = list_entry(tmp, struct ksock_conn, ksnc_list);

                if (conn->ksnc_myipaddr == ipaddr)
                        ksocknal_close_conn_locked(conn, 0);
        }
}
static int
ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
        struct ksock_net *net = ni->ni_data;
        int rc = -ENOENT;
        struct hlist_node *nxt;
        struct ksock_peer_ni *peer_ni;
        u32 this_ip;
        int i;
        int j;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                this_ip = net->ksnn_interfaces[i].ksni_ipaddr;

                if (!(ipaddress == 0 ||
                      ipaddress == this_ip))
                        continue;

                rc = 0;

                for (j = i+1; j < net->ksnn_ninterfaces; j++)
                        net->ksnn_interfaces[j-1] =
                                net->ksnn_interfaces[j];

                net->ksnn_ninterfaces--;

                hash_for_each_safe(ksocknal_data.ksnd_peers, j,
                                   nxt, peer_ni, ksnp_list) {
                        if (peer_ni->ksnp_ni != ni)
                                continue;

                        ksocknal_peer_del_interface_locked(peer_ni, this_ip);
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return rc;
}
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
        struct lnet_process_id id = {0};
        struct libcfs_ioctl_data *data = arg;
        int rc;

        switch (cmd) {
        case IOC_LIBCFS_GET_INTERFACE: {
                struct ksock_net *net = ni->ni_data;
                struct ksock_interface *iface;

                read_lock(&ksocknal_data.ksnd_global_lock);

                if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
                        rc = -ENOENT;
                } else {
                        rc = 0;
                        iface = &net->ksnn_interfaces[data->ioc_count];

                        data->ioc_u32[0] = iface->ksni_ipaddr;
                        data->ioc_u32[1] = iface->ksni_netmask;
                        data->ioc_u32[2] = iface->ksni_npeers;
                        data->ioc_u32[3] = iface->ksni_nroutes;
                }

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return rc;
        }

        case IOC_LIBCFS_ADD_INTERFACE:
                return ksocknal_add_interface(ni,
                                              data->ioc_u32[0], /* IP address */
                                              data->ioc_u32[1]); /* net mask */

        case IOC_LIBCFS_DEL_INTERFACE:
                return ksocknal_del_interface(ni,
                                              data->ioc_u32[0]); /* IP address */

        case IOC_LIBCFS_GET_PEER: {
                __u32 myip = 0;
                __u32 ip = 0;
                int port = 0;
                int conn_count = 0;
                int share_count = 0;

                rc = ksocknal_get_peer_info(ni, data->ioc_count,
                                            &id, &myip, &ip, &port,
                                            &conn_count, &share_count);
                if (rc != 0)
                        return rc;

                data->ioc_nid = id.nid;
                data->ioc_count = share_count;
                data->ioc_u32[0] = ip;
                data->ioc_u32[1] = port;
                data->ioc_u32[2] = myip;
                data->ioc_u32[3] = conn_count;
                data->ioc_u32[4] = id.pid;
                return 0;
        }

        case IOC_LIBCFS_ADD_PEER:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_LUSTRE;
                return ksocknal_add_peer(ni, id,
                                         data->ioc_u32[0], /* IP */
                                         data->ioc_u32[1]); /* port */

        case IOC_LIBCFS_DEL_PEER:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_ANY;
                return ksocknal_del_peer(ni, id,
                                         data->ioc_u32[0]); /* IP */

        case IOC_LIBCFS_GET_CONN: {
                int txmem;
                int rxmem;
                int nagle;
                struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);

                if (conn == NULL)
                        return -ENOENT;

                ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);

                data->ioc_count = txmem;
                data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
                data->ioc_flags = nagle;
                data->ioc_u32[0] = conn->ksnc_ipaddr;
                data->ioc_u32[1] = conn->ksnc_port;
                data->ioc_u32[2] = conn->ksnc_myipaddr;
                data->ioc_u32[3] = conn->ksnc_type;
                data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
                data->ioc_u32[5] = rxmem;
                data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
                ksocknal_conn_decref(conn);
                return 0;
        }

        case IOC_LIBCFS_CLOSE_CONNECTION:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_ANY;
                return ksocknal_close_matching_conns(id,
                                                     data->ioc_u32[0]);

        case IOC_LIBCFS_REGISTER_MYNID:
                /* Ignore if this is a noop */
                if (data->ioc_nid == ni->ni_nid)
                        return 0;

                CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
                       libcfs_nid2str(data->ioc_nid),
                       libcfs_nid2str(ni->ni_nid));
                return -EINVAL;

        case IOC_LIBCFS_PUSH_CONNECTION:
                id.nid = data->ioc_nid;
                id.pid = LNET_PID_ANY;
                return ksocknal_push(ni, id);

        default:
                return -EINVAL;
        }
        /* not reached */
}
static void
ksocknal_free_buffers(void)
{
        LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);

        if (ksocknal_data.ksnd_schedulers != NULL)
                cfs_percpt_free(ksocknal_data.ksnd_schedulers);

        spin_lock(&ksocknal_data.ksnd_tx_lock);

        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                struct list_head zlist;
                struct ksock_tx *tx;

                list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
                list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
                spin_unlock(&ksocknal_data.ksnd_tx_lock);

                while (!list_empty(&zlist)) {
                        tx = list_entry(zlist.next, struct ksock_tx, tx_list);
                        list_del(&tx->tx_list);
                        LIBCFS_FREE(tx, tx->tx_desc_size);
                }
        } else {
                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        }
}
static void
ksocknal_base_shutdown(void)
{
        struct ksock_sched *sched;
        struct ksock_peer_ni *peer_ni;
        int i;

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));
        LASSERT(ksocknal_data.ksnd_nnets == 0);

        switch (ksocknal_data.ksnd_init) {
        default:
                LASSERT(0);
                /* fallthrough */

        case SOCKNAL_INIT_ALL:
        case SOCKNAL_INIT_DATA:
                hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
                        LASSERT(0);

                LASSERT(list_empty(&ksocknal_data.ksnd_nets));
                LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
                LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
                LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
                LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));

                if (ksocknal_data.ksnd_schedulers != NULL) {
                        cfs_percpt_for_each(sched, i,
                                            ksocknal_data.ksnd_schedulers) {

                                LASSERT(list_empty(&sched->kss_tx_conns));
                                LASSERT(list_empty(&sched->kss_rx_conns));
                                LASSERT(list_empty(&sched->kss_zombie_noop_txs));
                                LASSERT(sched->kss_nconns == 0);
                        }
                }

                /* flag threads to terminate; wake and wait for them to die */
                ksocknal_data.ksnd_shuttingdown = 1;
                wake_up_all(&ksocknal_data.ksnd_connd_waitq);
                wake_up_all(&ksocknal_data.ksnd_reaper_waitq);

                if (ksocknal_data.ksnd_schedulers != NULL) {
                        cfs_percpt_for_each(sched, i,
                                            ksocknal_data.ksnd_schedulers)
                                wake_up_all(&sched->kss_waitq);
                }

                i = 4;
                read_lock(&ksocknal_data.ksnd_global_lock);
                while (ksocknal_data.ksnd_nthreads != 0) {
                        i++;
                        /* power of 2? */
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                               "waiting for %d threads to terminate\n",
                               ksocknal_data.ksnd_nthreads);
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(cfs_time_seconds(1));
                        read_lock(&ksocknal_data.ksnd_global_lock);
                }
                read_unlock(&ksocknal_data.ksnd_global_lock);

                ksocknal_free_buffers();

                ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
                break;
        }

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        module_put(THIS_MODULE);
}
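
/* One-time global initialisation: the peer hash table, connd/reaper
 * bookkeeping and one scheduler block per CPT.  Scheduler thread limits
 * are derived from each CPT's weight; connd and reaper threads are
 * spawned here, scheduler threads later via ksocknal_start_schedulers(). */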
2300 ksocknal_base_startup(void)
2302 struct ksock_sched *sched;
2306 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2307 LASSERT(ksocknal_data.ksnd_nnets == 0);
2309 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2311 hash_init(ksocknal_data.ksnd_peers);
2313 rwlock_init(&ksocknal_data.ksnd_global_lock);
2314 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2316 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2317 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2318 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2319 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2320 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2322 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2323 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2324 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2325 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2327 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2328 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2330 /* NB memset above zeros whole of ksocknal_data */
2332 /* flag lists/ptrs/locks initialised */
2333 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2334 if (!try_module_get(THIS_MODULE))
2337 /* Create a scheduler block per available CPT */
2338 ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2340 if (ksocknal_data.ksnd_schedulers == NULL)
2343 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2347 * make sure not to allocate more threads than there are
2348 * cores/CPUs in teh CPT
2350 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2351 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2352 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2355 * max to half of CPUs, assume another half should be
2356 * reserved for upper layer modules
2358 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2361 sched->kss_nthreads_max = nthrs;
2364 spin_lock_init(&sched->kss_lock);
2365 INIT_LIST_HEAD(&sched->kss_rx_conns);
2366 INIT_LIST_HEAD(&sched->kss_tx_conns);
2367 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2368 init_waitqueue_head(&sched->kss_waitq);
2371 ksocknal_data.ksnd_connd_starting = 0;
2372 ksocknal_data.ksnd_connd_failed_stamp = 0;
2373 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2374 /* must have at least 2 connds to remain responsive to accepts while
2376 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2377 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2379 if (*ksocknal_tunables.ksnd_nconnds_max <
2380 *ksocknal_tunables.ksnd_nconnds) {
2381 ksocknal_tunables.ksnd_nconnds_max =
2382 ksocknal_tunables.ksnd_nconnds;
	for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
		char name[16];

		spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
		ksocknal_data.ksnd_connd_starting++;
		spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

		snprintf(name, sizeof(name), "socknal_cd%02d", i);
		rc = ksocknal_thread_start(ksocknal_connd,
					   (void *)((uintptr_t)i), name);
		if (rc != 0) {
			spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
			ksocknal_data.ksnd_connd_starting--;
			spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
			CERROR("Can't spawn socknal connd: %d\n", rc);
			goto failed;
		}
	}
	rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
	if (rc != 0) {
		CERROR("Can't spawn socknal reaper: %d\n", rc);
		goto failed;
	}

	/* flag everything initialised */
	ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;

	return 0;

failed:
	ksocknal_base_shutdown();
	return -ENETDOWN;
}
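/*
 * Log every peer_ni on this NI that still holds state; called while
 * ksocknal_shutdown() waits for peers to disconnect, so a stuck
 * shutdown shows exactly what is pinning each peer.
 */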
static void
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
	struct ksock_peer_ni *peer_ni;
	int i;

	read_lock(&ksocknal_data.ksnd_global_lock);

	hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
		struct ksock_route *route;
		struct ksock_conn *conn;

		if (peer_ni->ksnp_ni != ni)
			continue;

		CWARN("Active peer_ni on shutdown: %s, ref %d, "
		      "closing %d, accepting %d, err %d, zcookie %llu, "
		      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
		      atomic_read(&peer_ni->ksnp_refcount),
		      peer_ni->ksnp_closing,
		      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
		      peer_ni->ksnp_zc_next_cookie,
		      !list_empty(&peer_ni->ksnp_tx_queue),
		      !list_empty(&peer_ni->ksnp_zc_req_list));

		list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
			      "del %d\n", atomic_read(&route->ksnr_refcount),
			      route->ksnr_scheduled, route->ksnr_connecting,
			      route->ksnr_connected, route->ksnr_deleted);
		}

		list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
			CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
			      atomic_read(&conn->ksnc_conn_refcount),
			      atomic_read(&conn->ksnc_sock_refcount),
			      conn->ksnc_type, conn->ksnc_closing);
		}
		break;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
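/*
 * Tear down one network instance: bias ksnn_npeers so no new peer_ni
 * can be created, delete all peers on the NI, then wait for their
 * state to drain before freeing the net.  Shutting down the last net
 * also tears down the global state via ksocknal_base_shutdown().
 */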
void
ksocknal_shutdown(struct lnet_ni *ni)
{
	struct ksock_net *net = ni->ni_data;
	struct lnet_process_id anyid = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};
	int i;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);

	/* prevent new peers */
	atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);

	/* Wait for all peer_ni state to clean up */
	i = 2;
	while (atomic_read(&net->ksnn_npeers) > SOCKNAL_SHUTDOWN_BIAS) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "waiting for %d peers to disconnect\n",
		       atomic_read(&net->ksnn_npeers) - SOCKNAL_SHUTDOWN_BIAS);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));

		ksocknal_debug_peerhash(ni);
	}

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
	}

	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
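/*
 * Count how many of @net's interfaces are not already in use by the
 * nets on ksnd_nets.  Alias devices ("eth0:1") compare equal to their
 * base device: the name is temporarily truncated at the colon.
 */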
static int
ksocknal_search_new_ipif(struct ksock_net *net)
{
	int new_ipif = 0;
	int i;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
		char *colon = strchr(ifnam, ':');
		int found = 0;
		struct ksock_net *tmp;
		int j;

		if (colon != NULL) /* ignore alias device */
			*colon = 0;

		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
				    ksnn_list) {
			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
				char *ifnam2 =
					&tmp->ksnn_interfaces[j].ksni_name[0];
				char *colon2 = strchr(ifnam2, ':');

				if (colon2 != NULL)
					*colon2 = 0;
				found = strcmp(ifnam, ifnam2) == 0;
				if (colon2 != NULL)
					*colon2 = ':';
			}
			if (found)
				break;
		}

		new_ipif += !found;
		if (colon != NULL)
			*colon = ':';
	}

	return new_ipif;
}
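/*
 * Start scheduler threads for one CPT.  On the first call the thread
 * count is derived from the CPT weight and the "nscheds" tunable; a
 * later call (prompted by a new interface) adds at most two threads,
 * never exceeding kss_nthreads_max.
 */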
static int
ksocknal_start_schedulers(struct ksock_sched *sched)
{
	int nthrs;
	int rc = 0;
	int i;

	if (sched->kss_nthreads == 0) {
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = sched->kss_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       sched->kss_cpt);
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
		}
		nthrs = min(nthrs, sched->kss_nthreads_max);
	} else {
		LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
		/* increase two threads if there is new interface */
		nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
	}

	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];

		id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
			 sched->kss_cpt, (int)KSOCK_THREAD_SID(id));

		rc = ksocknal_thread_start(ksocknal_scheduler,
					   (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       sched->kss_cpt, (int)KSOCK_THREAD_SID(id), rc);
		break;
	}

	sched->kss_nthreads += i;
	return rc;
}
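/*
 * Ensure each CPT used by @net has scheduler threads running.  CPTs
 * that already have threads are grown only when the net contributes a
 * previously-unseen interface.
 */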
static int
ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
{
	int newif = ksocknal_search_new_ipif(net);
	int rc;
	int i;

	if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
		return -EINVAL;

	for (i = 0; i < ncpts; i++) {
		struct ksock_sched *sched;
		int cpt = (cpts == NULL) ? i : cpts[i];

		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
		sched = ksocknal_data.ksnd_schedulers[cpt];

		if (!newif && sched->kss_nthreads > 0)
			continue;

		rc = ksocknal_start_schedulers(sched);
		if (rc != 0)
			return rc;
	}

	return 0;
}
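/*
 * Bring up one socklnd NI, e.g. for a node configured with
 * networks="tcp0(eth0)": allocate its ksock_net, apply tunable
 * defaults, map the NI onto local interfaces, start scheduler threads
 * and encode the primary interface address into the NID.
 */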
int
ksocknal_startup(struct lnet_ni *ni)
{
	struct ksock_net *net;
	struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
	struct ksock_interface *ksi = NULL;
	struct lnet_inetdev *ifaces = NULL;
	int i = 0;
	int rc;

	LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);

	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}
	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	net->ksnn_incarnation = ktime_get_real_ns();
	ni->ni_data = net;
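	/* Common LND tunables left unset by the user (-1) take the socklnd
	 * module defaults. */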
	net_tunables = &ni->ni_net->net_tunables;

	if (net_tunables->lct_peer_timeout == -1)
		net_tunables->lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;

	if (net_tunables->lct_max_tx_credits == -1)
		net_tunables->lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;

	if (net_tunables->lct_peer_tx_credits == -1)
		net_tunables->lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;

	if (net_tunables->lct_peer_tx_credits >
	    net_tunables->lct_max_tx_credits)
		net_tunables->lct_peer_tx_credits =
			net_tunables->lct_max_tx_credits;

	if (net_tunables->lct_peer_rtr_credits == -1)
		net_tunables->lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;
	rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
	if (rc < 0)
		goto fail_1;
	if (!ni->ni_interfaces[0]) {
		ksi = &net->ksnn_interfaces[0];

		/* Use the first discovered interface */
		net->ksnn_ninterfaces = 1;
		ni->ni_dev_cpt = ifaces[0].li_cpt;
		ksi->ksni_ipaddr = ifaces[0].li_ipaddr;
		ksi->ksni_netmask = ifaces[0].li_netmask;
		strlcpy(ksi->ksni_name, ifaces[0].li_name,
			sizeof(ksi->ksni_name));
	} else {
		/* Before Multi-Rail ksocklnd would manage
		 * multiple interfaces with its own tcp bonding.
		 * If we encounter an old configuration using
		 * this tcp bonding approach then we need to
		 * handle more than one ni_interfaces.
		 *
		 * In Multi-Rail configuration only ONE ni_interface
		 * should exist. Each IP alias should be mapped to
		 * its own 'struct lnet_ni'.
		 */
		for (i = 0; i < LNET_INTERFACES_NUM; i++) {
			int j;

			if (!ni->ni_interfaces[i])
				break;

			for (j = 0; j < LNET_INTERFACES_NUM; j++) {
				if (i != j && ni->ni_interfaces[j] &&
				    strcmp(ni->ni_interfaces[i],
					   ni->ni_interfaces[j]) == 0) {
					rc = -EINVAL;
					CERROR("ksocklnd: found duplicate %s at %d and %d, rc = %d\n",
					       ni->ni_interfaces[i], i, j, rc);
					goto fail_1;
				}
			}
			/* rc holds the number of interfaces enumerated by
			 * lnet_inet_enumerate() above */
			for (j = 0; j < rc; j++) {
				if (strcmp(ifaces[j].li_name,
					   ni->ni_interfaces[i]) != 0)
					continue;

				ksi = &net->ksnn_interfaces[j];
				ni->ni_dev_cpt = ifaces[j].li_cpt;
				ksi->ksni_ipaddr = ifaces[j].li_ipaddr;
				ksi->ksni_netmask = ifaces[j].li_netmask;
				strlcpy(ksi->ksni_name, ifaces[j].li_name,
					sizeof(ksi->ksni_name));
				net->ksnn_ninterfaces++;
				break;
			}
		}
		/* ni_interfaces don't map to all network interfaces */
		if (!ksi || net->ksnn_ninterfaces != i) {
			CERROR("ksocklnd: requested %d but only %d interfaces found\n",
			       i, net->ksnn_ninterfaces);
			goto fail_1;
		}
	}
	/* start threads before adding the net to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;

	LASSERT(ksi);
	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ksi->ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

	ksocknal_data.ksnd_nnets++;

	return 0;

fail_1:
	LIBCFS_FREE(net, sizeof(*net));
fail_0:
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
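/*
 * Module glue: the_ksocklnd publishes the socklnd entry points to LNet.
 * It is registered at module load and unregistered at unload.
 */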
static void __exit ksocklnd_exit(void)
{
	lnet_unregister_lnd(&the_ksocklnd);
}
static const struct lnet_lnd the_ksocklnd = {
	.lnd_type		= SOCKLND,
	.lnd_startup		= ksocknal_startup,
	.lnd_shutdown		= ksocknal_shutdown,
	.lnd_ctl		= ksocknal_ctl,
	.lnd_send		= ksocknal_send,
	.lnd_recv		= ksocknal_recv,
	.lnd_notify_peer_down	= ksocknal_notify_gw_down,
	.lnd_query		= ksocknal_query,
	.lnd_accept		= ksocknal_accept,
};
static int __init ksocklnd_init(void)
{
	int rc;

	/* check ksnr_connected/connecting fields are large enough */
	BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
	BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);

	rc = ksocknal_tunables_init();
	if (rc != 0)
		return rc;

	lnet_register_lnd(&the_ksocklnd);

	return 0;
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);