4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lnet/klnds/socklnd/socklnd.c
34 * Author: Zach Brown <zab@zabbo.net>
35 * Author: Peter J. Braam <braam@clusterfs.com>
36 * Author: Phil Schwan <phil@clusterfs.com>
37 * Author: Eric Barton <eric@bartonsoftware.com>
40 #include <linux/inetdevice.h>
43 static const struct lnet_lnd the_ksocklnd;
44 struct ksock_nal_data ksocknal_data;
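/*
 * Return the ksock_interface on @ni that owns local address @ip, or
 * NULL if no configured interface matches.  The interface table is
 * only stable under ksnd_global_lock, so callers are expected to be
 * holding it.
 */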
46 static struct ksock_interface *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
49 struct ksock_net *net = ni->ni_data;
51 struct ksock_interface *iface;
53 for (i = 0; i < net->ksnn_ninterfaces; i++) {
54 LASSERT(i < LNET_INTERFACES_NUM);
55 iface = &net->ksnn_interfaces[i];
57 if (iface->ksni_ipaddr == ip)
64 static struct ksock_interface *
65 ksocknal_index2iface(struct lnet_ni *ni, int index)
67 struct ksock_net *net = ni->ni_data;
69 struct ksock_interface *iface;
71 for (i = 0; i < net->ksnn_ninterfaces; i++) {
72 LASSERT(i < LNET_INTERFACES_NUM);
73 iface = &net->ksnn_interfaces[i];
75 if (iface->ksni_index == index)
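/*
 * Map a local IPv4 address to the ifindex of the net_device carrying
 * it in @ni's namespace; loopback and downed interfaces are skipped.
 */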
82 static int ksocknal_ip2index(__u32 ipaddress, struct lnet_ni *ni)
84 struct net_device *dev;
86 DECLARE_CONST_IN_IFADDR(ifa);
89 for_each_netdev(ni->ni_net_ns, dev) {
90 int flags = dev_get_flags(dev);
91 struct in_device *in_dev;
93 if (flags & IFF_LOOPBACK) /* skip the loopback IF */
96 if (!(flags & IFF_UP))
99 in_dev = __in_dev_get_rcu(dev);
103 in_dev_for_each_ifa_rcu(ifa, in_dev) {
104 if (ntohl(ifa->ifa_local) == ipaddress)
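/*
 * Allocate and initialise a route to @ipaddr:@port.  The new route
 * carries a single ref for the caller and is not yet bound to a
 * peer_ni or a local interface (ksnr_myiface == -1).
 */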
116 static struct ksock_route *
117 ksocknal_create_route(__u32 ipaddr, int port)
119 struct ksock_route *route;
121 LIBCFS_ALLOC (route, sizeof (*route));
125 atomic_set (&route->ksnr_refcount, 1);
126 route->ksnr_peer = NULL;
127 route->ksnr_retry_interval = 0; /* OK to connect at any time */
128 route->ksnr_ipaddr = ipaddr;
129 route->ksnr_myiface = -1;
130 route->ksnr_port = port;
131 route->ksnr_scheduled = 0;
132 route->ksnr_connecting = 0;
133 route->ksnr_connected = 0;
134 route->ksnr_deleted = 0;
135 route->ksnr_conn_count = 0;
136 route->ksnr_share_count = 0;
142 ksocknal_destroy_route(struct ksock_route *route)
144 LASSERT (atomic_read(&route->ksnr_refcount) == 0);
146 if (route->ksnr_peer != NULL)
147 ksocknal_peer_decref(route->ksnr_peer);
149 LIBCFS_FREE (route, sizeof (*route));
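/*
 * Allocate a peer_ni for @id on @ni with one ref for the caller.  The
 * peer_ni counts against the net's ksnn_npeers; creation fails with
 * -ESHUTDOWN once that count has been biased negative for shutdown
 * (see ksocknal_shutdown()).
 */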
152 static struct ksock_peer_ni *
153 ksocknal_create_peer(struct lnet_ni *ni, struct lnet_process_id id)
155 int cpt = lnet_cpt_of_nid(id.nid, ni);
156 struct ksock_net *net = ni->ni_data;
157 struct ksock_peer_ni *peer_ni;
159 LASSERT(id.nid != LNET_NID_ANY);
160 LASSERT(id.pid != LNET_PID_ANY);
161 LASSERT(!in_interrupt());
163 if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
164 CERROR("Can't create peer_ni: network shutdown\n");
165 return ERR_PTR(-ESHUTDOWN);
168 LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
170 atomic_dec(&net->ksnn_npeers);
171 return ERR_PTR(-ENOMEM);
174 peer_ni->ksnp_ni = ni;
175 peer_ni->ksnp_id = id;
176 atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
177 peer_ni->ksnp_closing = 0;
178 peer_ni->ksnp_accepting = 0;
179 peer_ni->ksnp_proto = NULL;
180 peer_ni->ksnp_last_alive = 0;
181 peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
183 INIT_LIST_HEAD(&peer_ni->ksnp_conns);
184 INIT_LIST_HEAD(&peer_ni->ksnp_routes);
185 INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
186 INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
187 spin_lock_init(&peer_ni->ksnp_lock);
193 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
195 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
197 CDEBUG (D_NET, "peer_ni %s %p deleted\n",
198 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
200 LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
201 LASSERT(peer_ni->ksnp_accepting == 0);
202 LASSERT(list_empty(&peer_ni->ksnp_conns));
203 LASSERT(list_empty(&peer_ni->ksnp_routes));
204 LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
205 LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
207 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
209 /* NB a peer_ni's connections and routes keep a reference on their
210 * peer_ni until they are destroyed, so we can be assured that _all_
211 * state to do with this peer_ni has been cleaned up when its refcount
214 if (atomic_dec_and_test(&net->ksnn_npeers))
215 wake_up_var(&net->ksnn_npeers);
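/*
 * Look @id up in the global peer_ni hash.  No ref is taken on the
 * result, so it is only valid while the caller holds ksnd_global_lock;
 * ksocknal_find_peer() below is the ref-taking wrapper.
 */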
218 struct ksock_peer_ni *
219 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
221 struct ksock_peer_ni *peer_ni;
223 hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
225 LASSERT(!peer_ni->ksnp_closing);
227 if (peer_ni->ksnp_ni != ni)
230 if (peer_ni->ksnp_id.nid != id.nid ||
231 peer_ni->ksnp_id.pid != id.pid)
234 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
235 peer_ni, libcfs_id2str(id),
236 atomic_read(&peer_ni->ksnp_refcount));
242 struct ksock_peer_ni *
243 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
245 struct ksock_peer_ni *peer_ni;
247 read_lock(&ksocknal_data.ksnd_global_lock);
248 peer_ni = ksocknal_find_peer_locked(ni, id);
249 if (peer_ni != NULL) /* +1 ref for caller? */
250 ksocknal_peer_addref(peer_ni);
251 read_unlock(&ksocknal_data.ksnd_global_lock);
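/*
 * Remove @peer_ni from the peer_ni hash: give back its claims on the
 * passive interfaces it was using, mark it closing and drop the hash
 * table's ref.  Caller holds ksnd_global_lock exclusively.
 */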
257 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
261 struct ksock_interface *iface;
263 for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
264 LASSERT(i < LNET_INTERFACES_NUM);
265 ip = peer_ni->ksnp_passive_ips[i];
267 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
269 * All IPs in peer_ni->ksnp_passive_ips[] come from the
270 * interface list, therefore the call must succeed.
272 LASSERT(iface != NULL);
274 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
275 peer_ni, iface, iface->ksni_nroutes);
276 iface->ksni_npeers--;
279 LASSERT(list_empty(&peer_ni->ksnp_conns));
280 LASSERT(list_empty(&peer_ni->ksnp_routes));
281 LASSERT(!peer_ni->ksnp_closing);
282 peer_ni->ksnp_closing = 1;
283 hlist_del(&peer_ni->ksnp_list);
284 /* lose peerlist's ref */
285 ksocknal_peer_decref(peer_ni);
289 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
290 struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
291 int *port, int *conn_count, int *share_count)
293 struct ksock_peer_ni *peer_ni;
294 struct ksock_route *route;
295 struct list_head *rtmp;
300 read_lock(&ksocknal_data.ksnd_global_lock);
302 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
304 if (peer_ni->ksnp_ni != ni)
307 if (peer_ni->ksnp_n_passive_ips == 0 &&
308 list_empty(&peer_ni->ksnp_routes)) {
312 *id = peer_ni->ksnp_id;
322 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
326 *id = peer_ni->ksnp_id;
327 *myip = peer_ni->ksnp_passive_ips[j];
336 list_for_each(rtmp, &peer_ni->ksnp_routes) {
340 route = list_entry(rtmp, struct ksock_route,
343 *id = peer_ni->ksnp_id;
344 rc = choose_ipv4_src(myip, route->ksnr_myiface,
347 *peer_ip = route->ksnr_ipaddr;
348 *port = route->ksnr_port;
349 *conn_count = route->ksnr_conn_count;
350 *share_count = route->ksnr_share_count;
355 read_unlock(&ksocknal_data.ksnd_global_lock);
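/*
 * Bind a new conn to the route it corresponds to: the conn takes a ref
 * on the route, the route is rebound to the interface the conn
 * actually used if they differ, the connection type is recorded in
 * ksnr_connected and the retry interval is reset so further connection
 * attempts may proceed immediately.
 */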
360 ksocknal_associate_route_conn_locked(struct ksock_route *route,
361 struct ksock_conn *conn)
363 struct ksock_peer_ni *peer_ni = route->ksnr_peer;
364 int type = conn->ksnc_type;
365 struct ksock_interface *iface;
366 int conn_iface = ksocknal_ip2index(conn->ksnc_myipaddr,
367 route->ksnr_peer->ksnp_ni);
369 conn->ksnc_route = route;
370 ksocknal_route_addref(route);
372 if (route->ksnr_myiface != conn_iface) {
373 if (route->ksnr_myiface < 0) {
374 /* route wasn't bound locally yet (the initial route) */
375 CDEBUG(D_NET, "Binding %s %pI4h to interface %d\n",
376 libcfs_id2str(peer_ni->ksnp_id),
381 "Rebinding %s %pI4h from interface %d to %d\n",
382 libcfs_id2str(peer_ni->ksnp_id),
387 iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
388 route->ksnr_myiface);
390 iface->ksni_nroutes--;
392 route->ksnr_myiface = conn_iface;
393 iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
394 route->ksnr_myiface);
396 iface->ksni_nroutes++;
399 route->ksnr_connected |= (1<<type);
400 route->ksnr_conn_count++;
402 /* Successful connection => further attempts can
403 * proceed immediately
405 route->ksnr_retry_interval = 0;
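/*
 * Attach a freshly created route to @peer_ni.  The route must be
 * unused (no peer, not scheduled/connecting/connected), duplicates by
 * IP are rejected, and the peer_ni's route list takes over the
 * caller's ref.  Existing conns to the same IP are associated with
 * the new route.
 */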
409 ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
411 struct list_head *tmp;
412 struct ksock_conn *conn;
413 struct ksock_route *route2;
414 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
416 LASSERT(!peer_ni->ksnp_closing);
417 LASSERT(route->ksnr_peer == NULL);
418 LASSERT(!route->ksnr_scheduled);
419 LASSERT(!route->ksnr_connecting);
420 LASSERT(route->ksnr_connected == 0);
421 LASSERT(net->ksnn_ninterfaces > 0);
423 /* LASSERT(unique) */
424 list_for_each(tmp, &peer_ni->ksnp_routes) {
425 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
427 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
428 CERROR("Duplicate route %s %pI4h\n",
429 libcfs_id2str(peer_ni->ksnp_id),
430 &route->ksnr_ipaddr);
435 route->ksnr_peer = peer_ni;
436 ksocknal_peer_addref(peer_ni);
438 /* set the route's interface to the current net's interface */
439 route->ksnr_myiface = net->ksnn_interfaces[0].ksni_index;
440 net->ksnn_interfaces[0].ksni_nroutes++;
442 /* peer_ni's routelist takes over my ref on 'route' */
443 list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
445 list_for_each(tmp, &peer_ni->ksnp_conns) {
446 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
448 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
451 ksocknal_associate_route_conn_locked(route, conn);
452 /* keep going (typed routes) */
457 ksocknal_del_route_locked(struct ksock_route *route)
459 struct ksock_peer_ni *peer_ni = route->ksnr_peer;
460 struct ksock_interface *iface;
461 struct ksock_conn *conn;
462 struct list_head *ctmp;
463 struct list_head *cnxt;
465 LASSERT(!route->ksnr_deleted);
467 /* Close associated conns */
468 list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
469 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
471 if (conn->ksnc_route != route)
474 ksocknal_close_conn_locked(conn, 0);
477 if (route->ksnr_myiface >= 0) {
478 iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
479 route->ksnr_myiface);
481 iface->ksni_nroutes--;
484 route->ksnr_deleted = 1;
485 list_del(&route->ksnr_list);
486 ksocknal_route_decref(route); /* drop peer_ni's ref */
488 if (list_empty(&peer_ni->ksnp_routes) &&
489 list_empty(&peer_ni->ksnp_conns)) {
490 /* I've just removed the last route to a peer_ni with no active
492 ksocknal_unlink_peer_locked(peer_ni);
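/*
 * Create (or find) the peer_ni for @id and add an explicitly
 * configured route to @ipaddr:@port, bumping its share count.  This
 * is what the IOC_LIBCFS_ADD_PEER ioctl in ksocknal_ctl() below ends
 * up calling.
 */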
497 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
500 struct list_head *tmp;
501 struct ksock_peer_ni *peer_ni;
502 struct ksock_peer_ni *peer2;
503 struct ksock_route *route;
504 struct ksock_route *route2;
506 if (id.nid == LNET_NID_ANY ||
507 id.pid == LNET_PID_ANY)
510 /* Have a brand new peer_ni ready... */
511 peer_ni = ksocknal_create_peer(ni, id);
513 return PTR_ERR(peer_ni);
515 route = ksocknal_create_route (ipaddr, port);
517 ksocknal_peer_decref(peer_ni);
521 write_lock_bh(&ksocknal_data.ksnd_global_lock);
523 /* always called with a ref on ni, so shutdown can't have started */
524 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
527 peer2 = ksocknal_find_peer_locked(ni, id);
529 ksocknal_peer_decref(peer_ni);
532 /* peer_ni table takes my ref on peer_ni */
533 hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid);
537 list_for_each(tmp, &peer_ni->ksnp_routes) {
538 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
540 if (route2->ksnr_ipaddr == ipaddr)
545 if (route2 == NULL) {
546 ksocknal_add_route_locked(peer_ni, route);
547 route->ksnr_share_count++;
549 ksocknal_route_decref(route);
550 route2->ksnr_share_count++;
553 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
559 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
561 struct ksock_conn *conn;
562 struct ksock_route *route;
563 struct list_head *tmp;
564 struct list_head *nxt;
567 LASSERT(!peer_ni->ksnp_closing);
569 /* Extra ref prevents peer_ni disappearing until I'm done with it */
570 ksocknal_peer_addref(peer_ni);
572 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
573 route = list_entry(tmp, struct ksock_route, ksnr_list);
576 if (!(ip == 0 || route->ksnr_ipaddr == ip))
579 route->ksnr_share_count = 0;
580 /* This deletes associated conns too */
581 ksocknal_del_route_locked(route);
585 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
586 route = list_entry(tmp, struct ksock_route, ksnr_list);
587 nshared += route->ksnr_share_count;
591 /* remove everything else if there are no explicit entries
594 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
595 route = list_entry(tmp, struct ksock_route, ksnr_list);
597 /* we should only be removing auto-entries */
598 LASSERT(route->ksnr_share_count == 0);
599 ksocknal_del_route_locked(route);
602 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
603 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
605 ksocknal_close_conn_locked(conn, 0);
609 ksocknal_peer_decref(peer_ni);
610 /* NB peer_ni unlinks itself when last conn/route is removed */
614 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
617 struct hlist_node *pnxt;
618 struct ksock_peer_ni *peer_ni;
624 write_lock_bh(&ksocknal_data.ksnd_global_lock);
626 if (id.nid != LNET_NID_ANY) {
627 lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
631 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
634 for (i = lo; i <= hi; i++) {
635 hlist_for_each_entry_safe(peer_ni, pnxt,
636 &ksocknal_data.ksnd_peers[i],
638 if (peer_ni->ksnp_ni != ni)
641 if (!((id.nid == LNET_NID_ANY ||
642 peer_ni->ksnp_id.nid == id.nid) &&
643 (id.pid == LNET_PID_ANY ||
644 peer_ni->ksnp_id.pid == id.pid)))
647 ksocknal_peer_addref(peer_ni); /* a ref for me... */
649 ksocknal_del_peer_locked(peer_ni, ip);
651 if (peer_ni->ksnp_closing &&
652 !list_empty(&peer_ni->ksnp_tx_queue)) {
653 LASSERT(list_empty(&peer_ni->ksnp_conns));
654 LASSERT(list_empty(&peer_ni->ksnp_routes));
656 list_splice_init(&peer_ni->ksnp_tx_queue,
660 ksocknal_peer_decref(peer_ni); /* ...till here */
662 rc = 0; /* matched! */
666 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
668 ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
673 static struct ksock_conn *
674 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
676 struct ksock_peer_ni *peer_ni;
677 struct ksock_conn *conn;
678 struct list_head *ctmp;
681 read_lock(&ksocknal_data.ksnd_global_lock);
683 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
684 LASSERT(!peer_ni->ksnp_closing);
686 if (peer_ni->ksnp_ni != ni)
689 list_for_each(ctmp, &peer_ni->ksnp_conns) {
693 conn = list_entry(ctmp, struct ksock_conn,
695 ksocknal_conn_addref(conn);
696 read_unlock(&ksocknal_data.ksnd_global_lock);
701 read_unlock(&ksocknal_data.ksnd_global_lock);
705 static struct ksock_sched *
706 ksocknal_choose_scheduler_locked(unsigned int cpt)
708 struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
711 if (sched->kss_nthreads == 0) {
712 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
713 if (sched->kss_nthreads > 0) {
714 CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
715 cpt, sched->kss_cpt);
726 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
728 struct ksock_net *net = ni->ni_data;
732 read_lock(&ksocknal_data.ksnd_global_lock);
734 nip = net->ksnn_ninterfaces;
735 LASSERT(nip <= LNET_INTERFACES_NUM);
738 * Only offer interfaces for additional connections if I have
742 read_unlock(&ksocknal_data.ksnd_global_lock);
746 for (i = 0; i < nip; i++) {
747 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
748 LASSERT(ipaddrs[i] != 0);
751 read_unlock(&ksocknal_data.ksnd_global_lock);
756 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
758 int best_netmatch = 0;
765 for (i = 0; i < nips; i++) {
769 this_xor = (ips[i] ^ iface->ksni_ipaddr);
770 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
773 best_netmatch < this_netmatch ||
774 (best_netmatch == this_netmatch &&
775 best_xor > this_xor)))
779 best_netmatch = this_netmatch;
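/*
 * Choose which local interfaces to advertise to a passively connecting
 * peer_ni (only when this node has more than one interface): each slot
 * either reuses an interface already in ksnp_passive_ips or takes the
 * one with the best netmask match and fewest peers.  The choices
 * overwrite @peerips and the new count is returned.
 */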
788 ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
790 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
791 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
792 struct ksock_interface *iface;
793 struct ksock_interface *best_iface;
804 /* CAVEAT EMPTOR: We do all our interface matching with an
805 * exclusive hold of global lock at IRQ priority. We're only
806 * expecting to be dealing with small numbers of interfaces, so the
807 * O(n**3)-ness shouldn't matter */
809 /* Also note that I'm not going to return more than n_peerips
810 * interfaces, even if I have more myself */
812 write_lock_bh(global_lock);
814 LASSERT(n_peerips <= LNET_INTERFACES_NUM);
815 LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
817 /* Only match interfaces for additional connections
818 * if I have > 1 interface
820 n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
821 min(n_peerips, net->ksnn_ninterfaces);
823 for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
824 /* ^ yes really... */
826 /* If we have any new interfaces, first tick off all the
827 * peer_ni IPs that match old interfaces, then choose new
828 * interfaces to match the remaining peer_ni IPs.
829 * We don't forget interfaces we've stopped using; we might
830 * start using them again... */
832 if (i < peer_ni->ksnp_n_passive_ips) {
834 ip = peer_ni->ksnp_passive_ips[i];
835 best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
837 /* peer_ni passive ips are kept up to date */
838 LASSERT(best_iface != NULL);
840 /* choose a new interface */
841 LASSERT (i == peer_ni->ksnp_n_passive_ips);
847 for (j = 0; j < net->ksnn_ninterfaces; j++) {
848 iface = &net->ksnn_interfaces[j];
849 ip = iface->ksni_ipaddr;
851 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
852 if (peer_ni->ksnp_passive_ips[k] == ip)
855 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
858 k = ksocknal_match_peerip(iface, peerips, n_peerips);
859 xor = (ip ^ peerips[k]);
860 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
862 if (!(best_iface == NULL ||
863 best_netmatch < this_netmatch ||
864 (best_netmatch == this_netmatch &&
865 best_npeers > iface->ksni_npeers)))
869 best_netmatch = this_netmatch;
870 best_npeers = iface->ksni_npeers;
873 LASSERT(best_iface != NULL);
875 best_iface->ksni_npeers++;
876 ip = best_iface->ksni_ipaddr;
877 peer_ni->ksnp_passive_ips[i] = ip;
878 peer_ni->ksnp_n_passive_ips = i+1;
881 /* mark the best matching peer_ni IP used */
882 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
886 /* Overwrite input peer_ni IP addresses */
887 memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
889 write_unlock_bh(global_lock);
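/*
 * After the HELLO exchange, create a route for each peer_ni IP we were
 * told about (again only when this node has more than one interface),
 * binding each new route to the local interface with the best netmask
 * match and fewest existing routes.
 */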
895 ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
896 __u32 *peer_ipaddrs, int npeer_ipaddrs)
898 struct ksock_route *newroute = NULL;
899 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
900 struct lnet_ni *ni = peer_ni->ksnp_ni;
901 struct ksock_net *net = ni->ni_data;
902 struct list_head *rtmp;
903 struct ksock_route *route;
904 struct ksock_interface *iface;
905 struct ksock_interface *best_iface;
912 /* CAVEAT EMPTOR: We do all our interface matching with an
913 * exclusive hold of global lock at IRQ priority. We're only
914 * expecting to be dealing with small numbers of interfaces, so the
915 * O(n**3)-ness here shouldn't matter */
917 write_lock_bh(global_lock);
919 if (net->ksnn_ninterfaces < 2) {
920 /* Only create additional connections
921 * if I have > 1 interface */
922 write_unlock_bh(global_lock);
926 LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);
928 for (i = 0; i < npeer_ipaddrs; i++) {
929 if (newroute != NULL) {
930 newroute->ksnr_ipaddr = peer_ipaddrs[i];
932 write_unlock_bh(global_lock);
934 newroute = ksocknal_create_route(peer_ipaddrs[i], port);
935 if (newroute == NULL)
938 write_lock_bh(global_lock);
941 if (peer_ni->ksnp_closing) {
942 /* peer_ni got closed under me */
946 /* Already got a route? */
948 list_for_each(rtmp, &peer_ni->ksnp_routes) {
949 route = list_entry(rtmp, struct ksock_route, ksnr_list);
951 if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
963 LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
965 /* Select interface to connect from */
966 for (j = 0; j < net->ksnn_ninterfaces; j++) {
967 iface = &net->ksnn_interfaces[j];
969 /* Using this interface already? */
970 list_for_each(rtmp, &peer_ni->ksnp_routes) {
971 route = list_entry(rtmp, struct ksock_route,
974 if (route->ksnr_myiface == iface->ksni_index)
982 this_netmatch = (((iface->ksni_ipaddr ^
983 newroute->ksnr_ipaddr) &
984 iface->ksni_netmask) == 0) ? 1 : 0;
986 if (!(best_iface == NULL ||
987 best_netmatch < this_netmatch ||
988 (best_netmatch == this_netmatch &&
989 best_nroutes > iface->ksni_nroutes)))
993 best_netmatch = this_netmatch;
994 best_nroutes = iface->ksni_nroutes;
997 if (best_iface == NULL)
1000 newroute->ksnr_myiface = best_iface->ksni_index;
1001 best_iface->ksni_nroutes++;
1003 ksocknal_add_route_locked(peer_ni, newroute);
1007 write_unlock_bh(global_lock);
1008 if (newroute != NULL)
1009 ksocknal_route_decref(newroute);
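/*
 * Handle a passively accepted socket: package it as a connection
 * request and queue it for the connd threads, which complete the
 * handshake outside the acceptor's context.
 */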
1013 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
1015 struct ksock_connreq *cr;
1020 rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
1021 LASSERT(rc == 0); /* we succeeded before */
1023 LIBCFS_ALLOC(cr, sizeof(*cr));
1025 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
1026 "%pI4h: memory exhausted\n", &peer_ip);
1032 cr->ksncr_sock = sock;
1034 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1036 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
1037 wake_up(&ksocknal_data.ksnd_connd_waitq);
1039 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1044 ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
1046 struct ksock_route *route;
1048 list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1049 if (route->ksnr_ipaddr == ipaddr)
1050 return route->ksnr_connecting;
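/*
 * Create a conn on @sock.  Active connects pass the @route being
 * connected and the requested @type; passive accepts pass route ==
 * NULL and learn the peer_ni's identity from its HELLO.  Connection
 * races, protocol mismatches and peer_ni reboots are all resolved
 * here.
 */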
1056 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
1057 struct socket *sock, int type)
1059 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1061 struct lnet_process_id peerid;
1062 struct list_head *tmp;
1064 struct ksock_conn *conn;
1065 struct ksock_conn *conn2;
1066 struct ksock_peer_ni *peer_ni = NULL;
1067 struct ksock_peer_ni *peer2;
1068 struct ksock_sched *sched;
1069 struct ksock_hello_msg *hello;
1071 struct ksock_tx *tx;
1072 struct ksock_tx *txtmp;
1078 active = (route != NULL);
1080 LASSERT (active == (type != SOCKLND_CONN_NONE));
1082 LIBCFS_ALLOC(conn, sizeof(*conn));
1088 conn->ksnc_peer = NULL;
1089 conn->ksnc_route = NULL;
1090 conn->ksnc_sock = sock;
1091 /* 2 refs: 1 for conn, another extra ref prevents the socket
1092 * being closed before the connection is established */
1093 atomic_set (&conn->ksnc_sock_refcount, 2);
1094 conn->ksnc_type = type;
1095 ksocknal_lib_save_callback(sock, conn);
1096 atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1098 conn->ksnc_rx_ready = 0;
1099 conn->ksnc_rx_scheduled = 0;
1101 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1102 conn->ksnc_tx_ready = 0;
1103 conn->ksnc_tx_scheduled = 0;
1104 conn->ksnc_tx_carrier = NULL;
1105 atomic_set (&conn->ksnc_tx_nob, 0);
1107 LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1108 kshm_ips[LNET_INTERFACES_NUM]));
1109 if (hello == NULL) {
1114 /* stash conn's local and remote addrs */
1115 rc = ksocknal_lib_get_conn_addrs (conn);
1119 /* Find out/confirm peer_ni's NID and connection type and get the
1120 * vector of interfaces she's willing to let me connect to.
1121 * Passive connections use the listener timeout since the peer_ni sends
1125 peer_ni = route->ksnr_peer;
1126 LASSERT(ni == peer_ni->ksnp_ni);
1128 /* Active connection sends HELLO eagerly */
1129 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1130 peerid = peer_ni->ksnp_id;
1132 write_lock_bh(global_lock);
1133 conn->ksnc_proto = peer_ni->ksnp_proto;
1134 write_unlock_bh(global_lock);
1136 if (conn->ksnc_proto == NULL) {
1137 conn->ksnc_proto = &ksocknal_protocol_v3x;
1138 #if SOCKNAL_VERSION_DEBUG
1139 if (*ksocknal_tunables.ksnd_protocol == 2)
1140 conn->ksnc_proto = &ksocknal_protocol_v2x;
1141 else if (*ksocknal_tunables.ksnd_protocol == 1)
1142 conn->ksnc_proto = &ksocknal_protocol_v1x;
1146 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1150 peerid.nid = LNET_NID_ANY;
1151 peerid.pid = LNET_PID_ANY;
1153 /* Passive, get protocol from peer_ni */
1154 conn->ksnc_proto = NULL;
1157 rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1161 LASSERT (rc == 0 || active);
1162 LASSERT (conn->ksnc_proto != NULL);
1163 LASSERT (peerid.nid != LNET_NID_ANY);
1165 cpt = lnet_cpt_of_nid(peerid.nid, ni);
1168 ksocknal_peer_addref(peer_ni);
1169 write_lock_bh(global_lock);
1171 peer_ni = ksocknal_create_peer(ni, peerid);
1172 if (IS_ERR(peer_ni)) {
1173 rc = PTR_ERR(peer_ni);
1177 write_lock_bh(global_lock);
1179 /* called with a ref on ni, so shutdown can't have started */
1180 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);
1182 peer2 = ksocknal_find_peer_locked(ni, peerid);
1183 if (peer2 == NULL) {
1184 /* NB this puts an "empty" peer_ni in the peer_ni
1185 * table (which takes my ref) */
1186 hash_add(ksocknal_data.ksnd_peers,
1187 &peer_ni->ksnp_list, peerid.nid);
1189 ksocknal_peer_decref(peer_ni);
1194 ksocknal_peer_addref(peer_ni);
1195 peer_ni->ksnp_accepting++;
1197 /* Am I already connecting to this guy? Resolve in
1198 * favour of higher NID... */
1199 if (peerid.nid < ni->ni_nid &&
1200 ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1202 warn = "connection race resolution";
1207 if (peer_ni->ksnp_closing ||
1208 (active && route->ksnr_deleted)) {
1209 /* peer_ni/route got closed under me */
1211 warn = "peer_ni/route removed";
1215 if (peer_ni->ksnp_proto == NULL) {
1216 /* Never connected before.
1217 * NB recv_hello may have returned EPROTO to signal my peer_ni
1218 * wants a different protocol than the one I asked for.
1220 LASSERT(list_empty(&peer_ni->ksnp_conns));
1222 peer_ni->ksnp_proto = conn->ksnc_proto;
1223 peer_ni->ksnp_incarnation = incarnation;
1226 if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1227 peer_ni->ksnp_incarnation != incarnation) {
1228 /* peer_ni rebooted or I've got the wrong protocol version */
1229 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1231 peer_ni->ksnp_proto = NULL;
1233 warn = peer_ni->ksnp_incarnation != incarnation ?
1234 "peer_ni rebooted" :
1235 "wrong proto version";
1245 warn = "lost conn race";
1248 warn = "retry with different protocol version";
1252 /* Refuse to duplicate an existing connection, unless this is a
1253 * loopback connection */
1254 if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1255 list_for_each(tmp, &peer_ni->ksnp_conns) {
1256 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1258 if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1259 conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1260 conn2->ksnc_type != conn->ksnc_type)
1263 /* Reply on a passive connection attempt so the peer_ni
1264 * realises we're connected. */
1274 /* If the connection created by this route didn't bind to the IP
1275 * address the route connected to, the connection/route matching
1276 * code below probably isn't going to work. */
1278 route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1279 CERROR("Route %s %pI4h connected to %pI4h\n",
1280 libcfs_id2str(peer_ni->ksnp_id),
1281 &route->ksnr_ipaddr,
1282 &conn->ksnc_ipaddr);
1285 /* Search for a route corresponding to the new connection and
1286 * create an association. This allows incoming connections created
1287 * by routes in my peer_ni to match my own route entries so I don't
1288 * continually create duplicate routes. */
1289 list_for_each(tmp, &peer_ni->ksnp_routes) {
1290 route = list_entry(tmp, struct ksock_route, ksnr_list);
1292 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1295 ksocknal_associate_route_conn_locked(route, conn);
1299 conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
1300 peer_ni->ksnp_last_alive = ktime_get_seconds();
1301 peer_ni->ksnp_send_keepalive = 0;
1302 peer_ni->ksnp_error = 0;
1304 sched = ksocknal_choose_scheduler_locked(cpt);
1306 CERROR("no schedulers available. node is unhealthy\n");
1310 * The cpt might have changed if we ended up selecting a non cpt
1311 * native scheduler. So use the scheduler's cpt instead.
1313 cpt = sched->kss_cpt;
1314 sched->kss_nconns++;
1315 conn->ksnc_scheduler = sched;
1317 conn->ksnc_tx_last_post = ktime_get_seconds();
1318 /* Set the deadline for the outgoing HELLO to drain */
1319 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1320 conn->ksnc_tx_deadline = ktime_get_seconds() +
1322 smp_mb(); /* order with adding to peer_ni's conn list */
1324 list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1325 ksocknal_conn_addref(conn);
1327 ksocknal_new_packet(conn, 0);
1329 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1331 /* Take packets blocking for this connection. */
1332 list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1333 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1337 list_del(&tx->tx_list);
1338 ksocknal_queue_tx_locked(tx, conn);
1341 write_unlock_bh(global_lock);
1343 /* We've now got a new connection. Any errors from here on are just
1344 * like "normal" comms errors and we close the connection normally.
1345 * NB (a) we still have to send the reply HELLO for passive
1347 * (b) normal I/O on the conn is blocked until I setup and call the
1351 CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1352 " incarnation:%lld sched[%d]\n",
1353 libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1354 &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1355 conn->ksnc_port, incarnation, cpt);
1358 /* additional routes after interface exchange? */
1359 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1360 hello->kshm_ips, hello->kshm_nips);
1362 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1364 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1367 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1368 kshm_ips[LNET_INTERFACES_NUM]));
1370 /* setup the socket AFTER I've received hello (it disables
1371 * SO_LINGER). I might call back to the acceptor who may want
1372 * to send a protocol version response and then close the
1373 * socket; this ensures the socket only tears down after the
1374 * response has been sent. */
1376 rc = ksocknal_lib_setup_sock(sock);
1378 write_lock_bh(global_lock);
1380 /* NB my callbacks block while I hold ksnd_global_lock */
1381 ksocknal_lib_set_callback(sock, conn);
1384 peer_ni->ksnp_accepting--;
1386 write_unlock_bh(global_lock);
1389 write_lock_bh(global_lock);
1390 if (!conn->ksnc_closing) {
1391 /* could be closed by another thread */
1392 ksocknal_close_conn_locked(conn, rc);
1394 write_unlock_bh(global_lock);
1395 } else if (ksocknal_connsock_addref(conn) == 0) {
1396 /* Allow I/O to proceed. */
1397 ksocknal_read_callback(conn);
1398 ksocknal_write_callback(conn);
1399 ksocknal_connsock_decref(conn);
1402 ksocknal_connsock_decref(conn);
1403 ksocknal_conn_decref(conn);
1407 if (!peer_ni->ksnp_closing &&
1408 list_empty(&peer_ni->ksnp_conns) &&
1409 list_empty(&peer_ni->ksnp_routes)) {
1410 list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
1411 ksocknal_unlink_peer_locked(peer_ni);
1414 write_unlock_bh(global_lock);
1418 CERROR("Not creating conn %s type %d: %s\n",
1419 libcfs_id2str(peerid), conn->ksnc_type, warn);
1421 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1422 libcfs_id2str(peerid), conn->ksnc_type, warn);
1427 /* Request retry by replying with CONN_NONE
1428 * ksnc_proto has been set already */
1429 conn->ksnc_type = SOCKLND_CONN_NONE;
1430 hello->kshm_nips = 0;
1431 ksocknal_send_hello(ni, conn, peerid.nid, hello);
1434 write_lock_bh(global_lock);
1435 peer_ni->ksnp_accepting--;
1436 write_unlock_bh(global_lock);
1440 * If we get here without an error code, just use -EALREADY.
1441 * Depending on how we got here, the error may be positive
1442 * or negative. Normalize the value for ksocknal_txlist_done().
1444 rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1445 ksocknal_txlist_done(ni, &zombies, rc2);
1446 ksocknal_peer_decref(peer_ni);
1450 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1451 kshm_ips[LNET_INTERFACES_NUM]));
1453 LIBCFS_FREE(conn, sizeof(*conn));
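/*
 * Teardown happens in stages: ksocknal_close_conn_locked() queues the
 * conn on the reaper's deathrow; ksocknal_terminate_conn() and
 * ksocknal_destroy_conn() below then finish the job from reaper
 * context.
 */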
1461 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1463 /* This just does the immediate housekeeping, and queues the
1464 * connection for the reaper to terminate.
1465 * Caller holds ksnd_global_lock exclusively in irq context */
1466 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1467 struct ksock_route *route;
1468 struct ksock_conn *conn2;
1469 struct list_head *tmp;
1471 LASSERT(peer_ni->ksnp_error == 0);
1472 LASSERT(!conn->ksnc_closing);
1473 conn->ksnc_closing = 1;
1475 /* ksnd_deathrow_conns takes over peer_ni's ref */
1476 list_del(&conn->ksnc_list);
1478 route = conn->ksnc_route;
1479 if (route != NULL) {
1480 /* dissociate conn from route... */
1481 LASSERT(!route->ksnr_deleted);
1482 LASSERT((route->ksnr_connected & BIT(conn->ksnc_type)) != 0);
1485 list_for_each(tmp, &peer_ni->ksnp_conns) {
1486 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1488 if (conn2->ksnc_route == route &&
1489 conn2->ksnc_type == conn->ksnc_type)
1495 route->ksnr_connected &= ~BIT(conn->ksnc_type);
1497 conn->ksnc_route = NULL;
1499 ksocknal_route_decref(route); /* drop conn's ref on route */
1502 if (list_empty(&peer_ni->ksnp_conns)) {
1503 /* No more connections to this peer_ni */
1505 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1506 struct ksock_tx *tx;
1508 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1510 /* throw them to the last connection...,
1511 * these TXs will be sent to /dev/null by the scheduler */
1512 list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1514 ksocknal_tx_prep(conn, tx);
1516 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1517 list_splice_init(&peer_ni->ksnp_tx_queue,
1518 &conn->ksnc_tx_queue);
1519 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1522 /* renegotiate protocol version */
1523 peer_ni->ksnp_proto = NULL;
1524 /* stash last conn close reason */
1525 peer_ni->ksnp_error = error;
1527 if (list_empty(&peer_ni->ksnp_routes)) {
1528 /* I've just closed last conn belonging to a
1529 * peer_ni with no routes to it */
1530 ksocknal_unlink_peer_locked(peer_ni);
1534 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1536 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
1537 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1539 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1543 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1546 time64_t last_alive = 0;
1548 /* There has been a connection failure or comms error; but I'll only
1549 * tell LNET I think the peer_ni is dead if it's to another kernel and
1550 * there are no connections or connection attempts in existence. */
1552 read_lock(&ksocknal_data.ksnd_global_lock);
1554 if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1555 list_empty(&peer_ni->ksnp_conns) &&
1556 peer_ni->ksnp_accepting == 0 &&
1557 ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1559 last_alive = peer_ni->ksnp_last_alive;
1562 read_unlock(&ksocknal_data.ksnd_global_lock);
1565 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
1566 false, false, last_alive);
1570 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1572 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1573 struct ksock_tx *tx;
1574 struct ksock_tx *tmp;
1577 /* NB safe to finalize TXs because closing of socket will
1578 * abort all buffered data */
1579 LASSERT(conn->ksnc_sock == NULL);
1581 spin_lock(&peer_ni->ksnp_lock);
1583 list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1584 if (tx->tx_conn != conn)
1587 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1589 tx->tx_msg.ksm_zc_cookies[0] = 0;
1590 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1591 list_move(&tx->tx_zc_list, &zlist);
1594 spin_unlock(&peer_ni->ksnp_lock);
1596 while (!list_empty(&zlist)) {
1597 tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
1599 list_del(&tx->tx_zc_list);
1600 ksocknal_tx_decref(tx);
1605 ksocknal_terminate_conn(struct ksock_conn *conn)
1607 /* This gets called by the reaper (guaranteed thread context) to
1608 * disengage the socket from its callbacks and close it.
1609 * ksnc_conn_refcount will eventually hit zero, and then the reaper will
1611 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1612 struct ksock_sched *sched = conn->ksnc_scheduler;
1615 LASSERT(conn->ksnc_closing);
1617 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1618 spin_lock_bh(&sched->kss_lock);
1620 /* a closing conn is always ready to tx */
1621 conn->ksnc_tx_ready = 1;
1623 if (!conn->ksnc_tx_scheduled &&
1624 !list_empty(&conn->ksnc_tx_queue)) {
1625 list_add_tail(&conn->ksnc_tx_list,
1626 &sched->kss_tx_conns);
1627 conn->ksnc_tx_scheduled = 1;
1628 /* extra ref for scheduler */
1629 ksocknal_conn_addref(conn);
1631 wake_up (&sched->kss_waitq);
1634 spin_unlock_bh(&sched->kss_lock);
1636 /* serialise with callbacks */
1637 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1639 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1641 /* OK, so this conn may not be completely disengaged from its
1642 * scheduler yet, but it _has_ committed to terminate... */
1643 conn->ksnc_scheduler->kss_nconns--;
1645 if (peer_ni->ksnp_error != 0) {
1646 /* peer_ni's last conn closed in error */
1647 LASSERT(list_empty(&peer_ni->ksnp_conns));
1649 peer_ni->ksnp_error = 0; /* avoid multiple notifications */
1652 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1655 ksocknal_peer_failed(peer_ni);
1657 /* The socket is closed on the final put; either here, or in
1658 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1659 * when the connection was established, this will close the socket
1660 * immediately, aborting anything buffered in it. Any hung
1661 * zero-copy transmits will therefore complete in finite time. */
1662 ksocknal_connsock_decref(conn);
1666 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1668 /* Queue the conn for the reaper to destroy */
1669 LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1670 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1672 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1673 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1675 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1679 ksocknal_destroy_conn(struct ksock_conn *conn)
1683 /* Final coup-de-grace of the reaper */
1684 CDEBUG (D_NET, "connection %p\n", conn);
1686 LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1687 LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1688 LASSERT (conn->ksnc_sock == NULL);
1689 LASSERT (conn->ksnc_route == NULL);
1690 LASSERT (!conn->ksnc_tx_scheduled);
1691 LASSERT (!conn->ksnc_rx_scheduled);
1692 LASSERT(list_empty(&conn->ksnc_tx_queue));
1694 /* complete current receive if any */
1695 switch (conn->ksnc_rx_state) {
1696 case SOCKNAL_RX_LNET_PAYLOAD:
1697 last_rcv = conn->ksnc_rx_deadline -
1699 CERROR("Completing partial receive from %s[%d], "
1700 "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1701 "last alive is %lld secs ago\n",
1702 libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1703 &conn->ksnc_ipaddr, conn->ksnc_port,
1704 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1705 ktime_get_seconds() - last_rcv);
1706 if (conn->ksnc_lnet_msg)
1707 conn->ksnc_lnet_msg->msg_health_status =
1708 LNET_MSG_STATUS_REMOTE_ERROR;
1709 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1711 case SOCKNAL_RX_LNET_HEADER:
1712 if (conn->ksnc_rx_started)
1713 CERROR("Incomplete receive of lnet header from %s, "
1714 "ip %pI4h:%d, with error, protocol: %d.x.\n",
1715 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1716 &conn->ksnc_ipaddr, conn->ksnc_port,
1717 conn->ksnc_proto->pro_version);
1719 case SOCKNAL_RX_KSM_HEADER:
1720 if (conn->ksnc_rx_started)
1721 CERROR("Incomplete receive of ksock message from %s, "
1722 "ip %pI4h:%d, with error, protocol: %d.x.\n",
1723 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1724 &conn->ksnc_ipaddr, conn->ksnc_port,
1725 conn->ksnc_proto->pro_version);
1727 case SOCKNAL_RX_SLOP:
1728 if (conn->ksnc_rx_started)
1729 CERROR("Incomplete receive of slops from %s, "
1730 "ip %pI4h:%d, with error\n",
1731 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1732 &conn->ksnc_ipaddr, conn->ksnc_port);
1739 ksocknal_peer_decref(conn->ksnc_peer);
1741 LIBCFS_FREE (conn, sizeof (*conn));
1745 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
1747 struct ksock_conn *conn;
1748 struct list_head *ctmp;
1749 struct list_head *cnxt;
1752 list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1753 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1756 conn->ksnc_ipaddr == ipaddr) {
1758 ksocknal_close_conn_locked (conn, why);
1766 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1768 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1769 u32 ipaddr = conn->ksnc_ipaddr;
1772 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1774 count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1776 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1782 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1784 struct ksock_peer_ni *peer_ni;
1785 struct hlist_node *pnxt;
1791 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1793 if (id.nid != LNET_NID_ANY) {
1794 lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
1798 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1801 for (i = lo; i <= hi; i++) {
1802 hlist_for_each_entry_safe(peer_ni, pnxt,
1803 &ksocknal_data.ksnd_peers[i],
1806 if (!((id.nid == LNET_NID_ANY ||
1807 id.nid == peer_ni->ksnp_id.nid) &&
1808 (id.pid == LNET_PID_ANY ||
1809 id.pid == peer_ni->ksnp_id.pid)))
1812 count += ksocknal_close_peer_conns_locked(peer_ni,
1817 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1819 /* wildcards always succeed */
1820 if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1823 return (count == 0 ? -ENOENT : 0);
1827 ksocknal_notify_gw_down(lnet_nid_t gw_nid)
1829 /* The router is telling me she's been notified of a change in
1832 struct lnet_process_id id = {
1834 .pid = LNET_PID_ANY,
1837 CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));
1839 /* If the gateway crashed, close all open connections... */
1840 ksocknal_close_matching_conns(id, 0);
1843 /* We can only establish new connections
1844 * if we have autoroutes, and these connect on demand. */
1848 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1852 struct list_head *tmp;
1853 struct ksock_conn *conn;
1855 for (index = 0; ; index++) {
1856 read_lock(&ksocknal_data.ksnd_global_lock);
1861 list_for_each(tmp, &peer_ni->ksnp_conns) {
1863 conn = list_entry(tmp, struct ksock_conn,
1865 ksocknal_conn_addref(conn);
1870 read_unlock(&ksocknal_data.ksnd_global_lock);
1875 ksocknal_lib_push_conn (conn);
1876 ksocknal_conn_decref(conn);
1881 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1888 if (id.nid != LNET_NID_ANY) {
1889 lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
1893 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1896 for (bkt = lo; bkt <= hi; bkt++) {
1897 int peer_off; /* searching offset in peer_ni hash table */
1899 for (peer_off = 0; ; peer_off++) {
1900 struct ksock_peer_ni *peer_ni;
1903 read_lock(&ksocknal_data.ksnd_global_lock);
1904 hlist_for_each_entry(peer_ni,
1905 &ksocknal_data.ksnd_peers[bkt],
1907 if (!((id.nid == LNET_NID_ANY ||
1908 id.nid == peer_ni->ksnp_id.nid) &&
1909 (id.pid == LNET_PID_ANY ||
1910 id.pid == peer_ni->ksnp_id.pid)))
1913 if (i++ == peer_off) {
1914 ksocknal_peer_addref(peer_ni);
1918 read_unlock(&ksocknal_data.ksnd_global_lock);
1920 if (i <= peer_off) /* no match */
1924 ksocknal_push_peer(peer_ni);
1925 ksocknal_peer_decref(peer_ni);
1932 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1934 struct ksock_net *net = ni->ni_data;
1935 struct ksock_interface *iface;
1939 struct ksock_peer_ni *peer_ni;
1940 struct list_head *rtmp;
1941 struct ksock_route *route;
1943 if (ipaddress == 0 ||
1947 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1949 iface = ksocknal_ip2iface(ni, ipaddress);
1950 if (iface != NULL) {
1951 /* silently ignore dups */
1953 } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
1956 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1958 iface->ksni_index = ksocknal_ip2index(ipaddress, ni);
1959 iface->ksni_ipaddr = ipaddress;
1960 iface->ksni_netmask = netmask;
1961 iface->ksni_nroutes = 0;
1962 iface->ksni_npeers = 0;
1964 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
1965 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1966 if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1967 iface->ksni_npeers++;
1969 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1970 route = list_entry(rtmp,
1974 if (route->ksnr_myiface ==
1976 iface->ksni_nroutes++;
1981 /* NB only new connections will pay attention to the new
1986 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1992 ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni,
1993 __u32 ipaddr, int index)
1995 struct list_head *tmp;
1996 struct list_head *nxt;
1997 struct ksock_route *route;
1998 struct ksock_conn *conn;
2002 for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2003 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2004 for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2005 peer_ni->ksnp_passive_ips[j-1] =
2006 peer_ni->ksnp_passive_ips[j];
2007 peer_ni->ksnp_n_passive_ips--;
2011 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2012 route = list_entry(tmp, struct ksock_route, ksnr_list);
2014 if (route->ksnr_myiface != index)
2017 if (route->ksnr_share_count != 0) {
2018 /* Manually created; keep, but unbind */
2019 route->ksnr_myiface = -1;
2021 ksocknal_del_route_locked(route);
2025 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2026 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2028 if (conn->ksnc_myipaddr == ipaddr)
2029 ksocknal_close_conn_locked (conn, 0);
2034 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2036 struct ksock_net *net = ni->ni_data;
2038 struct hlist_node *nxt;
2039 struct ksock_peer_ni *peer_ni;
2045 index = ksocknal_ip2index(ipaddress, ni);
2047 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2049 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2050 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2052 if (!(ipaddress == 0 ||
2053 ipaddress == this_ip))
2058 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2059 net->ksnn_interfaces[j-1] =
2060 net->ksnn_interfaces[j];
2062 net->ksnn_ninterfaces--;
2064 hash_for_each_safe(ksocknal_data.ksnd_peers, j,
2065 nxt, peer_ni, ksnp_list) {
2066 if (peer_ni->ksnp_ni != ni)
2069 ksocknal_peer_del_interface_locked(peer_ni,
2074 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
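/*
 * LND ioctl entry point: the IOC_LIBCFS_* commands below are how
 * userspace tools inspect and manipulate this NI's interfaces, peers
 * and connections.
 */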
2080 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
2082 struct lnet_process_id id = {0};
2083 struct libcfs_ioctl_data *data = arg;
2087 case IOC_LIBCFS_GET_INTERFACE: {
2088 struct ksock_net *net = ni->ni_data;
2089 struct ksock_interface *iface;
2091 read_lock(&ksocknal_data.ksnd_global_lock);
2093 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2097 iface = &net->ksnn_interfaces[data->ioc_count];
2099 data->ioc_u32[0] = iface->ksni_ipaddr;
2100 data->ioc_u32[1] = iface->ksni_netmask;
2101 data->ioc_u32[2] = iface->ksni_npeers;
2102 data->ioc_u32[3] = iface->ksni_nroutes;
2105 read_unlock(&ksocknal_data.ksnd_global_lock);
2109 case IOC_LIBCFS_ADD_INTERFACE:
2110 return ksocknal_add_interface(ni,
2111 data->ioc_u32[0], /* IP address */
2112 data->ioc_u32[1]); /* net mask */
2114 case IOC_LIBCFS_DEL_INTERFACE:
2115 return ksocknal_del_interface(ni,
2116 data->ioc_u32[0]); /* IP address */
2118 case IOC_LIBCFS_GET_PEER: {
2123 int share_count = 0;
2125 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2126 &id, &myip, &ip, &port,
2127 &conn_count, &share_count);
2131 data->ioc_nid = id.nid;
2132 data->ioc_count = share_count;
2133 data->ioc_u32[0] = ip;
2134 data->ioc_u32[1] = port;
2135 data->ioc_u32[2] = myip;
2136 data->ioc_u32[3] = conn_count;
2137 data->ioc_u32[4] = id.pid;
2141 case IOC_LIBCFS_ADD_PEER:
2142 id.nid = data->ioc_nid;
2143 id.pid = LNET_PID_LUSTRE;
2144 return ksocknal_add_peer (ni, id,
2145 data->ioc_u32[0], /* IP */
2146 data->ioc_u32[1]); /* port */
2148 case IOC_LIBCFS_DEL_PEER:
2149 id.nid = data->ioc_nid;
2150 id.pid = LNET_PID_ANY;
2151 return ksocknal_del_peer (ni, id,
2152 data->ioc_u32[0]); /* IP */
2154 case IOC_LIBCFS_GET_CONN: {
2158 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2163 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2165 data->ioc_count = txmem;
2166 data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
2167 data->ioc_flags = nagle;
2168 data->ioc_u32[0] = conn->ksnc_ipaddr;
2169 data->ioc_u32[1] = conn->ksnc_port;
2170 data->ioc_u32[2] = conn->ksnc_myipaddr;
2171 data->ioc_u32[3] = conn->ksnc_type;
2172 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
2173 data->ioc_u32[5] = rxmem;
2174 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2175 ksocknal_conn_decref(conn);
2179 case IOC_LIBCFS_CLOSE_CONNECTION:
2180 id.nid = data->ioc_nid;
2181 id.pid = LNET_PID_ANY;
2182 return ksocknal_close_matching_conns (id,
2185 case IOC_LIBCFS_REGISTER_MYNID:
2186 /* Ignore if this is a noop */
2187 if (data->ioc_nid == ni->ni_nid)
2190 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2191 libcfs_nid2str(data->ioc_nid),
2192 libcfs_nid2str(ni->ni_nid));
2195 case IOC_LIBCFS_PUSH_CONNECTION:
2196 id.nid = data->ioc_nid;
2197 id.pid = LNET_PID_ANY;
2198 return ksocknal_push(ni, id);
2207 ksocknal_free_buffers (void)
2209 LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2211 if (ksocknal_data.ksnd_schedulers != NULL)
2212 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
2214 spin_lock(&ksocknal_data.ksnd_tx_lock);
2216 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2218 struct ksock_tx *tx;
2220 list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
2221 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2223 while (!list_empty(&zlist)) {
2224 tx = list_entry(zlist.next, struct ksock_tx, tx_list);
2225 list_del(&tx->tx_list);
2226 LIBCFS_FREE(tx, tx->tx_desc_size);
2229 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2234 ksocknal_base_shutdown(void)
2236 struct ksock_sched *sched;
2237 struct ksock_peer_ni *peer_ni;
2240 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2241 atomic_read (&libcfs_kmemory));
2242 LASSERT (ksocknal_data.ksnd_nnets == 0);
2244 switch (ksocknal_data.ksnd_init) {
2249 case SOCKNAL_INIT_ALL:
2250 case SOCKNAL_INIT_DATA:
2251 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
2254 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2255 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2256 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2257 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2258 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2260 if (ksocknal_data.ksnd_schedulers != NULL) {
2261 cfs_percpt_for_each(sched, i,
2262 ksocknal_data.ksnd_schedulers) {
2264 LASSERT(list_empty(&sched->kss_tx_conns));
2265 LASSERT(list_empty(&sched->kss_rx_conns));
2266 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2267 LASSERT(sched->kss_nconns == 0);
2271 /* flag threads to terminate; wake and wait for them to die */
2272 ksocknal_data.ksnd_shuttingdown = 1;
2273 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2274 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2276 if (ksocknal_data.ksnd_schedulers != NULL) {
2277 cfs_percpt_for_each(sched, i,
2278 ksocknal_data.ksnd_schedulers)
2279 wake_up_all(&sched->kss_waitq);
2282 wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
2283 ksocknal_data.ksnd_nthreads == 0,
2284 "waiting for %d threads to terminate\n",
2285 ksocknal_data.ksnd_nthreads);
2287 ksocknal_free_buffers();
2289 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2293 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2294 atomic_read (&libcfs_kmemory));
2296 module_put(THIS_MODULE);
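/*
 * One-time global initialisation: set up the peer_ni hash, locks and
 * queues, create a scheduler block per CPT and start the connd and
 * reaper threads.  Undone by ksocknal_base_shutdown() above when the
 * last net goes away.
 */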
2300 ksocknal_base_startup(void)
2302 struct ksock_sched *sched;
2306 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2307 LASSERT(ksocknal_data.ksnd_nnets == 0);
2309 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2311 hash_init(ksocknal_data.ksnd_peers);
2313 rwlock_init(&ksocknal_data.ksnd_global_lock);
2314 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2316 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2317 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2318 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2319 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2320 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2322 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2323 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2324 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2325 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2327 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2328 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2330 /* NB memset above zeros whole of ksocknal_data */
2332 /* flag lists/ptrs/locks initialised */
2333 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2334 if (!try_module_get(THIS_MODULE))
2337 /* Create a scheduler block per available CPT */
2338 ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2340 if (ksocknal_data.ksnd_schedulers == NULL)
2343 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2347 * make sure not to allocate more threads than there are
2348 * cores/CPUs in the CPT
2350 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2351 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2352 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2355 * max to half of CPUs, assume another half should be
2356 * reserved for upper layer modules
2358 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2361 sched->kss_nthreads_max = nthrs;
2364 spin_lock_init(&sched->kss_lock);
2365 INIT_LIST_HEAD(&sched->kss_rx_conns);
2366 INIT_LIST_HEAD(&sched->kss_tx_conns);
2367 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2368 init_waitqueue_head(&sched->kss_waitq);
2371 ksocknal_data.ksnd_connd_starting = 0;
2372 ksocknal_data.ksnd_connd_failed_stamp = 0;
2373 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2374 /* must have at least 2 connds to remain responsive to accepts while
2376 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2377 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2379 if (*ksocknal_tunables.ksnd_nconnds_max <
2380 *ksocknal_tunables.ksnd_nconnds) {
2381 ksocknal_tunables.ksnd_nconnds_max =
2382 ksocknal_tunables.ksnd_nconnds;
2385 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2387 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2388 ksocknal_data.ksnd_connd_starting++;
2389 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2392 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2393 rc = ksocknal_thread_start(ksocknal_connd,
2394 (void *)((uintptr_t)i), name);
2396 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2397 ksocknal_data.ksnd_connd_starting--;
2398 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2399 CERROR("Can't spawn socknal connd: %d\n", rc);
2404 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2406 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2410 /* flag everything initialised */
2411 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2416 ksocknal_base_shutdown();
static int
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
	struct ksock_peer_ni *peer_ni;
	int i;

	read_lock(&ksocknal_data.ksnd_global_lock);

	hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
		struct ksock_route *route;
		struct ksock_conn *conn;

		if (peer_ni->ksnp_ni != ni)
			continue;

		CWARN("Active peer_ni on shutdown: %s, ref %d, "
		      "closing %d, accepting %d, err %d, zcookie %llu, "
		      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
		      atomic_read(&peer_ni->ksnp_refcount),
		      peer_ni->ksnp_closing,
		      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
		      peer_ni->ksnp_zc_next_cookie,
		      !list_empty(&peer_ni->ksnp_tx_queue),
		      !list_empty(&peer_ni->ksnp_zc_req_list));

		list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
			      "del %d\n", atomic_read(&route->ksnr_refcount),
			      route->ksnr_scheduled, route->ksnr_connecting,
			      route->ksnr_connected, route->ksnr_deleted);
		}

		list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
			CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
			      atomic_read(&conn->ksnc_conn_refcount),
			      atomic_read(&conn->ksnc_sock_refcount),
			      conn->ksnc_type, conn->ksnc_closing);
		}
		break;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
	return 0;
}
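
/*
 * Tear down one socklnd network interface: bias ksnn_npeers so no new
 * peers can be created, delete every peer on this NI, wait for the peer
 * count to drop back to the bias, then free the per-net state.  The
 * global module state goes away with the last net.
 */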
void
ksocknal_shutdown(struct lnet_ni *ni)
{
	struct ksock_net *net = ni->ni_data;
	struct lnet_process_id anyid = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};
	int i;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);

	/* prevent new peers */
	atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);

	/* Wait for all peer_ni state to clean up */
	wait_var_event_warning(&net->ksnn_npeers,
			       atomic_read(&net->ksnn_npeers) ==
			       SOCKNAL_SHUTDOWN_BIAS,
			       "waiting for %d peers to disconnect\n",
			       ksocknal_debug_peerhash(ni) +
			       atomic_read(&net->ksnn_npeers) -
			       SOCKNAL_SHUTDOWN_BIAS);

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
	}

	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
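
/*
 * Count the interfaces of @net that no other registered net is already
 * using.  Alias devices ("eth0:1") are compared by their base name, so
 * an alias of an interface that is already in use does not count as new.
 */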
static int
ksocknal_search_new_ipif(struct ksock_net *net)
{
	int new_ipif = 0;
	int i;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
		char *colon = strchr(ifnam, ':');
		int found = 0;
		struct ksock_net *tmp;
		int j;

		if (colon != NULL) /* ignore alias device */
			*colon = 0;

		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
				    ksnn_list) {
			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
				char *ifnam2 =
					&tmp->ksnn_interfaces[j].ksni_name[0];
				char *colon2 = strchr(ifnam2, ':');

				if (colon2 != NULL)
					*colon2 = 0;

				found = strcmp(ifnam, ifnam2) == 0;
				if (colon2 != NULL)
					*colon2 = ':';
			}
			if (found)
				break;
		}

		new_ipif += !found;
		if (colon != NULL)
			*colon = ':';
	}

	return new_ipif;
}
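
/*
 * Start scheduler threads for one per-CPT scheduler block, never exceeding
 * kss_nthreads_max.  Threads are named socknal_sd<cpt>_<id>, e.g.
 * "socknal_sd00_01".  If the scheduler already has threads (a new
 * interface was added), at most two more are started.
 */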
static int
ksocknal_start_schedulers(struct ksock_sched *sched)
{
	int nthrs;
	int rc = 0;
	int i;

	if (sched->kss_nthreads == 0) {
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = sched->kss_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       sched->kss_cpt);
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
		}
		nthrs = min(nthrs, sched->kss_nthreads_max);
	} else {
		LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
		/* increase two threads if there is new interface */
		nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
	}

	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];

		id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
			 sched->kss_cpt, (int)KSOCK_THREAD_SID(id));

		rc = ksocknal_thread_start(ksocknal_scheduler,
					   (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       sched->kss_cpt, (int)KSOCK_THREAD_SID(id), rc);
		break;
	}

	sched->kss_nthreads += i;
	return rc;
}
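
/*
 * Ensure every CPT used by @net has scheduler threads running.  CPTs that
 * already have threads are only topped up when the net brings a genuinely
 * new interface (see ksocknal_search_new_ipif() above).
 */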
int
ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
{
	int newif = ksocknal_search_new_ipif(net);
	int rc;
	int i;

	if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
		return -EINVAL;

	for (i = 0; i < ncpts; i++) {
		struct ksock_sched *sched;
		int cpt = (cpts == NULL) ? i : cpts[i];

		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
		sched = ksocknal_data.ksnd_schedulers[cpt];

		if (!newif && sched->kss_nthreads > 0)
			continue;

		rc = ksocknal_start_schedulers(sched);
		if (rc != 0)
			return rc;
	}
	return 0;
}
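
/*
 * Bring up one socklnd NI: fill in unset tunables from the module
 * parameters, enumerate the node's IPv4 interfaces, bind the NI either to
 * the first interface discovered or to the interfaces named in the
 * configuration, start the scheduler threads, and derive the NID from the
 * chosen interface's IP address.
 */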
int
ksocknal_startup(struct lnet_ni *ni)
{
	struct ksock_net *net;
	struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
	struct ksock_interface *ksi = NULL;
	struct lnet_inetdev *ifaces = NULL;
	int i = 0;
	int rc;

	LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);

	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	net->ksnn_incarnation = ktime_get_real_ns();
	ni->ni_data = net;
	net_tunables = &ni->ni_net->net_tunables;

	if (net_tunables->lct_peer_timeout == -1)
		net_tunables->lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;

	if (net_tunables->lct_max_tx_credits == -1)
		net_tunables->lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;

	if (net_tunables->lct_peer_tx_credits == -1)
		net_tunables->lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;

	if (net_tunables->lct_peer_tx_credits >
	    net_tunables->lct_max_tx_credits)
		net_tunables->lct_peer_tx_credits =
			net_tunables->lct_max_tx_credits;

	if (net_tunables->lct_peer_rtr_credits == -1)
		net_tunables->lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;

	rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
	if (rc < 0)
		goto fail_1;

	if (!ni->ni_interfaces[0]) {
		ksi = &net->ksnn_interfaces[0];

		/* Use the first discovered interface */
		net->ksnn_ninterfaces = 1;
		ni->ni_dev_cpt = ifaces[0].li_cpt;
		ksi->ksni_ipaddr = ifaces[0].li_ipaddr;
		ksi->ksni_index = ksocknal_ip2index(ksi->ksni_ipaddr, ni);
		ksi->ksni_netmask = ifaces[0].li_netmask;
		strlcpy(ksi->ksni_name, ifaces[0].li_name,
			sizeof(ksi->ksni_name));
	} else {
		/* Before Multi-Rail ksocklnd would manage
		 * multiple interfaces with its own tcp bonding.
		 * If we encounter an old configuration using
		 * this tcp bonding approach then we need to
		 * handle more than one ni_interfaces.
		 *
		 * In Multi-Rail configuration only ONE ni_interface
		 * should exist. Each IP alias should be mapped to
		 * each 'struct net_ni'.
		 */
		for (i = 0; i < LNET_INTERFACES_NUM; i++) {
			int j;

			if (!ni->ni_interfaces[i])
				break;

			for (j = 0; j < LNET_INTERFACES_NUM; j++) {
				if (i != j && ni->ni_interfaces[j] &&
				    strcmp(ni->ni_interfaces[i],
					   ni->ni_interfaces[j]) == 0) {
					rc = -EEXIST;
					CERROR("ksocklnd: found duplicate %s at %d and %d, rc = %d\n",
					       ni->ni_interfaces[i], i, j, rc);
					goto fail_1;
				}
			}

			for (j = 0; j < rc; j++) {
				if (strcmp(ifaces[j].li_name,
					   ni->ni_interfaces[i]) != 0)
					continue;

				ksi = &net->ksnn_interfaces[j];
				ni->ni_dev_cpt = ifaces[j].li_cpt;
				ksi->ksni_ipaddr = ifaces[j].li_ipaddr;
				ksi->ksni_index =
					ksocknal_ip2index(ksi->ksni_ipaddr, ni);
				ksi->ksni_netmask = ifaces[j].li_netmask;
				strlcpy(ksi->ksni_name, ifaces[j].li_name,
					sizeof(ksi->ksni_name));
				net->ksnn_ninterfaces++;
			}
		}
		/* ni_interfaces don't map to all network interfaces */
		if (!ksi || net->ksnn_ninterfaces != i) {
			CERROR("ksocklnd: requested %d but only %d interfaces found\n",
			       i, net->ksnn_ninterfaces);
			goto fail_1;
		}
	}

	/* call it before add it to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ksi->ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

	ksocknal_data.ksnd_nnets++;

	return 0;

fail_1:
	LIBCFS_FREE(net, sizeof(*net));
fail_0:
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
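
/*
 * Module glue: the_ksocklnd describes the TCP LND to LNet.  It is
 * registered in ksocklnd_init() and unregistered in ksocklnd_exit();
 * LNet then calls ksocknal_startup()/ksocknal_shutdown() for each
 * network interface it brings up or down on this LND.
 */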
static void __exit ksocklnd_exit(void)
{
	lnet_unregister_lnd(&the_ksocklnd);
}

static const struct lnet_lnd the_ksocklnd = {
	.lnd_type		= SOCKLND,
	.lnd_startup		= ksocknal_startup,
	.lnd_shutdown		= ksocknal_shutdown,
	.lnd_ctl		= ksocknal_ctl,
	.lnd_send		= ksocknal_send,
	.lnd_recv		= ksocknal_recv,
	.lnd_notify_peer_down	= ksocknal_notify_gw_down,
	.lnd_accept		= ksocknal_accept,
};

static int __init ksocklnd_init(void)
{
	int rc;

	/* check ksnr_connected/connecting field large enough */
	BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
	BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);

	rc = ksocknal_tunables_init();
	if (rc != 0)
		return rc;

	lnet_register_lnd(&the_ksocklnd);

	return 0;
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);