4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lnet/klnds/socklnd/socklnd.c
34 * Author: Zach Brown <zab@zabbo.net>
35 * Author: Peter J. Braam <braam@clusterfs.com>
36 * Author: Phil Schwan <phil@clusterfs.com>
37 * Author: Eric Barton <eric@bartonsoftware.com>
41 #include <linux/inetdevice.h>
43 static struct lnet_lnd the_ksocklnd;
44 struct ksock_nal_data ksocknal_data;
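/* Find the local interface in this NI's ksock_net whose address matches
 * 'ip'; returns NULL if no configured interface is bound to that address. */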
46 static struct ksock_interface *
47 ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
49 struct ksock_net *net = ni->ni_data;
51 struct ksock_interface *iface;
53 for (i = 0; i < net->ksnn_ninterfaces; i++) {
54 LASSERT(i < LNET_INTERFACES_NUM);
55 iface = &net->ksnn_interfaces[i];
57 if (iface->ksni_ipaddr == ip)
64 static struct ksock_route *
65 ksocknal_create_route(__u32 ipaddr, int port)
67 struct ksock_route *route;
69 LIBCFS_ALLOC (route, sizeof (*route));
73 atomic_set (&route->ksnr_refcount, 1);
74 route->ksnr_peer = NULL;
75 route->ksnr_retry_interval = 0; /* OK to connect at any time */
76 route->ksnr_ipaddr = ipaddr;
77 route->ksnr_port = port;
78 route->ksnr_scheduled = 0;
79 route->ksnr_connecting = 0;
80 route->ksnr_connected = 0;
81 route->ksnr_deleted = 0;
82 route->ksnr_conn_count = 0;
83 route->ksnr_share_count = 0;
89 ksocknal_destroy_route(struct ksock_route *route)
91 LASSERT (atomic_read(&route->ksnr_refcount) == 0);
93 if (route->ksnr_peer != NULL)
94 ksocknal_peer_decref(route->ksnr_peer);
96 LIBCFS_FREE (route, sizeof (*route));
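/* Allocate and initialise a peer_ni on the CPT that 'id' hashes to.  The
 * new peer_ni holds one reference for the caller; creation fails if the
 * network is already shutting down. */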
100 ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
101 struct lnet_process_id id)
103 int cpt = lnet_cpt_of_nid(id.nid, ni);
104 struct ksock_net *net = ni->ni_data;
105 struct ksock_peer_ni *peer_ni;
107 LASSERT(id.nid != LNET_NID_ANY);
108 LASSERT(id.pid != LNET_PID_ANY);
109 LASSERT(!in_interrupt());
111 LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
115 peer_ni->ksnp_ni = ni;
116 peer_ni->ksnp_id = id;
117 atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
118 peer_ni->ksnp_closing = 0;
119 peer_ni->ksnp_accepting = 0;
120 peer_ni->ksnp_proto = NULL;
121 peer_ni->ksnp_last_alive = 0;
122 peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
124 INIT_LIST_HEAD(&peer_ni->ksnp_conns);
125 INIT_LIST_HEAD(&peer_ni->ksnp_routes);
126 INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
127 INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
128 spin_lock_init(&peer_ni->ksnp_lock);
130 spin_lock_bh(&net->ksnn_lock);
132 if (net->ksnn_shutdown) {
133 spin_unlock_bh(&net->ksnn_lock);
135 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
136 CERROR("Can't create peer_ni: network shutdown\n");
142 spin_unlock_bh(&net->ksnn_lock);
149 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
151 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
153 CDEBUG (D_NET, "peer_ni %s %p deleted\n",
154 libcfs_id2str(peer_ni->ksnp_id), peer_ni);
156 LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
157 LASSERT(peer_ni->ksnp_accepting == 0);
158 LASSERT(list_empty(&peer_ni->ksnp_conns));
159 LASSERT(list_empty(&peer_ni->ksnp_routes));
160 LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
161 LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
163 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
165 /* NB a peer_ni's connections and routes keep a reference on their peer_ni
166 * until they are destroyed, so we can be assured that _all_ state to
167 * do with this peer_ni has been cleaned up when its refcount drops to zero. */
169 spin_lock_bh(&net->ksnn_lock);
171 spin_unlock_bh(&net->ksnn_lock);
174 struct ksock_peer_ni *
175 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
177 struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178 struct list_head *tmp;
179 struct ksock_peer_ni *peer_ni;
181 list_for_each(tmp, peer_list) {
182 peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
184 LASSERT(!peer_ni->ksnp_closing);
186 if (peer_ni->ksnp_ni != ni)
189 if (peer_ni->ksnp_id.nid != id.nid ||
190 peer_ni->ksnp_id.pid != id.pid)
193 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
194 peer_ni, libcfs_id2str(id),
195 atomic_read(&peer_ni->ksnp_refcount));
201 struct ksock_peer_ni *
202 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
204 struct ksock_peer_ni *peer_ni;
206 read_lock(&ksocknal_data.ksnd_global_lock);
207 peer_ni = ksocknal_find_peer_locked(ni, id);
208 if (peer_ni != NULL) /* +1 ref for caller? */
209 ksocknal_peer_addref(peer_ni);
210 read_unlock(&ksocknal_data.ksnd_global_lock);
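/* Remove peer_ni from the global peer table: release its claims on the local
 * interfaces recorded in ksnp_passive_ips[], mark it closing and drop the
 * peer list's reference.  Caller must hold ksnd_global_lock in write mode. */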
216 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
220 struct ksock_interface *iface;
222 for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
223 LASSERT(i < LNET_INTERFACES_NUM);
224 ip = peer_ni->ksnp_passive_ips[i];
226 iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
228 * All IPs in peer_ni->ksnp_passive_ips[] come from the
229 * interface list, therefore the call must succeed.
231 LASSERT(iface != NULL);
233 CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
234 peer_ni, iface, iface->ksni_nroutes);
235 iface->ksni_npeers--;
238 LASSERT(list_empty(&peer_ni->ksnp_conns));
239 LASSERT(list_empty(&peer_ni->ksnp_routes));
240 LASSERT(!peer_ni->ksnp_closing);
241 peer_ni->ksnp_closing = 1;
242 list_del(&peer_ni->ksnp_list);
243 /* lose peerlist's ref */
244 ksocknal_peer_decref(peer_ni);
248 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
249 struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
250 int *port, int *conn_count, int *share_count)
252 struct ksock_peer_ni *peer_ni;
253 struct list_head *ptmp;
254 struct ksock_route *route;
255 struct list_head *rtmp;
260 read_lock(&ksocknal_data.ksnd_global_lock);
262 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
263 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
264 peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
266 if (peer_ni->ksnp_ni != ni)
269 if (peer_ni->ksnp_n_passive_ips == 0 &&
270 list_empty(&peer_ni->ksnp_routes)) {
274 *id = peer_ni->ksnp_id;
284 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
288 *id = peer_ni->ksnp_id;
289 *myip = peer_ni->ksnp_passive_ips[j];
298 list_for_each(rtmp, &peer_ni->ksnp_routes) {
302 route = list_entry(rtmp, struct ksock_route,
305 *id = peer_ni->ksnp_id;
306 *myip = route->ksnr_myipaddr;
307 *peer_ip = route->ksnr_ipaddr;
308 *port = route->ksnr_port;
309 *conn_count = route->ksnr_conn_count;
310 *share_count = route->ksnr_share_count;
317 read_unlock(&ksocknal_data.ksnd_global_lock);
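/* Bind 'conn' to 'route': take a route ref on behalf of the conn, move the
 * route's local-interface accounting if the connection bound to a different
 * local IP, mark this connection type as connected and allow further
 * connection attempts immediately. */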
322 ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
324 struct ksock_peer_ni *peer_ni = route->ksnr_peer;
325 int type = conn->ksnc_type;
326 struct ksock_interface *iface;
328 conn->ksnc_route = route;
329 ksocknal_route_addref(route);
331 if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
332 if (route->ksnr_myipaddr == 0) {
333 /* route wasn't bound locally yet (the initial route) */
334 CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
335 libcfs_id2str(peer_ni->ksnp_id),
337 &conn->ksnc_myipaddr);
339 CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
340 "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
342 &route->ksnr_myipaddr,
343 &conn->ksnc_myipaddr);
345 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
346 route->ksnr_myipaddr);
348 iface->ksni_nroutes--;
350 route->ksnr_myipaddr = conn->ksnc_myipaddr;
351 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
352 route->ksnr_myipaddr);
354 iface->ksni_nroutes++;
357 route->ksnr_connected |= (1<<type);
358 route->ksnr_conn_count++;
360 /* Successful connection => further attempts can
361 * proceed immediately */
362 route->ksnr_retry_interval = 0;
366 ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
368 struct list_head *tmp;
369 struct ksock_conn *conn;
370 struct ksock_route *route2;
372 LASSERT(!peer_ni->ksnp_closing);
373 LASSERT(route->ksnr_peer == NULL);
374 LASSERT(!route->ksnr_scheduled);
375 LASSERT(!route->ksnr_connecting);
376 LASSERT(route->ksnr_connected == 0);
378 /* LASSERT(unique) */
379 list_for_each(tmp, &peer_ni->ksnp_routes) {
380 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
382 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
383 CERROR("Duplicate route %s %pI4h\n",
384 libcfs_id2str(peer_ni->ksnp_id),
385 &route->ksnr_ipaddr);
390 route->ksnr_peer = peer_ni;
391 ksocknal_peer_addref(peer_ni);
392 /* peer_ni's routelist takes over my ref on 'route' */
393 list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
395 list_for_each(tmp, &peer_ni->ksnp_conns) {
396 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
398 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
401 ksocknal_associate_route_conn_locked(route, conn);
402 /* keep going (typed routes) */
407 ksocknal_del_route_locked(struct ksock_route *route)
409 struct ksock_peer_ni *peer_ni = route->ksnr_peer;
410 struct ksock_interface *iface;
411 struct ksock_conn *conn;
412 struct list_head *ctmp;
413 struct list_head *cnxt;
415 LASSERT(!route->ksnr_deleted);
417 /* Close associated conns */
418 list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
419 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
421 if (conn->ksnc_route != route)
424 ksocknal_close_conn_locked(conn, 0);
427 if (route->ksnr_myipaddr != 0) {
428 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
429 route->ksnr_myipaddr);
431 iface->ksni_nroutes--;
434 route->ksnr_deleted = 1;
435 list_del(&route->ksnr_list);
436 ksocknal_route_decref(route); /* drop peer_ni's ref */
438 if (list_empty(&peer_ni->ksnp_routes) &&
439 list_empty(&peer_ni->ksnp_conns)) {
440 /* I've just removed the last route to a peer_ni with no active connections */
442 ksocknal_unlink_peer_locked(peer_ni);
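/* Add a peer_ni/route for 'id' at ipaddr:port, creating the peer_ni if it
 * does not exist yet.  If a route to that address already exists only its
 * share count is bumped; otherwise the new route is attached and the
 * peer_ni's route list takes over the caller's reference on it. */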
447 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
450 struct list_head *tmp;
451 struct ksock_peer_ni *peer_ni;
452 struct ksock_peer_ni *peer2;
453 struct ksock_route *route;
454 struct ksock_route *route2;
457 if (id.nid == LNET_NID_ANY ||
458 id.pid == LNET_PID_ANY)
461 /* Have a brand new peer_ni ready... */
462 rc = ksocknal_create_peer(&peer_ni, ni, id);
466 route = ksocknal_create_route (ipaddr, port);
468 ksocknal_peer_decref(peer_ni);
472 write_lock_bh(&ksocknal_data.ksnd_global_lock);
474 /* always called with a ref on ni, so shutdown can't have started */
475 LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
477 peer2 = ksocknal_find_peer_locked(ni, id);
479 ksocknal_peer_decref(peer_ni);
482 /* peer_ni table takes my ref on peer_ni */
483 list_add_tail(&peer_ni->ksnp_list,
484 ksocknal_nid2peerlist(id.nid));
488 list_for_each(tmp, &peer_ni->ksnp_routes) {
489 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
491 if (route2->ksnr_ipaddr == ipaddr)
496 if (route2 == NULL) {
497 ksocknal_add_route_locked(peer_ni, route);
498 route->ksnr_share_count++;
500 ksocknal_route_decref(route);
501 route2->ksnr_share_count++;
504 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
510 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
512 struct ksock_conn *conn;
513 struct ksock_route *route;
514 struct list_head *tmp;
515 struct list_head *nxt;
518 LASSERT(!peer_ni->ksnp_closing);
520 /* Extra ref prevents peer_ni disappearing until I'm done with it */
521 ksocknal_peer_addref(peer_ni);
523 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
524 route = list_entry(tmp, struct ksock_route, ksnr_list);
527 if (!(ip == 0 || route->ksnr_ipaddr == ip))
530 route->ksnr_share_count = 0;
531 /* This deletes associated conns too */
532 ksocknal_del_route_locked(route);
536 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
537 route = list_entry(tmp, struct ksock_route, ksnr_list);
538 nshared += route->ksnr_share_count;
542 /* remove everything else if there are no explicit entries left */
545 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
546 route = list_entry(tmp, struct ksock_route, ksnr_list);
548 /* we should only be removing auto-entries */
549 LASSERT(route->ksnr_share_count == 0);
550 ksocknal_del_route_locked(route);
553 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
554 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
556 ksocknal_close_conn_locked(conn, 0);
560 ksocknal_peer_decref(peer_ni);
561 /* NB peer_ni unlinks itself when last conn/route is removed */
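/* Delete peers matching 'id' (wildcards allowed) on this NI, restricting to
 * routes bound to 'ip' when it is non-zero.  Any transmits still queued on a
 * peer_ni left with no conns/routes are completed with -ENETDOWN. */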
565 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
567 struct list_head zombies = LIST_HEAD_INIT(zombies);
568 struct list_head *ptmp;
569 struct list_head *pnxt;
570 struct ksock_peer_ni *peer_ni;
576 write_lock_bh(&ksocknal_data.ksnd_global_lock);
578 if (id.nid != LNET_NID_ANY) {
579 hi = (int)(ksocknal_nid2peerlist(id.nid) -
580 ksocknal_data.ksnd_peers);
584 hi = ksocknal_data.ksnd_peer_hash_size - 1;
587 for (i = lo; i <= hi; i++) {
588 list_for_each_safe(ptmp, pnxt,
589 &ksocknal_data.ksnd_peers[i]) {
590 peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
592 if (peer_ni->ksnp_ni != ni)
595 if (!((id.nid == LNET_NID_ANY ||
596 peer_ni->ksnp_id.nid == id.nid) &&
597 (id.pid == LNET_PID_ANY ||
598 peer_ni->ksnp_id.pid == id.pid)))
601 ksocknal_peer_addref(peer_ni); /* a ref for me... */
603 ksocknal_del_peer_locked(peer_ni, ip);
605 if (peer_ni->ksnp_closing &&
606 !list_empty(&peer_ni->ksnp_tx_queue)) {
607 LASSERT(list_empty(&peer_ni->ksnp_conns));
608 LASSERT(list_empty(&peer_ni->ksnp_routes));
610 list_splice_init(&peer_ni->ksnp_tx_queue,
614 ksocknal_peer_decref(peer_ni); /* ...till here */
616 rc = 0; /* matched! */
620 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
622 ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
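/* Return the index'th connection on this NI (in peer hash order) with a
 * reference held for the caller, or NULL when 'index' runs past the last
 * connection. */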
627 static struct ksock_conn *
628 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
630 struct ksock_peer_ni *peer_ni;
631 struct list_head *ptmp;
632 struct ksock_conn *conn;
633 struct list_head *ctmp;
636 read_lock(&ksocknal_data.ksnd_global_lock);
638 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
639 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
640 peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
642 LASSERT(!peer_ni->ksnp_closing);
644 if (peer_ni->ksnp_ni != ni)
647 list_for_each(ctmp, &peer_ni->ksnp_conns) {
651 conn = list_entry(ctmp, struct ksock_conn,
653 ksocknal_conn_addref(conn);
654 read_unlock(&ksocknal_data.ksnd_global_lock);
661 read_unlock(&ksocknal_data.ksnd_global_lock);
665 static struct ksock_sched *
666 ksocknal_choose_scheduler_locked(unsigned int cpt)
668 struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
671 if (sched->kss_nthreads == 0) {
672 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
673 if (sched->kss_nthreads > 0) {
674 CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
675 cpt, sched->kss_cpt);
686 ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
688 struct ksock_net *net = ni->ni_data;
692 read_lock(&ksocknal_data.ksnd_global_lock);
694 nip = net->ksnn_ninterfaces;
695 LASSERT(nip <= LNET_INTERFACES_NUM);
698 /* Only offer interfaces for additional connections if I have more than one. */
702 read_unlock(&ksocknal_data.ksnd_global_lock);
706 for (i = 0; i < nip; i++) {
707 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
708 LASSERT(ipaddrs[i] != 0);
711 read_unlock(&ksocknal_data.ksnd_global_lock);
716 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
718 int best_netmatch = 0;
725 for (i = 0; i < nips; i++) {
729 this_xor = (ips[i] ^ iface->ksni_ipaddr);
730 this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
733 best_netmatch < this_netmatch ||
734 (best_netmatch == this_netmatch &&
735 best_xor > this_xor)))
739 best_netmatch = this_netmatch;
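/* Choose up to n_peerips local interfaces to share with this peer_ni,
 * keeping interfaces already in use, then preferring the best netmask match
 * and the least loaded interface.  The chosen local addresses overwrite
 * peerips[] and the number selected is returned. */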
748 ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
750 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
751 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
752 struct ksock_interface *iface;
753 struct ksock_interface *best_iface;
764 /* CAVEAT EMPTOR: We do all our interface matching with an
765 * exclusive hold of global lock at IRQ priority. We're only
766 * expecting to be dealing with small numbers of interfaces, so the
767 * O(n**3)-ness shouldn't matter */
769 /* Also note that I'm not going to return more than n_peerips
770 * interfaces, even if I have more myself */
772 write_lock_bh(global_lock);
774 LASSERT(n_peerips <= LNET_INTERFACES_NUM);
775 LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
777 /* Only match interfaces for additional connections
778 * if I have > 1 interface */
779 n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
780 MIN(n_peerips, net->ksnn_ninterfaces);
782 for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
783 /* ^ yes really... */
785 /* If we have any new interfaces, first tick off all the
786 * peer_ni IPs that match old interfaces, then choose new
787 * interfaces to match the remaining peer_ni IPs.
788 * We don't forget interfaces we've stopped using; we might
789 * start using them again... */
791 if (i < peer_ni->ksnp_n_passive_ips) {
793 ip = peer_ni->ksnp_passive_ips[i];
794 best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
796 /* peer_ni passive ips are kept up to date */
797 LASSERT(best_iface != NULL);
799 /* choose a new interface */
800 LASSERT (i == peer_ni->ksnp_n_passive_ips);
806 for (j = 0; j < net->ksnn_ninterfaces; j++) {
807 iface = &net->ksnn_interfaces[j];
808 ip = iface->ksni_ipaddr;
810 for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
811 if (peer_ni->ksnp_passive_ips[k] == ip)
814 if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
817 k = ksocknal_match_peerip(iface, peerips, n_peerips);
818 xor = (ip ^ peerips[k]);
819 this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
821 if (!(best_iface == NULL ||
822 best_netmatch < this_netmatch ||
823 (best_netmatch == this_netmatch &&
824 best_npeers > iface->ksni_npeers)))
828 best_netmatch = this_netmatch;
829 best_npeers = iface->ksni_npeers;
832 LASSERT(best_iface != NULL);
834 best_iface->ksni_npeers++;
835 ip = best_iface->ksni_ipaddr;
836 peer_ni->ksnp_passive_ips[i] = ip;
837 peer_ni->ksnp_n_passive_ips = i+1;
840 /* mark the best matching peer_ni IP used */
841 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
845 /* Overwrite input peer_ni IP addresses */
846 memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
848 write_unlock_bh(global_lock);
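/* Create routes from this node's interfaces to each of the peer_ni's
 * advertised addresses, selecting the best local interface for each by
 * netmask match and current route count.  Only done when more than one
 * local interface is configured. */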
854 ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
855 __u32 *peer_ipaddrs, int npeer_ipaddrs)
857 struct ksock_route *newroute = NULL;
858 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
859 struct lnet_ni *ni = peer_ni->ksnp_ni;
860 struct ksock_net *net = ni->ni_data;
861 struct list_head *rtmp;
862 struct ksock_route *route;
863 struct ksock_interface *iface;
864 struct ksock_interface *best_iface;
871 /* CAVEAT EMPTOR: We do all our interface matching with an
872 * exclusive hold of global lock at IRQ priority. We're only
873 * expecting to be dealing with small numbers of interfaces, so the
874 * O(n**3)-ness here shouldn't matter */
876 write_lock_bh(global_lock);
878 if (net->ksnn_ninterfaces < 2) {
879 /* Only create additional connections
880 * if I have > 1 interface */
881 write_unlock_bh(global_lock);
885 LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);
887 for (i = 0; i < npeer_ipaddrs; i++) {
888 if (newroute != NULL) {
889 newroute->ksnr_ipaddr = peer_ipaddrs[i];
891 write_unlock_bh(global_lock);
893 newroute = ksocknal_create_route(peer_ipaddrs[i], port);
894 if (newroute == NULL)
897 write_lock_bh(global_lock);
900 if (peer_ni->ksnp_closing) {
901 /* peer_ni got closed under me */
905 /* Already got a route? */
907 list_for_each(rtmp, &peer_ni->ksnp_routes) {
908 route = list_entry(rtmp, struct ksock_route, ksnr_list);
910 if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
922 LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
924 /* Select interface to connect from */
925 for (j = 0; j < net->ksnn_ninterfaces; j++) {
926 iface = &net->ksnn_interfaces[j];
928 /* Using this interface already? */
929 list_for_each(rtmp, &peer_ni->ksnp_routes) {
930 route = list_entry(rtmp, struct ksock_route,
933 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
941 this_netmatch = (((iface->ksni_ipaddr ^
942 newroute->ksnr_ipaddr) &
943 iface->ksni_netmask) == 0) ? 1 : 0;
945 if (!(best_iface == NULL ||
946 best_netmatch < this_netmatch ||
947 (best_netmatch == this_netmatch &&
948 best_nroutes > iface->ksni_nroutes)))
952 best_netmatch = this_netmatch;
953 best_nroutes = iface->ksni_nroutes;
956 if (best_iface == NULL)
959 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
960 best_iface->ksni_nroutes++;
962 ksocknal_add_route_locked(peer_ni, newroute);
966 write_unlock_bh(global_lock);
967 if (newroute != NULL)
968 ksocknal_route_decref(newroute);
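/* Queue a freshly accepted socket as a connection request for the connd
 * threads, which complete the handshake in their own context. */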
972 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
974 struct ksock_connreq *cr;
979 rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
980 LASSERT(rc == 0); /* we succeeded before */
982 LIBCFS_ALLOC(cr, sizeof(*cr));
984 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
985 "%pI4h: memory exhausted\n", &peer_ip);
991 cr->ksncr_sock = sock;
993 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
995 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
996 wake_up(&ksocknal_data.ksnd_connd_waitq);
998 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1003 ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
1005 struct ksock_route *route;
1007 list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
1008 if (route->ksnr_ipaddr == ipaddr)
1009 return route->ksnr_connecting;
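/* Bring up a connection on 'sock'.  For an active connect, 'route' is the
 * route being connected and the HELLO is sent eagerly; for a passive accept,
 * 'route' is NULL and the peer_ni is discovered from the incoming HELLO.
 * Handles connection races, protocol/incarnation mismatches and duplicate
 * connections before handing the conn to a scheduler. */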
1015 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
1016 struct socket *sock, int type)
1018 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1019 struct list_head zombies = LIST_HEAD_INIT(zombies);
1020 struct lnet_process_id peerid;
1021 struct list_head *tmp;
1023 struct ksock_conn *conn;
1024 struct ksock_conn *conn2;
1025 struct ksock_peer_ni *peer_ni = NULL;
1026 struct ksock_peer_ni *peer2;
1027 struct ksock_sched *sched;
1028 struct ksock_hello_msg *hello;
1030 struct ksock_tx *tx;
1031 struct ksock_tx *txtmp;
1037 active = (route != NULL);
1039 LASSERT (active == (type != SOCKLND_CONN_NONE));
1041 LIBCFS_ALLOC(conn, sizeof(*conn));
1047 conn->ksnc_peer = NULL;
1048 conn->ksnc_route = NULL;
1049 conn->ksnc_sock = sock;
1050 /* 2 refs: 1 for conn, plus an extra ref prevents the socket
1051 * being closed before the connection is established */
1052 atomic_set (&conn->ksnc_sock_refcount, 2);
1053 conn->ksnc_type = type;
1054 ksocknal_lib_save_callback(sock, conn);
1055 atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1057 conn->ksnc_rx_ready = 0;
1058 conn->ksnc_rx_scheduled = 0;
1060 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1061 conn->ksnc_tx_ready = 0;
1062 conn->ksnc_tx_scheduled = 0;
1063 conn->ksnc_tx_carrier = NULL;
1064 atomic_set (&conn->ksnc_tx_nob, 0);
1066 LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
1067 kshm_ips[LNET_INTERFACES_NUM]));
1068 if (hello == NULL) {
1073 /* stash conn's local and remote addrs */
1074 rc = ksocknal_lib_get_conn_addrs (conn);
1078 /* Find out/confirm peer_ni's NID and connection type and get the
1079 * vector of interfaces she's willing to let me connect to.
1080 * Passive connections use the listener timeout since the peer_ni sends
1084 peer_ni = route->ksnr_peer;
1085 LASSERT(ni == peer_ni->ksnp_ni);
1087 /* Active connection sends HELLO eagerly */
1088 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1089 peerid = peer_ni->ksnp_id;
1091 write_lock_bh(global_lock);
1092 conn->ksnc_proto = peer_ni->ksnp_proto;
1093 write_unlock_bh(global_lock);
1095 if (conn->ksnc_proto == NULL) {
1096 conn->ksnc_proto = &ksocknal_protocol_v3x;
1097 #if SOCKNAL_VERSION_DEBUG
1098 if (*ksocknal_tunables.ksnd_protocol == 2)
1099 conn->ksnc_proto = &ksocknal_protocol_v2x;
1100 else if (*ksocknal_tunables.ksnd_protocol == 1)
1101 conn->ksnc_proto = &ksocknal_protocol_v1x;
1105 rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1109 peerid.nid = LNET_NID_ANY;
1110 peerid.pid = LNET_PID_ANY;
1112 /* Passive, get protocol from peer_ni */
1113 conn->ksnc_proto = NULL;
1116 rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1120 LASSERT (rc == 0 || active);
1121 LASSERT (conn->ksnc_proto != NULL);
1122 LASSERT (peerid.nid != LNET_NID_ANY);
1124 cpt = lnet_cpt_of_nid(peerid.nid, ni);
1127 ksocknal_peer_addref(peer_ni);
1128 write_lock_bh(global_lock);
1130 rc = ksocknal_create_peer(&peer_ni, ni, peerid);
1134 write_lock_bh(global_lock);
1136 /* called with a ref on ni, so shutdown can't have started */
1137 LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
1139 peer2 = ksocknal_find_peer_locked(ni, peerid);
1140 if (peer2 == NULL) {
1141 /* NB this puts an "empty" peer_ni in the peer_ni
1142 * table (which takes my ref) */
1143 list_add_tail(&peer_ni->ksnp_list,
1144 ksocknal_nid2peerlist(peerid.nid));
1146 ksocknal_peer_decref(peer_ni);
1151 ksocknal_peer_addref(peer_ni);
1152 peer_ni->ksnp_accepting++;
1154 /* Am I already connecting to this guy? Resolve in
1155 * favour of higher NID... */
1156 if (peerid.nid < ni->ni_nid &&
1157 ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
1159 warn = "connection race resolution";
1164 if (peer_ni->ksnp_closing ||
1165 (active && route->ksnr_deleted)) {
1166 /* peer_ni/route got closed under me */
1168 warn = "peer_ni/route removed";
1172 if (peer_ni->ksnp_proto == NULL) {
1173 /* Never connected before.
1174 * NB recv_hello may have returned EPROTO to signal my peer_ni
1175 * wants a different protocol than the one I asked for.
1177 LASSERT(list_empty(&peer_ni->ksnp_conns));
1179 peer_ni->ksnp_proto = conn->ksnc_proto;
1180 peer_ni->ksnp_incarnation = incarnation;
1183 if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1184 peer_ni->ksnp_incarnation != incarnation) {
1185 /* peer_ni rebooted or I've got the wrong protocol version */
1186 ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
1188 peer_ni->ksnp_proto = NULL;
1190 warn = peer_ni->ksnp_incarnation != incarnation ?
1191 "peer_ni rebooted" :
1192 "wrong proto version";
1202 warn = "lost conn race";
1205 warn = "retry with different protocol version";
1209 /* Refuse to duplicate an existing connection, unless this is a
1210 * loopback connection */
1211 if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1212 list_for_each(tmp, &peer_ni->ksnp_conns) {
1213 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1215 if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1216 conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1217 conn2->ksnc_type != conn->ksnc_type)
1220 /* Reply on a passive connection attempt so the peer_ni
1221 * realises we're connected. */
1231 /* If the connection created by this route didn't bind to the IP
1232 * address the route connected to, the connection/route matching
1233 * code below probably isn't going to work. */
1235 route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1236 CERROR("Route %s %pI4h connected to %pI4h\n",
1237 libcfs_id2str(peer_ni->ksnp_id),
1238 &route->ksnr_ipaddr,
1239 &conn->ksnc_ipaddr);
1242 /* Search for a route corresponding to the new connection and
1243 * create an association. This allows incoming connections created
1244 * by routes in my peer_ni to match my own route entries so I don't
1245 * continually create duplicate routes. */
1246 list_for_each(tmp, &peer_ni->ksnp_routes) {
1247 route = list_entry(tmp, struct ksock_route, ksnr_list);
1249 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1252 ksocknal_associate_route_conn_locked(route, conn);
1256 conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
1257 peer_ni->ksnp_last_alive = ktime_get_seconds();
1258 peer_ni->ksnp_send_keepalive = 0;
1259 peer_ni->ksnp_error = 0;
1261 sched = ksocknal_choose_scheduler_locked(cpt);
1263 CERROR("no schedulers available. node is unhealthy\n");
1267 * The cpt might have changed if we ended up selecting a non cpt
1268 * native scheduler. So use the scheduler's cpt instead.
1270 cpt = sched->kss_cpt;
1271 sched->kss_nconns++;
1272 conn->ksnc_scheduler = sched;
1274 conn->ksnc_tx_last_post = ktime_get_seconds();
1275 /* Set the deadline for the outgoing HELLO to drain */
1276 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1277 conn->ksnc_tx_deadline = ktime_get_seconds() +
1278 lnet_get_lnd_timeout();
1279 smp_mb(); /* order with adding to peer_ni's conn list */
1281 list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1282 ksocknal_conn_addref(conn);
1284 ksocknal_new_packet(conn, 0);
1286 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1288 /* Take packets blocking for this connection. */
1289 list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1290 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1294 list_del(&tx->tx_list);
1295 ksocknal_queue_tx_locked(tx, conn);
1298 write_unlock_bh(global_lock);
1300 /* We've now got a new connection. Any errors from here on are just
1301 * like "normal" comms errors and we close the connection normally.
1302 * NB (a) we still have to send the reply HELLO for passive connections,
1304 * (b) normal I/O on the conn is blocked until I set up and call the socket callbacks. */
1308 CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1309 " incarnation:%lld sched[%d]\n",
1310 libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1311 &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1312 conn->ksnc_port, incarnation, cpt);
1315 /* additional routes after interface exchange? */
1316 ksocknal_create_routes(peer_ni, conn->ksnc_port,
1317 hello->kshm_ips, hello->kshm_nips);
1319 hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
1321 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1324 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1325 kshm_ips[LNET_INTERFACES_NUM]));
1327 /* setup the socket AFTER I've received hello (it disables
1328 * SO_LINGER). I might call back to the acceptor who may want
1329 * to send a protocol version response and then close the
1330 * socket; this ensures the socket only tears down after the
1331 * response has been sent. */
1333 rc = ksocknal_lib_setup_sock(sock);
1335 write_lock_bh(global_lock);
1337 /* NB my callbacks block while I hold ksnd_global_lock */
1338 ksocknal_lib_set_callback(sock, conn);
1341 peer_ni->ksnp_accepting--;
1343 write_unlock_bh(global_lock);
1346 write_lock_bh(global_lock);
1347 if (!conn->ksnc_closing) {
1348 /* could be closed by another thread */
1349 ksocknal_close_conn_locked(conn, rc);
1351 write_unlock_bh(global_lock);
1352 } else if (ksocknal_connsock_addref(conn) == 0) {
1353 /* Allow I/O to proceed. */
1354 ksocknal_read_callback(conn);
1355 ksocknal_write_callback(conn);
1356 ksocknal_connsock_decref(conn);
1359 ksocknal_connsock_decref(conn);
1360 ksocknal_conn_decref(conn);
1364 if (!peer_ni->ksnp_closing &&
1365 list_empty(&peer_ni->ksnp_conns) &&
1366 list_empty(&peer_ni->ksnp_routes)) {
1367 list_add(&zombies, &peer_ni->ksnp_tx_queue);
1368 list_del_init(&peer_ni->ksnp_tx_queue);
1369 ksocknal_unlink_peer_locked(peer_ni);
1372 write_unlock_bh(global_lock);
1376 CERROR("Not creating conn %s type %d: %s\n",
1377 libcfs_id2str(peerid), conn->ksnc_type, warn);
1379 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1380 libcfs_id2str(peerid), conn->ksnc_type, warn);
1385 /* Request retry by replying with CONN_NONE
1386 * ksnc_proto has been set already */
1387 conn->ksnc_type = SOCKLND_CONN_NONE;
1388 hello->kshm_nips = 0;
1389 ksocknal_send_hello(ni, conn, peerid.nid, hello);
1392 write_lock_bh(global_lock);
1393 peer_ni->ksnp_accepting--;
1394 write_unlock_bh(global_lock);
1398 * If we get here without an error code, just use -EALREADY.
1399 * Depending on how we got here, the error may be positive
1400 * or negative. Normalize the value for ksocknal_txlist_done().
1402 rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1403 ksocknal_txlist_done(ni, &zombies, rc2);
1404 ksocknal_peer_decref(peer_ni);
1408 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1409 kshm_ips[LNET_INTERFACES_NUM]));
1411 LIBCFS_FREE(conn, sizeof(*conn));
1419 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1421 /* This just does the immediate housekeeping, and queues the
1422 * connection for the reaper to terminate.
1423 * Caller holds ksnd_global_lock exclusively in irq context */
1424 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1425 struct ksock_route *route;
1426 struct ksock_conn *conn2;
1427 struct list_head *tmp;
1429 LASSERT(peer_ni->ksnp_error == 0);
1430 LASSERT(!conn->ksnc_closing);
1431 conn->ksnc_closing = 1;
1433 /* ksnd_deathrow_conns takes over peer_ni's ref */
1434 list_del(&conn->ksnc_list);
1436 route = conn->ksnc_route;
1437 if (route != NULL) {
1438 /* dissociate conn from route... */
1439 LASSERT(!route->ksnr_deleted);
1440 LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1443 list_for_each(tmp, &peer_ni->ksnp_conns) {
1444 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1446 if (conn2->ksnc_route == route &&
1447 conn2->ksnc_type == conn->ksnc_type)
1453 route->ksnr_connected &= ~(1 << conn->ksnc_type);
1455 conn->ksnc_route = NULL;
1457 ksocknal_route_decref(route); /* drop conn's ref on route */
1460 if (list_empty(&peer_ni->ksnp_conns)) {
1461 /* No more connections to this peer_ni */
1463 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1464 struct ksock_tx *tx;
1466 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1468 /* throw them to the last connection...,
1469 * these TXs will be sent to /dev/null by the scheduler */
1470 list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1472 ksocknal_tx_prep(conn, tx);
1474 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1475 list_splice_init(&peer_ni->ksnp_tx_queue,
1476 &conn->ksnc_tx_queue);
1477 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1480 /* renegotiate protocol version */
1481 peer_ni->ksnp_proto = NULL;
1482 /* stash last conn close reason */
1483 peer_ni->ksnp_error = error;
1485 if (list_empty(&peer_ni->ksnp_routes)) {
1486 /* I've just closed last conn belonging to a
1487 * peer_ni with no routes to it */
1488 ksocknal_unlink_peer_locked(peer_ni);
1492 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1494 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
1495 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1497 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1501 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1504 time64_t last_alive = 0;
1506 /* There has been a connection failure or comms error; but I'll only
1507 * tell LNET I think the peer_ni is dead if it's to another kernel and
1508 * there are no connections or connection attempts in existence. */
1510 read_lock(&ksocknal_data.ksnd_global_lock);
1512 if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1513 list_empty(&peer_ni->ksnp_conns) &&
1514 peer_ni->ksnp_accepting == 0 &&
1515 ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
1517 last_alive = peer_ni->ksnp_last_alive;
1520 read_unlock(&ksocknal_data.ksnd_global_lock);
1523 lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
1524 false, false, last_alive);
1528 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1530 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1531 struct ksock_tx *tx;
1532 struct ksock_tx *tmp;
1533 struct list_head zlist = LIST_HEAD_INIT(zlist);
1535 /* NB safe to finalize TXs because closing of socket will
1536 * abort all buffered data */
1537 LASSERT(conn->ksnc_sock == NULL);
1539 spin_lock(&peer_ni->ksnp_lock);
1541 list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
1542 if (tx->tx_conn != conn)
1545 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1547 tx->tx_msg.ksm_zc_cookies[0] = 0;
1548 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1549 list_del(&tx->tx_zc_list);
1550 list_add(&tx->tx_zc_list, &zlist);
1553 spin_unlock(&peer_ni->ksnp_lock);
1555 while (!list_empty(&zlist)) {
1556 tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
1558 list_del(&tx->tx_zc_list);
1559 ksocknal_tx_decref(tx);
1564 ksocknal_terminate_conn(struct ksock_conn *conn)
1566 /* This gets called by the reaper (guaranteed thread context) to
1567 * disengage the socket from its callbacks and close it.
1568 * ksnc_refcount will eventually hit zero, and then the reaper will destroy it. */
1570 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1571 struct ksock_sched *sched = conn->ksnc_scheduler;
1574 LASSERT(conn->ksnc_closing);
1576 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1577 spin_lock_bh(&sched->kss_lock);
1579 /* a closing conn is always ready to tx */
1580 conn->ksnc_tx_ready = 1;
1582 if (!conn->ksnc_tx_scheduled &&
1583 !list_empty(&conn->ksnc_tx_queue)) {
1584 list_add_tail(&conn->ksnc_tx_list,
1585 &sched->kss_tx_conns);
1586 conn->ksnc_tx_scheduled = 1;
1587 /* extra ref for scheduler */
1588 ksocknal_conn_addref(conn);
1590 wake_up (&sched->kss_waitq);
1593 spin_unlock_bh(&sched->kss_lock);
1595 /* serialise with callbacks */
1596 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1598 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1600 /* OK, so this conn may not be completely disengaged from its
1601 * scheduler yet, but it _has_ committed to terminate... */
1602 conn->ksnc_scheduler->kss_nconns--;
1604 if (peer_ni->ksnp_error != 0) {
1605 /* peer_ni's last conn closed in error */
1606 LASSERT(list_empty(&peer_ni->ksnp_conns));
1608 peer_ni->ksnp_error = 0; /* avoid multiple notifications */
1611 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1614 ksocknal_peer_failed(peer_ni);
1616 /* The socket is closed on the final put; either here, or in
1617 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1618 * when the connection was established, this will close the socket
1619 * immediately, aborting anything buffered in it. Any hung
1620 * zero-copy transmits will therefore complete in finite time. */
1621 ksocknal_connsock_decref(conn);
1625 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1627 /* Queue the conn for the reaper to destroy */
1628 LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1629 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1631 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1632 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1634 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1638 ksocknal_destroy_conn(struct ksock_conn *conn)
1642 /* Final coup-de-grace of the reaper */
1643 CDEBUG (D_NET, "connection %p\n", conn);
1645 LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1646 LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1647 LASSERT (conn->ksnc_sock == NULL);
1648 LASSERT (conn->ksnc_route == NULL);
1649 LASSERT (!conn->ksnc_tx_scheduled);
1650 LASSERT (!conn->ksnc_rx_scheduled);
1651 LASSERT(list_empty(&conn->ksnc_tx_queue));
1653 /* complete current receive if any */
1654 switch (conn->ksnc_rx_state) {
1655 case SOCKNAL_RX_LNET_PAYLOAD:
1656 last_rcv = conn->ksnc_rx_deadline -
1657 lnet_get_lnd_timeout();
1658 CERROR("Completing partial receive from %s[%d], "
1659 "ip %pI4h:%d, with error, wanted: %d, left: %d, "
1660 "last alive is %lld secs ago\n",
1661 libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1662 &conn->ksnc_ipaddr, conn->ksnc_port,
1663 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1664 ktime_get_seconds() - last_rcv);
1665 if (conn->ksnc_lnet_msg)
1666 conn->ksnc_lnet_msg->msg_health_status =
1667 LNET_MSG_STATUS_REMOTE_ERROR;
1668 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1670 case SOCKNAL_RX_LNET_HEADER:
1671 if (conn->ksnc_rx_started)
1672 CERROR("Incomplete receive of lnet header from %s, "
1673 "ip %pI4h:%d, with error, protocol: %d.x.\n",
1674 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1675 &conn->ksnc_ipaddr, conn->ksnc_port,
1676 conn->ksnc_proto->pro_version);
1678 case SOCKNAL_RX_KSM_HEADER:
1679 if (conn->ksnc_rx_started)
1680 CERROR("Incomplete receive of ksock message from %s, "
1681 "ip %pI4h:%d, with error, protocol: %d.x.\n",
1682 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1683 &conn->ksnc_ipaddr, conn->ksnc_port,
1684 conn->ksnc_proto->pro_version);
1686 case SOCKNAL_RX_SLOP:
1687 if (conn->ksnc_rx_started)
1688 CERROR("Incomplete receive of slops from %s, "
1689 "ip %pI4h:%d, with error\n",
1690 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1691 &conn->ksnc_ipaddr, conn->ksnc_port);
1698 ksocknal_peer_decref(conn->ksnc_peer);
1700 LIBCFS_FREE (conn, sizeof (*conn));
1704 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
1706 struct ksock_conn *conn;
1707 struct list_head *ctmp;
1708 struct list_head *cnxt;
1711 list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
1712 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1715 conn->ksnc_ipaddr == ipaddr) {
1717 ksocknal_close_conn_locked (conn, why);
1725 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1727 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1728 u32 ipaddr = conn->ksnc_ipaddr;
1731 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1733 count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
1735 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1741 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
1743 struct ksock_peer_ni *peer_ni;
1744 struct list_head *ptmp;
1745 struct list_head *pnxt;
1751 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1753 if (id.nid != LNET_NID_ANY)
1754 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1757 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1760 for (i = lo; i <= hi; i++) {
1761 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
1763 peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
1765 if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
1766 (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
1769 count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
1773 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1775 /* wildcards always succeed */
1776 if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1779 return (count == 0 ? -ENOENT : 0);
1783 ksocknal_notify_gw_down(lnet_nid_t gw_nid)
1785 /* The router is telling me she's been notified of a change in
1788 struct lnet_process_id id = {
1790 .pid = LNET_PID_ANY,
1793 CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));
1795 /* If the gateway crashed, close all open connections... */
1796 ksocknal_close_matching_conns(id, 0);
1799 /* We can only establish new connections
1800 * if we have autoroutes, and these connect on demand. */
1804 ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
1807 time64_t last_alive = 0;
1808 time64_t now = ktime_get_seconds();
1809 struct ksock_peer_ni *peer_ni = NULL;
1810 rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1811 struct lnet_process_id id = {
1813 .pid = LNET_PID_LUSTRE,
1818 peer_ni = ksocknal_find_peer_locked(ni, id);
1819 if (peer_ni != NULL) {
1820 struct list_head *tmp;
1821 struct ksock_conn *conn;
1824 list_for_each(tmp, &peer_ni->ksnp_conns) {
1825 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
1826 bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1828 if (bufnob < conn->ksnc_tx_bufnob) {
1829 /* something got ACKed */
1830 conn->ksnc_tx_deadline = ktime_get_seconds() +
1831 lnet_get_lnd_timeout();
1832 peer_ni->ksnp_last_alive = now;
1833 conn->ksnc_tx_bufnob = bufnob;
1837 last_alive = peer_ni->ksnp_last_alive;
1838 if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
1844 if (last_alive != 0)
1847 CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
1848 libcfs_nid2str(nid), peer_ni,
1849 last_alive ? now - last_alive : -1,
1855 ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1857 write_lock_bh(glock);
1859 peer_ni = ksocknal_find_peer_locked(ni, id);
1860 if (peer_ni != NULL)
1861 ksocknal_launch_all_connections_locked(peer_ni);
1863 write_unlock_bh(glock);
1868 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1872 struct list_head *tmp;
1873 struct ksock_conn *conn;
1875 for (index = 0; ; index++) {
1876 read_lock(&ksocknal_data.ksnd_global_lock);
1881 list_for_each(tmp, &peer_ni->ksnp_conns) {
1883 conn = list_entry(tmp, struct ksock_conn,
1885 ksocknal_conn_addref(conn);
1890 read_unlock(&ksocknal_data.ksnd_global_lock);
1895 ksocknal_lib_push_conn (conn);
1896 ksocknal_conn_decref(conn);
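/* Push all conns of all peers matching 'id'.  The peer hash is re-walked by
 * offset on every pass because the global lock must be dropped while each
 * peer_ni is pushed. */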
1901 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
1903 struct list_head *start;
1904 struct list_head *end;
1905 struct list_head *tmp;
1907 unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
1909 if (id.nid == LNET_NID_ANY) {
1910 start = &ksocknal_data.ksnd_peers[0];
1911 end = &ksocknal_data.ksnd_peers[hsize - 1];
1913 start = end = ksocknal_nid2peerlist(id.nid);
1916 for (tmp = start; tmp <= end; tmp++) {
1917 int peer_off; /* searching offset in peer_ni hash table */
1919 for (peer_off = 0; ; peer_off++) {
1920 struct ksock_peer_ni *peer_ni;
1923 read_lock(&ksocknal_data.ksnd_global_lock);
1924 list_for_each_entry(peer_ni, tmp, ksnp_list) {
1925 if (!((id.nid == LNET_NID_ANY ||
1926 id.nid == peer_ni->ksnp_id.nid) &&
1927 (id.pid == LNET_PID_ANY ||
1928 id.pid == peer_ni->ksnp_id.pid)))
1931 if (i++ == peer_off) {
1932 ksocknal_peer_addref(peer_ni);
1936 read_unlock(&ksocknal_data.ksnd_global_lock);
1938 if (i <= peer_off) /* no match */
1942 ksocknal_push_peer(peer_ni);
1943 ksocknal_peer_decref(peer_ni);
1950 ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
1952 struct ksock_net *net = ni->ni_data;
1953 struct ksock_interface *iface;
1957 struct list_head *ptmp;
1958 struct ksock_peer_ni *peer_ni;
1959 struct list_head *rtmp;
1960 struct ksock_route *route;
1962 if (ipaddress == 0 ||
1966 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1968 iface = ksocknal_ip2iface(ni, ipaddress);
1969 if (iface != NULL) {
1970 /* silently ignore dups */
1972 } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
1975 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1977 iface->ksni_ipaddr = ipaddress;
1978 iface->ksni_netmask = netmask;
1979 iface->ksni_nroutes = 0;
1980 iface->ksni_npeers = 0;
1982 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1983 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1984 peer_ni = list_entry(ptmp, struct ksock_peer_ni,
1987 for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
1988 if (peer_ni->ksnp_passive_ips[j] == ipaddress)
1989 iface->ksni_npeers++;
1991 list_for_each(rtmp, &peer_ni->ksnp_routes) {
1992 route = list_entry(rtmp,
1996 if (route->ksnr_myipaddr == ipaddress)
1997 iface->ksni_nroutes++;
2003 /* NB only new connections will pay attention to the new interface! */
2006 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2012 ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
2014 struct list_head *tmp;
2015 struct list_head *nxt;
2016 struct ksock_route *route;
2017 struct ksock_conn *conn;
2021 for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
2022 if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
2023 for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
2024 peer_ni->ksnp_passive_ips[j-1] =
2025 peer_ni->ksnp_passive_ips[j];
2026 peer_ni->ksnp_n_passive_ips--;
2030 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
2031 route = list_entry(tmp, struct ksock_route, ksnr_list);
2033 if (route->ksnr_myipaddr != ipaddr)
2036 if (route->ksnr_share_count != 0) {
2037 /* Manually created; keep, but unbind */
2038 route->ksnr_myipaddr = 0;
2040 ksocknal_del_route_locked(route);
2044 list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
2045 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2047 if (conn->ksnc_myipaddr == ipaddr)
2048 ksocknal_close_conn_locked (conn, 0);
2053 ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
2055 struct ksock_net *net = ni->ni_data;
2057 struct list_head *tmp;
2058 struct list_head *nxt;
2059 struct ksock_peer_ni *peer_ni;
2064 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2066 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2067 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2069 if (!(ipaddress == 0 ||
2070 ipaddress == this_ip))
2075 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2076 net->ksnn_interfaces[j-1] =
2077 net->ksnn_interfaces[j];
2079 net->ksnn_ninterfaces--;
2081 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2082 list_for_each_safe(tmp, nxt,
2083 &ksocknal_data.ksnd_peers[j]) {
2084 peer_ni = list_entry(tmp, struct ksock_peer_ni,
2087 if (peer_ni->ksnp_ni != ni)
2090 ksocknal_peer_del_interface_locked(peer_ni, this_ip);
2095 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
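/* ioctl handler: translate IOC_LIBCFS_* requests (interface, peer and
 * connection queries and updates) into the operations above. */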
2101 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
2103 struct lnet_process_id id = {0};
2104 struct libcfs_ioctl_data *data = arg;
2108 case IOC_LIBCFS_GET_INTERFACE: {
2109 struct ksock_net *net = ni->ni_data;
2110 struct ksock_interface *iface;
2112 read_lock(&ksocknal_data.ksnd_global_lock);
2114 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2118 iface = &net->ksnn_interfaces[data->ioc_count];
2120 data->ioc_u32[0] = iface->ksni_ipaddr;
2121 data->ioc_u32[1] = iface->ksni_netmask;
2122 data->ioc_u32[2] = iface->ksni_npeers;
2123 data->ioc_u32[3] = iface->ksni_nroutes;
2126 read_unlock(&ksocknal_data.ksnd_global_lock);
2130 case IOC_LIBCFS_ADD_INTERFACE:
2131 return ksocknal_add_interface(ni,
2132 data->ioc_u32[0], /* IP address */
2133 data->ioc_u32[1]); /* net mask */
2135 case IOC_LIBCFS_DEL_INTERFACE:
2136 return ksocknal_del_interface(ni,
2137 data->ioc_u32[0]); /* IP address */
2139 case IOC_LIBCFS_GET_PEER: {
2144 int share_count = 0;
2146 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2147 &id, &myip, &ip, &port,
2148 &conn_count, &share_count);
2152 data->ioc_nid = id.nid;
2153 data->ioc_count = share_count;
2154 data->ioc_u32[0] = ip;
2155 data->ioc_u32[1] = port;
2156 data->ioc_u32[2] = myip;
2157 data->ioc_u32[3] = conn_count;
2158 data->ioc_u32[4] = id.pid;
2162 case IOC_LIBCFS_ADD_PEER:
2163 id.nid = data->ioc_nid;
2164 id.pid = LNET_PID_LUSTRE;
2165 return ksocknal_add_peer (ni, id,
2166 data->ioc_u32[0], /* IP */
2167 data->ioc_u32[1]); /* port */
2169 case IOC_LIBCFS_DEL_PEER:
2170 id.nid = data->ioc_nid;
2171 id.pid = LNET_PID_ANY;
2172 return ksocknal_del_peer (ni, id,
2173 data->ioc_u32[0]); /* IP */
2175 case IOC_LIBCFS_GET_CONN: {
2179 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2184 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2186 data->ioc_count = txmem;
2187 data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
2188 data->ioc_flags = nagle;
2189 data->ioc_u32[0] = conn->ksnc_ipaddr;
2190 data->ioc_u32[1] = conn->ksnc_port;
2191 data->ioc_u32[2] = conn->ksnc_myipaddr;
2192 data->ioc_u32[3] = conn->ksnc_type;
2193 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
2194 data->ioc_u32[5] = rxmem;
2195 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2196 ksocknal_conn_decref(conn);
2200 case IOC_LIBCFS_CLOSE_CONNECTION:
2201 id.nid = data->ioc_nid;
2202 id.pid = LNET_PID_ANY;
2203 return ksocknal_close_matching_conns (id,
2206 case IOC_LIBCFS_REGISTER_MYNID:
2207 /* Ignore if this is a noop */
2208 if (data->ioc_nid == ni->ni_nid)
2211 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2212 libcfs_nid2str(data->ioc_nid),
2213 libcfs_nid2str(ni->ni_nid));
2216 case IOC_LIBCFS_PUSH_CONNECTION:
2217 id.nid = data->ioc_nid;
2218 id.pid = LNET_PID_ANY;
2219 return ksocknal_push(ni, id);
2228 ksocknal_free_buffers (void)
2230 LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2232 if (ksocknal_data.ksnd_schedulers != NULL)
2233 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
2235 LIBCFS_FREE (ksocknal_data.ksnd_peers,
2236 sizeof(struct list_head) *
2237 ksocknal_data.ksnd_peer_hash_size);
2239 spin_lock(&ksocknal_data.ksnd_tx_lock);
2241 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2242 struct list_head zlist;
2243 struct ksock_tx *tx;
2245 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2246 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2247 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2249 while (!list_empty(&zlist)) {
2250 tx = list_entry(zlist.next, struct ksock_tx, tx_list);
2251 list_del(&tx->tx_list);
2252 LIBCFS_FREE(tx, tx->tx_desc_size);
2255 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2260 ksocknal_base_shutdown(void)
2262 struct ksock_sched *sched;
2265 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2266 atomic_read (&libcfs_kmemory));
2267 LASSERT (ksocknal_data.ksnd_nnets == 0);
2269 switch (ksocknal_data.ksnd_init) {
2273 case SOCKNAL_INIT_ALL:
2274 case SOCKNAL_INIT_DATA:
2275 LASSERT (ksocknal_data.ksnd_peers != NULL);
2276 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2277 LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2280 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2281 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2282 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2283 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2284 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2286 if (ksocknal_data.ksnd_schedulers != NULL) {
2287 cfs_percpt_for_each(sched, i,
2288 ksocknal_data.ksnd_schedulers) {
2290 LASSERT(list_empty(&sched->kss_tx_conns));
2291 LASSERT(list_empty(&sched->kss_rx_conns));
2292 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2293 LASSERT(sched->kss_nconns == 0);
2297 /* flag threads to terminate; wake and wait for them to die */
2298 ksocknal_data.ksnd_shuttingdown = 1;
2299 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2300 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2302 if (ksocknal_data.ksnd_schedulers != NULL) {
2303 cfs_percpt_for_each(sched, i,
2304 ksocknal_data.ksnd_schedulers)
2305 wake_up_all(&sched->kss_waitq);
2309 read_lock(&ksocknal_data.ksnd_global_lock);
2310 while (ksocknal_data.ksnd_nthreads != 0) {
2313 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2314 "waiting for %d threads to terminate\n",
2315 ksocknal_data.ksnd_nthreads);
2316 read_unlock(&ksocknal_data.ksnd_global_lock);
2317 set_current_state(TASK_UNINTERRUPTIBLE);
2318 schedule_timeout(cfs_time_seconds(1));
2319 read_lock(&ksocknal_data.ksnd_global_lock);
2321 read_unlock(&ksocknal_data.ksnd_global_lock);
2323 ksocknal_free_buffers();
2325 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2329 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2330 atomic_read (&libcfs_kmemory));
2332 module_put(THIS_MODULE);
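/* Global one-time initialisation: allocate the peer hash and the per-CPT
 * scheduler blocks, then start the connd and reaper threads.  Failures
 * unwind through ksocknal_base_shutdown(). */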
2336 ksocknal_base_startup(void)
2338 struct ksock_sched *sched;
2342 LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2343 LASSERT (ksocknal_data.ksnd_nnets == 0);
2345 memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2347 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2348 LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2349 sizeof(struct list_head) *
2350 ksocknal_data.ksnd_peer_hash_size);
2351 if (ksocknal_data.ksnd_peers == NULL)
2354 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2355 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2357 rwlock_init(&ksocknal_data.ksnd_global_lock);
2358 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2360 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2361 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2362 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2363 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2364 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2366 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2367 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2368 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2369 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2371 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2372 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2374 /* NB memset above zeros whole of ksocknal_data */
2376 /* flag lists/ptrs/locks initialised */
2377 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2378 try_module_get(THIS_MODULE);
2380 /* Create a scheduler block per available CPT */
2381 ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2383 if (ksocknal_data.ksnd_schedulers == NULL)
2386 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2390 * make sure not to allocate more threads than there are
2391 * cores/CPUs in the CPT
2393 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2394 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2395 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2398 * max to half of CPUs, assume another half should be
2399 * reserved for upper layer modules
2401 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
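/* Worked example (assuming SOCKNAL_NSCHEDS == 3): on a CPT with
 * 8 online CPUs, nthrs = min(max(3, 8 >> 1), 8) = 4 scheduler
 * threads; on a 2-CPU CPT, nthrs = min(max(3, 1), 2) = 2. */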
2404 sched->kss_nthreads_max = nthrs;
2407 spin_lock_init(&sched->kss_lock);
2408 INIT_LIST_HEAD(&sched->kss_rx_conns);
2409 INIT_LIST_HEAD(&sched->kss_tx_conns);
2410 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2411 init_waitqueue_head(&sched->kss_waitq);
2414 ksocknal_data.ksnd_connd_starting = 0;
2415 ksocknal_data.ksnd_connd_failed_stamp = 0;
2416 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2417 /* must have at least 2 connds to remain responsive to accepts while connecting */
2419 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2420 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2422 if (*ksocknal_tunables.ksnd_nconnds_max <
2423 *ksocknal_tunables.ksnd_nconnds) {
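/* ksnd_nconnds and ksnd_nconnds_max are pointers to the module
 * parameters; assigning the pointer below makes nconnds_max
 * permanently track nconnds rather than copying its value. */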
2424 ksocknal_tunables.ksnd_nconnds_max =
2425 ksocknal_tunables.ksnd_nconnds;
2428 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2430 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2431 ksocknal_data.ksnd_connd_starting++;
2432 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
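/* Account for the connd before it is spawned; the connd itself is
 * expected to drop ksnd_connd_starting once it is running (see
 * ksocknal_connd()), so a failed start must undo the increment. */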
2435 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2436 rc = ksocknal_thread_start(ksocknal_connd,
2437 (void *)((uintptr_t)i), name);
2439 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2440 ksocknal_data.ksnd_connd_starting--;
2441 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2442 CERROR("Can't spawn socknal connd: %d\n", rc);
2447 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2449 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2453 /* flag everything initialised */
2454 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2459 ksocknal_base_shutdown();
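/* Dump any peer_ni still hashed on @ni at shutdown time, together
 * with its routes and connections, to help diagnose leaked
 * references that keep ksnn_npeers from reaching zero. */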
2464 ksocknal_debug_peerhash(struct lnet_ni *ni)
2466 struct ksock_peer_ni *peer_ni = NULL;
2467 struct list_head *tmp;
2470 read_lock(&ksocknal_data.ksnd_global_lock);
2472 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2473 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2474 peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
2476 if (peer_ni->ksnp_ni == ni) break;
2482 if (peer_ni != NULL) {
2483 struct ksock_route *route;
2484 struct ksock_conn *conn;
2486 CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
2487 "closing %d, accepting %d, err %d, zcookie %llu, "
2488 "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
2489 atomic_read(&peer_ni->ksnp_refcount),
2490 peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
2491 peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2492 peer_ni->ksnp_zc_next_cookie,
2493 !list_empty(&peer_ni->ksnp_tx_queue),
2494 !list_empty(&peer_ni->ksnp_zc_req_list));
2496 list_for_each(tmp, &peer_ni->ksnp_routes) {
2497 route = list_entry(tmp, struct ksock_route, ksnr_list);
2498 CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2499 "del %d\n", atomic_read(&route->ksnr_refcount),
2500 route->ksnr_scheduled, route->ksnr_connecting,
2501 route->ksnr_connected, route->ksnr_deleted);
2504 list_for_each(tmp, &peer_ni->ksnp_conns) {
2505 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2506 CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2507 atomic_read(&conn->ksnc_conn_refcount),
2508 atomic_read(&conn->ksnc_sock_refcount),
2509 conn->ksnc_type, conn->ksnc_closing);
2513 read_unlock(&ksocknal_data.ksnd_global_lock);
2518 ksocknal_shutdown(struct lnet_ni *ni)
2520 struct ksock_net *net = ni->ni_data;
2521 struct lnet_process_id anyid = {
2522 .nid = LNET_NID_ANY,
2523 .pid = LNET_PID_ANY,
2527 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2528 LASSERT(ksocknal_data.ksnd_nnets > 0);
2530 spin_lock_bh(&net->ksnn_lock);
2531 net->ksnn_shutdown = 1; /* prevent new peers */
2532 spin_unlock_bh(&net->ksnn_lock);
2534 /* Delete all peers */
2535 ksocknal_del_peer(ni, anyid, 0);
2537 /* Wait for all peer_ni state to clean up */
2539 spin_lock_bh(&net->ksnn_lock);
2540 while (net->ksnn_npeers != 0) {
2541 spin_unlock_bh(&net->ksnn_lock);
2544 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2545 "waiting for %d peers to disconnect\n", net->ksnn_npeers);
2547 set_current_state(TASK_UNINTERRUPTIBLE);
2548 schedule_timeout(cfs_time_seconds(1));
2550 ksocknal_debug_peerhash(ni);
2552 spin_lock_bh(&net->ksnn_lock);
2554 spin_unlock_bh(&net->ksnn_lock);
2556 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2557 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2558 LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2561 list_del(&net->ksnn_list);
2562 LIBCFS_FREE(net, sizeof(*net));
2564 ksocknal_data.ksnd_nnets--;
2565 if (ksocknal_data.ksnd_nnets == 0)
2566 ksocknal_base_shutdown();
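/* Count the interfaces of @net whose base device name is not
 * already in use by another ksock_net; the ':' alias suffix is
 * ignored on both sides of the comparison. */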
2570 ksocknal_search_new_ipif(struct ksock_net *net)
2575 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2576 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2577 char *colon = strchr(ifnam, ':');
2579 struct ksock_net *tmp;
2582 if (colon != NULL) /* ignore alias device */
2585 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2587 for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2588 char *ifnam2 = &tmp->ksnn_interfaces[j].ksni_name[0];
2590 char *colon2 = strchr(ifnam2, ':');
2595 found = strcmp(ifnam, ifnam2) == 0;
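/* Start scheduler threads for @sched, never exceeding
 * kss_nthreads_max: size the pool from the CPT weight on the
 * first call, and grow it by at most two threads when a new
 * interface is added later. */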
2612 ksocknal_start_schedulers(struct ksock_sched *sched)
2618 if (sched->kss_nthreads == 0) {
2619 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2620 nthrs = sched->kss_nthreads_max;
2622 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2624 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2625 nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2627 nthrs = min(nthrs, sched->kss_nthreads_max);
2629 LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
2630 /* add at most two more threads when a new interface appears */
2631 nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
2634 for (i = 0; i < nthrs; i++) {
2638 id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
2639 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2640 sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
2642 rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id, name);
2647 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2648 sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
2652 sched->kss_nthreads += i;
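/* Start scheduler threads on each CPT this net is configured to
 * use (all CPTs when @cpts is NULL).  CPTs that already have
 * running schedulers are skipped unless a genuinely new physical
 * interface was found by ksocknal_search_new_ipif(). */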
2657 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2659 int newif = ksocknal_search_new_ipif(net);
2663 if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2666 for (i = 0; i < ncpts; i++) {
2667 struct ksock_sched *sched;
2668 int cpt = (cpts == NULL) ? i : cpts[i];
2670 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2671 sched = ksocknal_data.ksnd_schedulers[cpt];
2673 if (!newif && sched->kss_nthreads > 0)
2676 rc = ksocknal_start_schedulers(sched);
2684 ksocknal_startup(struct lnet_ni *ni)
2686 struct ksock_net *net;
2687 struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
2688 struct ksock_interface *ksi = NULL;
2689 struct lnet_inetdev *ifaces = NULL;
2693 LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
2695 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2696 rc = ksocknal_base_startup();
2701 LIBCFS_ALLOC(net, sizeof(*net));
2705 spin_lock_init(&net->ksnn_lock);
2706 net->ksnn_incarnation = ktime_get_real_ns();
2708 net_tunables = &ni->ni_net->net_tunables;
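/* Per-net tunables left unset (-1) by the LNet core inherit the
 * socklnd module parameter defaults, and peer_tx_credits is
 * clamped to max_tx_credits below. */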
2710 if (net_tunables->lct_peer_timeout == -1)
2711 net_tunables->lct_peer_timeout =
2712 *ksocknal_tunables.ksnd_peertimeout;
2714 if (net_tunables->lct_max_tx_credits == -1)
2715 net_tunables->lct_max_tx_credits =
2716 *ksocknal_tunables.ksnd_credits;
2718 if (net_tunables->lct_peer_tx_credits == -1)
2719 net_tunables->lct_peer_tx_credits =
2720 *ksocknal_tunables.ksnd_peertxcredits;
2722 if (net_tunables->lct_peer_tx_credits >
2723 net_tunables->lct_max_tx_credits)
2724 net_tunables->lct_peer_tx_credits =
2725 net_tunables->lct_max_tx_credits;
2727 if (net_tunables->lct_peer_rtr_credits == -1)
2728 net_tunables->lct_peer_rtr_credits =
2729 *ksocknal_tunables.ksnd_peerrtrcredits;
2731 rc = lnet_inet_enumerate(&ifaces);
2735 if (!ni->ni_interfaces[0]) {
2736 ksi = &net->ksnn_interfaces[0];
2738 /* Use the first discovered interface */
2739 net->ksnn_ninterfaces = 1;
2740 ni->ni_dev_cpt = ifaces[0].li_cpt;
2741 ksi->ksni_ipaddr = ifaces[0].li_ipaddr;
2742 ksi->ksni_netmask = ifaces[0].li_netmask;
2743 strlcpy(ksi->ksni_name, ifaces[0].li_name,
2744 sizeof(ksi->ksni_name));
2746 /* Before Multi-Rail ksocklnd would manage
2747 * multiple interfaces with its own tcp bonding.
2748 * If we encounter an old configuration using
2749 * this tcp bonding approach then we need to
2750 * handle more than one ni_interface.
2752 * In Multi-Rail configuration only ONE ni_interface
2753 * should exist. Each IP alias should be mapped to
2754 * its own 'struct lnet_ni'. */
2756 for (i = 0; i < LNET_INTERFACES_NUM; i++) {
2759 if (!ni->ni_interfaces[i])
2762 for (j = 0; j < LNET_INTERFACES_NUM; j++) {
2763 if (i != j && ni->ni_interfaces[j] &&
2764 strcmp(ni->ni_interfaces[i],
2765 ni->ni_interfaces[j]) == 0) {
2767 CERROR("ksocklnd: found duplicate %s at %d and %d, rc = %d\n",
2768 ni->ni_interfaces[i], i, j, rc);
2773 for (j = 0; j < rc; j++) {
2774 if (strcmp(ifaces[j].li_name,
2775 ni->ni_interfaces[i]) != 0)
2778 ksi = &net->ksnn_interfaces[j];
2779 ni->ni_dev_cpt = ifaces[j].li_cpt;
2780 ksi->ksni_ipaddr = ifaces[j].li_ipaddr;
2781 ksi->ksni_netmask = ifaces[j].li_netmask;
2782 strlcpy(ksi->ksni_name, ifaces[j].li_name,
2783 sizeof(ksi->ksni_name));
2784 net->ksnn_ninterfaces++;
2788 /* ni_interfaces don't map to all network interfaces */
2789 if (!ksi || net->ksnn_ninterfaces != i) {
2790 CERROR("ksocklnd: requested %d but only %d interfaces found\n",
2791 i, net->ksnn_ninterfaces);
2796 /* start the scheduler threads before adding the net to ksocknal_data.ksnd_nets */
2797 rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
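/* The NID of a socklnd NI is <ipaddr>@tcpN: keep the network part
 * of ni_nid and substitute the address of the bound interface. */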
2802 ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ksi->ksni_ipaddr);
2803 list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2805 ksocknal_data.ksnd_nnets++;
2810 LIBCFS_FREE(net, sizeof(*net));
2812 if (ksocknal_data.ksnd_nnets == 0)
2813 ksocknal_base_shutdown();
2819 static void __exit ksocklnd_exit(void)
2821 lnet_unregister_lnd(&the_ksocklnd);
2824 static int __init ksocklnd_init(void)
2828 /* check that the ksnr_connected/connecting fields are large enough */
2829 CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2830 CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2832 /* initialize the_ksocklnd */
2833 the_ksocklnd.lnd_type = SOCKLND;
2834 the_ksocklnd.lnd_startup = ksocknal_startup;
2835 the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2836 the_ksocklnd.lnd_ctl = ksocknal_ctl;
2837 the_ksocklnd.lnd_send = ksocknal_send;
2838 the_ksocklnd.lnd_recv = ksocknal_recv;
2839 the_ksocklnd.lnd_notify_peer_down = ksocknal_notify_gw_down;
2840 the_ksocklnd.lnd_query = ksocknal_query;
2841 the_ksocklnd.lnd_accept = ksocknal_accept;
2843 rc = ksocknal_tunables_init();
2847 lnet_register_lnd(&the_ksocklnd);
2852 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2853 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2854 MODULE_VERSION("2.8.0");
2855 MODULE_LICENSE("GPL");
2857 module_init(ksocklnd_init);
2858 module_exit(ksocklnd_exit);