/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <linux/pci.h>
#include "socklnd.h"
static struct lnet_lnd the_ksocklnd;
ksock_nal_data_t ksocknal_data;
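/* Find the interface of 'ni' bound to address 'ip'; returns NULL if no
 * configured interface matches. */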
static ksock_interface_t *
ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
	ksock_net_t *net = ni->ni_data;
	int i;
	ksock_interface_t *iface;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(i < LNET_NUM_INTERFACES);
		iface = &net->ksnn_interfaces[i];

		if (iface->ksni_ipaddr == ip)
			return iface;
	}

	return NULL;
}
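/* Allocate a route to 'ipaddr':'port' with one reference for the caller;
 * returns NULL on allocation failure. */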
static ksock_route_t *
ksocknal_create_route (__u32 ipaddr, int port)
{
	ksock_route_t *route;

	LIBCFS_ALLOC (route, sizeof (*route));
	if (route == NULL)
		return NULL;

	atomic_set (&route->ksnr_refcount, 1);
	route->ksnr_peer = NULL;
	route->ksnr_retry_interval = 0;		/* OK to connect at any time */
	route->ksnr_ipaddr = ipaddr;
	route->ksnr_port = port;
	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;
	route->ksnr_connected = 0;
	route->ksnr_deleted = 0;
	route->ksnr_conn_count = 0;
	route->ksnr_share_count = 0;

	return route;
}
void
ksocknal_destroy_route (ksock_route_t *route)
{
	LASSERT (atomic_read(&route->ksnr_refcount) == 0);

	if (route->ksnr_peer != NULL)
		ksocknal_peer_decref(route->ksnr_peer);

	LIBCFS_FREE (route, sizeof (*route));
}
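/* Allocate a peer_ni for 'id' on 'ni' with one reference for the caller;
 * fails with -ESHUTDOWN if the network has started shutting down. */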
static int
ksocknal_create_peer(ksock_peer_ni_t **peerp, struct lnet_ni *ni,
		     struct lnet_process_id id)
{
	int cpt = lnet_cpt_of_nid(id.nid, ni);
	ksock_net_t *net = ni->ni_data;
	ksock_peer_ni_t *peer_ni;

	LASSERT(id.nid != LNET_NID_ANY);
	LASSERT(id.pid != LNET_PID_ANY);
	LASSERT(!in_interrupt());

	LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
	if (peer_ni == NULL)
		return -ENOMEM;

	peer_ni->ksnp_ni = ni;
	peer_ni->ksnp_id = id;
	atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
	peer_ni->ksnp_closing = 0;
	peer_ni->ksnp_accepting = 0;
	peer_ni->ksnp_proto = NULL;
	peer_ni->ksnp_last_alive = 0;
	peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	INIT_LIST_HEAD(&peer_ni->ksnp_conns);
	INIT_LIST_HEAD(&peer_ni->ksnp_routes);
	INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
	INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
	spin_lock_init(&peer_ni->ksnp_lock);

	spin_lock_bh(&net->ksnn_lock);

	if (net->ksnn_shutdown) {
		spin_unlock_bh(&net->ksnn_lock);

		LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
		CERROR("Can't create peer_ni: network shutdown\n");
		return -ESHUTDOWN;
	}

	net->ksnn_npeers++;

	spin_unlock_bh(&net->ksnn_lock);

	*peerp = peer_ni;
	return 0;
}
void
ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
{
	ksock_net_t *net = peer_ni->ksnp_ni->ni_data;

	CDEBUG (D_NET, "peer_ni %s %p deleted\n",
		libcfs_id2str(peer_ni->ksnp_id), peer_ni);

	LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
	LASSERT(peer_ni->ksnp_accepting == 0);
	LASSERT(list_empty(&peer_ni->ksnp_conns));
	LASSERT(list_empty(&peer_ni->ksnp_routes));
	LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
	LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));

	LIBCFS_FREE(peer_ni, sizeof(*peer_ni));

	/* NB a peer_ni's connections and routes keep a reference on their
	 * peer_ni until they are destroyed, so we can be assured that _all_
	 * state to do with this peer_ni has been cleaned up when its refcount
	 * drops to zero. */
	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_npeers--;
	spin_unlock_bh(&net->ksnn_lock);
}
ksock_peer_ni_t *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
	struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
	struct list_head *tmp;
	ksock_peer_ni_t *peer_ni;

	list_for_each(tmp, peer_list) {
		peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);

		LASSERT(!peer_ni->ksnp_closing);

		if (peer_ni->ksnp_ni != ni)
			continue;

		if (peer_ni->ksnp_id.nid != id.nid ||
		    peer_ni->ksnp_id.pid != id.pid)
			continue;

		CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
		       peer_ni, libcfs_id2str(id),
		       atomic_read(&peer_ni->ksnp_refcount));

		return peer_ni;
	}
	return NULL;
}
ksock_peer_ni_t *
ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
	ksock_peer_ni_t *peer_ni;

	read_lock(&ksocknal_data.ksnd_global_lock);
	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL)			/* +1 ref for caller */
		ksocknal_peer_addref(peer_ni);
	read_unlock(&ksocknal_data.ksnd_global_lock);

	return peer_ni;
}
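/* Unlink 'peer_ni' from the peer_ni hash table, releasing its claims on
 * passive interfaces and the table's reference. */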
static void
ksocknal_unlink_peer_locked(ksock_peer_ni_t *peer_ni)
{
	int i;
	__u32 ip;
	ksock_interface_t *iface;

	for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
		LASSERT(i < LNET_NUM_INTERFACES);
		ip = peer_ni->ksnp_passive_ips[i];

		iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
		/*
		 * All IPs in peer_ni->ksnp_passive_ips[] come from the
		 * interface list, therefore the call must succeed.
		 */
		LASSERT(iface != NULL);

		CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
		       peer_ni, iface, iface->ksni_nroutes);
		iface->ksni_npeers--;
	}

	LASSERT(list_empty(&peer_ni->ksnp_conns));
	LASSERT(list_empty(&peer_ni->ksnp_routes));
	LASSERT(!peer_ni->ksnp_closing);
	peer_ni->ksnp_closing = 1;
	list_del(&peer_ni->ksnp_list);
	/* lose peerlist's ref */
	ksocknal_peer_decref(peer_ni);
}
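/* Look up the 'index'th peer_ni/route entry on 'ni' and report it through
 * the out parameters; returns -ENOENT if 'index' is out of range. */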
static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
		       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
		       int *port, int *conn_count, int *share_count)
{
	ksock_peer_ni_t *peer_ni;
	struct list_head *ptmp;
	ksock_route_t *route;
	struct list_head *rtmp;
	int i;
	int j;
	int rc = -ENOENT;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni != ni)
				continue;

			if (peer_ni->ksnp_n_passive_ips == 0 &&
			    list_empty(&peer_ni->ksnp_routes)) {
				if (index-- > 0)
					continue;
				*id = peer_ni->ksnp_id;
				*myip = *peer_ip = 0;
				*port = *conn_count = *share_count = 0;
				rc = 0;
				goto out;
			}

			for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
				if (index-- > 0)
					continue;
				*id = peer_ni->ksnp_id;
				*myip = peer_ni->ksnp_passive_ips[j];
				*peer_ip = 0;
				*port = *conn_count = *share_count = 0;
				rc = 0;
				goto out;
			}

			list_for_each(rtmp, &peer_ni->ksnp_routes) {
				if (index-- > 0)
					continue;
				route = list_entry(rtmp, ksock_route_t,
						   ksnr_list);
				*id = peer_ni->ksnp_id;
				*myip = route->ksnr_myipaddr;
				*peer_ip = route->ksnr_ipaddr;
				*port = route->ksnr_port;
				*conn_count = route->ksnr_conn_count;
				*share_count = route->ksnr_share_count;
				rc = 0;
				goto out;
			}
		}
	}
out:
	read_unlock(&ksocknal_data.ksnd_global_lock);
	return rc;
}
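/* Associate 'conn' with 'route': the conn takes a ref on the route, the
 * route is (re)bound to the conn's local interface, and the route's retry
 * state is reset since the connection succeeded. */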
static void
ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
{
	ksock_peer_ni_t *peer_ni = route->ksnr_peer;
	int type = conn->ksnc_type;
	ksock_interface_t *iface;

	conn->ksnc_route = route;
	ksocknal_route_addref(route);

	if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
		if (route->ksnr_myipaddr == 0) {
			/* route wasn't bound locally yet (the initial route) */
			CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
			       libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr,
			       &conn->ksnc_myipaddr);
		} else {
			CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
			       "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr,
			       &route->ksnr_myipaddr,
			       &conn->ksnc_myipaddr);

			iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
						  route->ksnr_myipaddr);
			if (iface != NULL)
				iface->ksni_nroutes--;
		}
		route->ksnr_myipaddr = conn->ksnc_myipaddr;
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface != NULL)
			iface->ksni_nroutes++;
	}

	route->ksnr_connected |= (1 << type);
	route->ksnr_conn_count++;

	/* Successful connection => further attempts can
	 * proceed immediately */
	route->ksnr_retry_interval = 0;
}
static void
ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
{
	struct list_head *tmp;
	ksock_conn_t *conn;
	ksock_route_t *route2;

	LASSERT(!peer_ni->ksnp_closing);
	LASSERT(route->ksnr_peer == NULL);
	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT(route->ksnr_connected == 0);

	/* LASSERT(unique) */
	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route2 = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
			CERROR("Duplicate route %s %pI4h\n",
			       libcfs_id2str(peer_ni->ksnp_id),
			       &route->ksnr_ipaddr);
			LBUG();
		}
	}

	route->ksnr_peer = peer_ni;
	ksocknal_peer_addref(peer_ni);
	/* peer_ni's routelist takes over my ref on 'route' */
	list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);

	list_for_each(tmp, &peer_ni->ksnp_conns) {
		conn = list_entry(tmp, ksock_conn_t, ksnc_list);

		if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
			continue;

		ksocknal_associate_route_conn_locked(route, conn);
		/* keep going (typed routes) */
	}
}
static void
ksocknal_del_route_locked (ksock_route_t *route)
{
	ksock_peer_ni_t *peer_ni = route->ksnr_peer;
	ksock_interface_t *iface;
	ksock_conn_t *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;

	LASSERT(!route->ksnr_deleted);

	/* Close associated conns */
	list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

		if (conn->ksnc_route != route)
			continue;

		ksocknal_close_conn_locked(conn, 0);
	}

	if (route->ksnr_myipaddr != 0) {
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface != NULL)
			iface->ksni_nroutes--;
	}

	route->ksnr_deleted = 1;
	list_del(&route->ksnr_list);
	ksocknal_route_decref(route);		/* drop peer_ni's ref */

	if (list_empty(&peer_ni->ksnp_routes) &&
	    list_empty(&peer_ni->ksnp_conns)) {
		/* I've just removed the last route to a peer_ni with no active
		 * connections */
		ksocknal_unlink_peer_locked(peer_ni);
	}
}
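/* Add a route to 'id' at 'ipaddr':'port', creating the peer_ni if it does
 * not already exist; an existing route just gains a share count. */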
int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
		  int port)
{
	struct list_head *tmp;
	ksock_peer_ni_t *peer_ni;
	ksock_peer_ni_t *peer2;
	ksock_route_t *route;
	ksock_route_t *route2;
	int rc;

	if (id.nid == LNET_NID_ANY ||
	    id.pid == LNET_PID_ANY)
		return -EINVAL;

	/* Have a brand new peer_ni ready... */
	rc = ksocknal_create_peer(&peer_ni, ni, id);
	if (rc != 0)
		return rc;

	route = ksocknal_create_route (ipaddr, port);
	if (route == NULL) {
		ksocknal_peer_decref(peer_ni);
		return -ENOMEM;
	}

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	/* always called with a ref on ni, so shutdown can't have started */
	LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

	peer2 = ksocknal_find_peer_locked(ni, id);
	if (peer2 != NULL) {
		ksocknal_peer_decref(peer_ni);
		peer_ni = peer2;
	} else {
		/* peer_ni table takes my ref on peer_ni */
		list_add_tail(&peer_ni->ksnp_list,
			      ksocknal_nid2peerlist(id.nid));
	}

	route2 = NULL;
	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route2 = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route2->ksnr_ipaddr == ipaddr)
			break;

		route2 = NULL;
	}
	if (route2 == NULL) {
		ksocknal_add_route_locked(peer_ni, route);
		route->ksnr_share_count++;
	} else {
		ksocknal_route_decref(route);
		route2->ksnr_share_count++;
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return 0;
}
static void
ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
{
	ksock_conn_t *conn;
	ksock_route_t *route;
	struct list_head *tmp;
	struct list_head *nxt;
	int nshared;

	LASSERT(!peer_ni->ksnp_closing);

	/* Extra ref prevents peer_ni disappearing until I'm done with it */
	ksocknal_peer_addref(peer_ni);

	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		/* no match */
		if (!(ip == 0 || route->ksnr_ipaddr == ip))
			continue;

		route->ksnr_share_count = 0;
		/* This deletes associated conns too */
		ksocknal_del_route_locked(route);
	}

	nshared = 0;
	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);
		nshared += route->ksnr_share_count;
	}

	if (nshared == 0) {
		/* remove everything else if there are no explicit entries
		 * left */
		list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
			route = list_entry(tmp, ksock_route_t, ksnr_list);

			/* we should only be removing auto-entries */
			LASSERT(route->ksnr_share_count == 0);
			ksocknal_del_route_locked(route);
		}

		list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);

			ksocknal_close_conn_locked(conn, 0);
		}
	}

	ksocknal_peer_decref(peer_ni);
	/* NB peer_ni unlinks itself when last conn/route is removed */
}
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct list_head *ptmp;
	struct list_head *pnxt;
	ksock_peer_ni_t *peer_ni;
	int lo;
	int hi;
	int i;
	int rc = -ENOENT;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	if (id.nid != LNET_NID_ANY) {
		hi = (int)(ksocknal_nid2peerlist(id.nid) -
			   ksocknal_data.ksnd_peers);
		lo = hi;
	} else {
		lo = 0;
		hi = ksocknal_data.ksnd_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt,
				   &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni != ni)
				continue;

			if (!((id.nid == LNET_NID_ANY ||
			       peer_ni->ksnp_id.nid == id.nid) &&
			      (id.pid == LNET_PID_ANY ||
			       peer_ni->ksnp_id.pid == id.pid)))
				continue;

			ksocknal_peer_addref(peer_ni);	/* a ref for me... */

			ksocknal_del_peer_locked(peer_ni, ip);

			if (peer_ni->ksnp_closing &&
			    !list_empty(&peer_ni->ksnp_tx_queue)) {
				LASSERT(list_empty(&peer_ni->ksnp_conns));
				LASSERT(list_empty(&peer_ni->ksnp_routes));

				list_splice_init(&peer_ni->ksnp_tx_queue,
						 &zombies);
			}

			ksocknal_peer_decref(peer_ni);	/* ...till here */

			rc = 0;				/* matched! */
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(ni, &zombies, -ENETDOWN);

	return rc;
}
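/* Return the 'index'th connection on 'ni' with a ref held for the caller,
 * or NULL if there is no such connection. */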
static ksock_conn_t *
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
	ksock_peer_ni_t *peer_ni;
	struct list_head *ptmp;
	ksock_conn_t *conn;
	struct list_head *ctmp;
	int i;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			LASSERT(!peer_ni->ksnp_closing);

			if (peer_ni->ksnp_ni != ni)
				continue;

			list_for_each(ctmp, &peer_ni->ksnp_conns) {
				if (index-- > 0)
					continue;

				conn = list_entry(ctmp, ksock_conn_t,
						  ksnc_list);
				ksocknal_conn_addref(conn);
				read_unlock(&ksocknal_data.ksnd_global_lock);
				return conn;
			}
		}
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	return NULL;
}
static ksock_sched_t *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
	struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
	ksock_sched_t *sched;
	int i;

	LASSERT(info->ksi_nthreads > 0);

	sched = &info->ksi_scheds[0];
	/*
	 * NB: it's safe so far, but info->ksi_nthreads could be changed
	 * at runtime when we have dynamic LNet configuration, then we
	 * need to take care of this.
	 */
	for (i = 1; i < info->ksi_nthreads; i++) {
		if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
			sched = &info->ksi_scheds[i];
	}

	return sched;
}
static int
ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
	ksock_net_t *net = ni->ni_data;
	int i;
	int nip;

	read_lock(&ksocknal_data.ksnd_global_lock);

	nip = net->ksnn_ninterfaces;
	LASSERT(nip <= LNET_NUM_INTERFACES);

	/*
	 * Only offer interfaces for additional connections if I have
	 * more than one.
	 */
	if (nip < 2) {
		read_unlock(&ksocknal_data.ksnd_global_lock);
		return 0;
	}

	for (i = 0; i < nip; i++) {
		ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
		LASSERT(ipaddrs[i] != 0);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
	return nip;
}
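/* Return the index of the address in 'ips' that best matches 'iface':
 * same-subnet addresses are preferred, ties broken by closest XOR. */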
static int
ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
{
	int best_netmatch = 0;
	int best_xor = 0;
	int best = -1;
	int this_xor;
	int this_netmatch;
	int i;

	for (i = 0; i < nips; i++) {
		if (ips[i] == 0)
			continue;

		this_xor = (ips[i] ^ iface->ksni_ipaddr);
		this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;

		if (!(best < 0 ||
		      best_netmatch < this_netmatch ||
		      (best_netmatch == this_netmatch &&
		       best_xor > this_xor)))
			continue;

		best = i;
		best_netmatch = this_netmatch;
		best_xor = this_xor;
	}

	LASSERT (best >= 0);
	return best;
}
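/* Choose which local interfaces to advertise to this peer_ni and record
 * them as its passive IPs; overwrites 'peerips' with the chosen local
 * addresses and returns how many were selected. */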
static int
ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
{
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
	ksock_interface_t *iface;
	ksock_interface_t *best_iface;
	int n_ips;
	int i;
	int j;
	int k;
	__u32 ip;
	__u32 xor;
	int this_netmatch;
	int best_netmatch;
	int best_npeers;

	/* CAVEAT EMPTOR: We do all our interface matching with an
	 * exclusive hold of global lock at IRQ priority.  We're only
	 * expecting to be dealing with small numbers of interfaces, so the
	 * O(n**3)-ness shouldn't matter */

	/* Also note that I'm not going to return more than n_peerips
	 * interfaces, even if I have more myself */

	write_lock_bh(global_lock);

	LASSERT(n_peerips <= LNET_NUM_INTERFACES);
	LASSERT(net->ksnn_ninterfaces <= LNET_NUM_INTERFACES);

	/* Only match interfaces for additional connections
	 * if I have > 1 interface */
	n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
		MIN(n_peerips, net->ksnn_ninterfaces);

	for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
		/*	      ^ yes really... */

		/* If we have any new interfaces, first tick off all the
		 * peer_ni IPs that match old interfaces, then choose new
		 * interfaces to match the remaining peer_ni IPS.
		 * We don't forget interfaces we've stopped using; we might
		 * start using them again... */

		if (i < peer_ni->ksnp_n_passive_ips) {
			/* Old interface. */
			ip = peer_ni->ksnp_passive_ips[i];
			best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);

			/* peer_ni passive ips are kept up to date */
			LASSERT(best_iface != NULL);
		} else {
			/* choose a new interface */
			LASSERT (i == peer_ni->ksnp_n_passive_ips);

			best_iface = NULL;
			best_netmatch = 0;
			best_npeers = 0;

			for (j = 0; j < net->ksnn_ninterfaces; j++) {
				iface = &net->ksnn_interfaces[j];
				ip = iface->ksni_ipaddr;

				for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
					if (peer_ni->ksnp_passive_ips[k] == ip)
						break;

				if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
					continue;

				k = ksocknal_match_peerip(iface, peerips, n_peerips);
				xor = (ip ^ peerips[k]);
				this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

				if (!(best_iface == NULL ||
				      best_netmatch < this_netmatch ||
				      (best_netmatch == this_netmatch &&
				       best_npeers > iface->ksni_npeers)))
					continue;

				best_iface = iface;
				best_netmatch = this_netmatch;
				best_npeers = iface->ksni_npeers;
			}

			LASSERT(best_iface != NULL);

			best_iface->ksni_npeers++;
			ip = best_iface->ksni_ipaddr;
			peer_ni->ksnp_passive_ips[i] = ip;
			peer_ni->ksnp_n_passive_ips = i+1;
		}

		/* mark the best matching peer_ni IP used */
		j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
		peerips[j] = 0;
	}

	/* Overwrite input peer_ni IP addresses */
	memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));

	write_unlock_bh(global_lock);

	return n_ips;
}
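/* Create additional routes from local interfaces to each of the peer_ni's
 * advertised IPs so traffic can spread across interface pairs. */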
static void
ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
		       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
	ksock_route_t *newroute = NULL;
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	struct lnet_ni *ni = peer_ni->ksnp_ni;
	ksock_net_t *net = ni->ni_data;
	struct list_head *rtmp;
	ksock_route_t *route;
	ksock_interface_t *iface;
	ksock_interface_t *best_iface;
	int best_netmatch;
	int this_netmatch;
	int best_nroutes;
	int i;
	int j;

	/* CAVEAT EMPTOR: We do all our interface matching with an
	 * exclusive hold of global lock at IRQ priority.  We're only
	 * expecting to be dealing with small numbers of interfaces, so the
	 * O(n**3)-ness here shouldn't matter */

	write_lock_bh(global_lock);

	if (net->ksnn_ninterfaces < 2) {
		/* Only create additional connections
		 * if I have > 1 interface */
		write_unlock_bh(global_lock);
		return;
	}

	LASSERT(npeer_ipaddrs <= LNET_NUM_INTERFACES);

	for (i = 0; i < npeer_ipaddrs; i++) {
		if (newroute != NULL) {
			newroute->ksnr_ipaddr = peer_ipaddrs[i];
		} else {
			write_unlock_bh(global_lock);

			newroute = ksocknal_create_route(peer_ipaddrs[i], port);
			if (newroute == NULL)
				return;

			write_lock_bh(global_lock);
		}

		if (peer_ni->ksnp_closing) {
			/* peer_ni got closed under me */
			break;
		}

		/* Already got a route? */
		route = NULL;
		list_for_each(rtmp, &peer_ni->ksnp_routes) {
			route = list_entry(rtmp, ksock_route_t, ksnr_list);

			if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
				break;

			route = NULL;
		}
		if (route != NULL)
			continue;

		best_iface = NULL;
		best_nroutes = 0;
		best_netmatch = 0;

		LASSERT(net->ksnn_ninterfaces <= LNET_NUM_INTERFACES);

		/* Select interface to connect from */
		for (j = 0; j < net->ksnn_ninterfaces; j++) {
			iface = &net->ksnn_interfaces[j];

			/* Using this interface already? */
			list_for_each(rtmp, &peer_ni->ksnp_routes) {
				route = list_entry(rtmp, ksock_route_t,
						   ksnr_list);

				if (route->ksnr_myipaddr == iface->ksni_ipaddr)
					break;

				route = NULL;
			}
			if (route != NULL)
				continue;

			this_netmatch = (((iface->ksni_ipaddr ^
					   newroute->ksnr_ipaddr) &
					  iface->ksni_netmask) == 0) ? 1 : 0;

			if (!(best_iface == NULL ||
			      best_netmatch < this_netmatch ||
			      (best_netmatch == this_netmatch &&
			       best_nroutes > iface->ksni_nroutes)))
				continue;

			best_iface = iface;
			best_netmatch = this_netmatch;
			best_nroutes = iface->ksni_nroutes;
		}

		if (best_iface == NULL)
			continue;

		newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
		best_iface->ksni_nroutes++;

		ksocknal_add_route_locked(peer_ni, newroute);
		newroute = NULL;
	}

	write_unlock_bh(global_lock);
	if (newroute != NULL)
		ksocknal_route_decref(newroute);
}
int
ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
	ksock_connreq_t *cr;
	int rc;
	__u32 peer_ip;
	int peer_port;

	rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
	LASSERT(rc == 0);		/* we succeeded before */

	LIBCFS_ALLOC(cr, sizeof(*cr));
	if (cr == NULL) {
		LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
				   "%pI4h: memory exhausted\n", &peer_ip);
		return -ENOMEM;
	}

	lnet_ni_addref(ni);
	cr->ksncr_ni = ni;
	cr->ksncr_sock = sock;

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
	return 0;
}
static int
ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
{
	ksock_route_t *route;

	list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
		if (route->ksnr_ipaddr == ipaddr)
			return route->ksnr_connecting;
	}
	return 0;
}
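/* Bring up a new connection: exchange HELLOs, find or create the peer_ni,
 * resolve connection races, bind the conn to a route and scheduler, and
 * finally enable the socket callbacks. 'route' is non-NULL for active
 * (outgoing) connections and NULL for passive (accepted) ones. */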
int
ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
		     struct socket *sock, int type)
{
	rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct lnet_process_id peerid;
	struct list_head *tmp;
	__u64 incarnation;
	ksock_conn_t *conn;
	ksock_conn_t *conn2;
	ksock_peer_ni_t *peer_ni = NULL;
	ksock_peer_ni_t *peer2;
	ksock_sched_t *sched;
	struct ksock_hello_msg *hello;
	int cpt;
	ksock_tx_t *tx;
	ksock_tx_t *txtmp;
	int rc;
	int rc2;
	int active;
	char *warn = NULL;

	active = (route != NULL);

	LASSERT (active == (type != SOCKLND_CONN_NONE));

	LIBCFS_ALLOC(conn, sizeof(*conn));
	if (conn == NULL) {
		rc = -ENOMEM;
		goto failed_0;
	}

	conn->ksnc_peer = NULL;
	conn->ksnc_route = NULL;
	conn->ksnc_sock = sock;
	/* 2 ref, 1 for conn, another extra ref prevents socket
	 * being closed before establishment of connection */
	atomic_set (&conn->ksnc_sock_refcount, 2);
	conn->ksnc_type = type;
	ksocknal_lib_save_callback(sock, conn);
	atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

	conn->ksnc_rx_ready = 0;
	conn->ksnc_rx_scheduled = 0;

	INIT_LIST_HEAD(&conn->ksnc_tx_queue);
	conn->ksnc_tx_ready = 0;
	conn->ksnc_tx_scheduled = 0;
	conn->ksnc_tx_carrier = NULL;
	atomic_set (&conn->ksnc_tx_nob, 0);

	LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
				     kshm_ips[LNET_NUM_INTERFACES]));
	if (hello == NULL) {
		rc = -ENOMEM;
		goto failed_1;
	}

	/* stash conn's local and remote addrs */
	rc = ksocknal_lib_get_conn_addrs (conn);
	if (rc != 0)
		goto failed_1;

	/* Find out/confirm peer_ni's NID and connection type and get the
	 * vector of interfaces she's willing to let me connect to.
	 * Passive connections use the listener timeout since the peer_ni sends
	 * eagerly */

	if (active) {
		peer_ni = route->ksnr_peer;
		LASSERT(ni == peer_ni->ksnp_ni);

		/* Active connection sends HELLO eagerly */
		hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
		peerid = peer_ni->ksnp_id;

		write_lock_bh(global_lock);
		conn->ksnc_proto = peer_ni->ksnp_proto;
		write_unlock_bh(global_lock);

		if (conn->ksnc_proto == NULL) {
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
		}

		rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
		if (rc != 0)
			goto failed_1;
	} else {
		peerid.nid = LNET_NID_ANY;
		peerid.pid = LNET_PID_ANY;

		/* Passive, get protocol from peer_ni */
		conn->ksnc_proto = NULL;
	}

	rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
	if (rc < 0)
		goto failed_1;

	LASSERT (rc == 0 || active);
	LASSERT (conn->ksnc_proto != NULL);
	LASSERT (peerid.nid != LNET_NID_ANY);
	cpt = lnet_cpt_of_nid(peerid.nid, ni);

	if (active) {
		ksocknal_peer_addref(peer_ni);
		write_lock_bh(global_lock);
	} else {
		rc = ksocknal_create_peer(&peer_ni, ni, peerid);
		if (rc != 0)
			goto failed_1;

		write_lock_bh(global_lock);

		/* called with a ref on ni, so shutdown can't have started */
		LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

		peer2 = ksocknal_find_peer_locked(ni, peerid);
		if (peer2 == NULL) {
			/* NB this puts an "empty" peer_ni in the peer_ni
			 * table (which takes my ref) */
			list_add_tail(&peer_ni->ksnp_list,
				      ksocknal_nid2peerlist(peerid.nid));
		} else {
			ksocknal_peer_decref(peer_ni);
			peer_ni = peer2;
		}

		/* +1 ref for me */
		ksocknal_peer_addref(peer_ni);
		peer_ni->ksnp_accepting++;

		/* Am I already connecting to this guy?  Resolve in
		 * favour of higher NID... */
		if (peerid.nid < ni->ni_nid &&
		    ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
			rc = EALREADY;
			warn = "connection race resolution";
			goto failed_2;
		}
	}

	if (peer_ni->ksnp_closing ||
	    (active && route->ksnr_deleted)) {
		/* peer_ni/route got closed under me */
		rc = -ESTALE;
		warn = "peer_ni/route removed";
		goto failed_2;
	}

	if (peer_ni->ksnp_proto == NULL) {
		/* Never connected before.
		 * NB recv_hello may have returned EPROTO to signal my peer_ni
		 * wants a different protocol than the one I asked for.
		 */
		LASSERT(list_empty(&peer_ni->ksnp_conns));

		peer_ni->ksnp_proto = conn->ksnc_proto;
		peer_ni->ksnp_incarnation = incarnation;
	}

	if (peer_ni->ksnp_proto != conn->ksnc_proto ||
	    peer_ni->ksnp_incarnation != incarnation) {
		/* peer_ni rebooted or I've got the wrong protocol version */
		ksocknal_close_peer_conns_locked(peer_ni, 0, 0);

		peer_ni->ksnp_proto = NULL;
		rc = ESTALE;
		warn = peer_ni->ksnp_incarnation != incarnation ?
		       "peer_ni rebooted" :
		       "wrong proto version";
		goto failed_2;
	}

	switch (rc) {
	default:
		LBUG();
	case 0:
		break;
	case EALREADY:
		warn = "lost conn race";
		goto failed_2;
	case EPROTO:
		warn = "retry with different protocol version";
		goto failed_2;
	}

	/* Refuse to duplicate an existing connection, unless this is a
	 * loopback connection */
	if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

			if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
			    conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
			    conn2->ksnc_type != conn->ksnc_type)
				continue;

			/* Reply on a passive connection attempt so the peer_ni
			 * realises we're connected. */
			LASSERT (rc == 0);
			if (!active)
				rc = EALREADY;

			warn = "duplicate";
			goto failed_2;
		}
	}

	/* If the connection created by this route didn't bind to the IP
	 * address the route connected to, the connection/route matching
	 * code below probably isn't going to work. */
	if (active &&
	    route->ksnr_ipaddr != conn->ksnc_ipaddr) {
		CERROR("Route %s %pI4h connected to %pI4h\n",
		       libcfs_id2str(peer_ni->ksnp_id),
		       &route->ksnr_ipaddr,
		       &conn->ksnc_ipaddr);
	}

	/* Search for a route corresponding to the new connection and
	 * create an association.  This allows incoming connections created
	 * by routes in my peer_ni to match my own route entries so I don't
	 * continually create duplicate routes. */
	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
			continue;

		ksocknal_associate_route_conn_locked(route, conn);
		break;
	}

	conn->ksnc_peer = peer_ni;	/* conn takes my ref on peer_ni */
	peer_ni->ksnp_last_alive = ktime_get_real_seconds();
	peer_ni->ksnp_send_keepalive = 0;
	peer_ni->ksnp_error = 0;

	sched = ksocknal_choose_scheduler_locked(cpt);
	sched->kss_nconns++;
	conn->ksnc_scheduler = sched;

	conn->ksnc_tx_last_post = ktime_get_real_seconds();
	/* Set the deadline for the outgoing HELLO to drain */
	conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
	conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	smp_mb();   /* order with adding to peer_ni's conn list */

	list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
	ksocknal_conn_addref(conn);

	ksocknal_new_packet(conn, 0);

	conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

	/* Take packets blocking for this connection. */
	list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
		    SOCKNAL_MATCH_NO)
			continue;

		list_del(&tx->tx_list);
		ksocknal_queue_tx_locked(tx, conn);
	}

	write_unlock_bh(global_lock);
	/* We've now got a new connection.  Any errors from here on are just
	 * like "normal" comms errors and we close the connection normally.
	 * NB (a) we still have to send the reply HELLO for passive
	 *	  connections,
	 *    (b) normal I/O on the conn is blocked until I setup and call the
	 *	  socket callbacks.
	 */

	CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
	       " incarnation:%lld sched[%d:%d]\n",
	       libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
	       &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
	       conn->ksnc_port, incarnation, cpt,
	       (int)(sched - &sched->kss_info->ksi_scheds[0]));

	if (active) {
		/* additional routes after interface exchange? */
		ksocknal_create_routes(peer_ni, conn->ksnc_port,
				       hello->kshm_ips, hello->kshm_nips);
	} else {
		hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
						       hello->kshm_nips);
		rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
	}

	LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
				    kshm_ips[LNET_NUM_INTERFACES]));

	/* setup the socket AFTER I've received hello (it disables
	 * SO_LINGER).  I might call back to the acceptor who may want
	 * to send a protocol version response and then close the
	 * socket; this ensures the socket only tears down after the
	 * response has been sent. */
	if (rc == 0)
		rc = ksocknal_lib_setup_sock(sock);

	write_lock_bh(global_lock);

	/* NB my callbacks block while I hold ksnd_global_lock */
	ksocknal_lib_set_callback(sock, conn);

	if (!active)
		peer_ni->ksnp_accepting--;

	write_unlock_bh(global_lock);

	if (rc != 0) {
		write_lock_bh(global_lock);
		if (!conn->ksnc_closing) {
			/* could be closed by another thread */
			ksocknal_close_conn_locked(conn, rc);
		}
		write_unlock_bh(global_lock);
	} else if (ksocknal_connsock_addref(conn) == 0) {
		/* Allow I/O to proceed. */
		ksocknal_read_callback(conn);
		ksocknal_write_callback(conn);
		ksocknal_connsock_decref(conn);
	}

	ksocknal_connsock_decref(conn);
	ksocknal_conn_decref(conn);
	return rc;

failed_2:
	if (!peer_ni->ksnp_closing &&
	    list_empty(&peer_ni->ksnp_conns) &&
	    list_empty(&peer_ni->ksnp_routes)) {
		list_add(&zombies, &peer_ni->ksnp_tx_queue);
		list_del_init(&peer_ni->ksnp_tx_queue);
		ksocknal_unlink_peer_locked(peer_ni);
	}

	write_unlock_bh(global_lock);

	if (warn != NULL) {
		if (rc < 0)
			CERROR("Not creating conn %s type %d: %s\n",
			       libcfs_id2str(peerid), conn->ksnc_type, warn);
		else
			CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
			       libcfs_id2str(peerid), conn->ksnc_type, warn);
	}

	if (!active) {
		if (rc > 0) {
			/* Request retry by replying with CONN_NONE
			 * ksnc_proto has been set already */
			conn->ksnc_type = SOCKLND_CONN_NONE;
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, peerid.nid, hello);
		}

		write_lock_bh(global_lock);
		peer_ni->ksnp_accepting--;
		write_unlock_bh(global_lock);
	}

	/*
	 * If we get here without an error code, just use -EALREADY.
	 * Depending on how we got here, the error may be positive
	 * or negative. Normalize the value for ksocknal_txlist_done().
	 */
	rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
	ksocknal_txlist_done(ni, &zombies, rc2);
	ksocknal_peer_decref(peer_ni);

failed_1:
	if (hello != NULL)
		LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
					    kshm_ips[LNET_NUM_INTERFACES]));

	LIBCFS_FREE(conn, sizeof(*conn));

failed_0:
	sock_release(sock);
	return rc;
}
void
ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
{
	/* This just does the immediate housekeeping, and queues the
	 * connection for the reaper to terminate.
	 * Caller holds ksnd_global_lock exclusively in irq context */
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	ksock_route_t *route;
	ksock_conn_t *conn2;
	struct list_head *tmp;

	LASSERT(peer_ni->ksnp_error == 0);
	LASSERT(!conn->ksnc_closing);
	conn->ksnc_closing = 1;

	/* ksnd_deathrow_conns takes over peer_ni's ref */
	list_del(&conn->ksnc_list);

	route = conn->ksnc_route;
	if (route != NULL) {
		/* dissociate conn from route... */
		LASSERT(!route->ksnr_deleted);
		LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

		conn2 = NULL;
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

			if (conn2->ksnc_route == route &&
			    conn2->ksnc_type == conn->ksnc_type)
				break;

			conn2 = NULL;
		}
		if (conn2 == NULL)
			route->ksnr_connected &= ~(1 << conn->ksnc_type);

		conn->ksnc_route = NULL;

		ksocknal_route_decref(route);	/* drop conn's ref on route */
	}

	if (list_empty(&peer_ni->ksnp_conns)) {
		/* No more connections to this peer_ni */

		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			ksock_tx_t *tx;

			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

			/* throw them to the last connection...,
			 * these TXs will be send to /dev/null by scheduler */
			list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
					    tx_list)
				ksocknal_tx_prep(conn, tx);

			spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
			list_splice_init(&peer_ni->ksnp_tx_queue,
					 &conn->ksnc_tx_queue);
			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
		}

		/* renegotiate protocol version */
		peer_ni->ksnp_proto = NULL;
		/* stash last conn close reason */
		peer_ni->ksnp_error = error;

		if (list_empty(&peer_ni->ksnp_routes)) {
			/* I've just closed last conn belonging to a
			 * peer_ni with no routes to it */
			ksocknal_unlink_peer_locked(peer_ni);
		}
	}

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	list_add_tail(&conn->ksnc_list,
		      &ksocknal_data.ksnd_deathrow_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
{
	int notify = 0;
	cfs_time_t last_alive = 0;

	/* There has been a connection failure or comms error; but I'll only
	 * tell LNET I think the peer_ni is dead if it's to another kernel and
	 * there are no connections or connection attempts in existence. */

	read_lock(&ksocknal_data.ksnd_global_lock);

	if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
	    list_empty(&peer_ni->ksnp_conns) &&
	    peer_ni->ksnp_accepting == 0 &&
	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
		notify = 1;
		last_alive = peer_ni->ksnp_last_alive;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	if (notify)
		lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
			    last_alive);
}
void
ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	ksock_tx_t *tx;
	ksock_tx_t *tmp;
	struct list_head zlist = LIST_HEAD_INIT(zlist);

	/* NB safe to finalize TXs because closing of socket will
	 * abort all buffered data */
	LASSERT(conn->ksnc_sock == NULL);

	spin_lock(&peer_ni->ksnp_lock);

	list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
				 tx_zc_list) {
		if (tx->tx_conn != conn)
			continue;

		LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);

		tx->tx_msg.ksm_zc_cookies[0] = 0;
		tx->tx_zc_aborted = 1;	/* mark it as not-acked */
		list_del(&tx->tx_zc_list);
		list_add(&tx->tx_zc_list, &zlist);
	}

	spin_unlock(&peer_ni->ksnp_lock);

	while (!list_empty(&zlist)) {
		tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);

		list_del(&tx->tx_zc_list);
		ksocknal_tx_decref(tx);
	}
}
void
ksocknal_terminate_conn(ksock_conn_t *conn)
{
	/* This gets called by the reaper (guaranteed thread context) to
	 * disengage the socket from its callbacks and close it.
	 * ksnc_refcount will eventually hit zero, and then the reaper will
	 * destroy it. */
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	ksock_sched_t *sched = conn->ksnc_scheduler;
	int failed = 0;

	LASSERT(conn->ksnc_closing);

	/* wake up the scheduler to "send" all remaining packets to /dev/null */
	spin_lock_bh(&sched->kss_lock);

	/* a closing conn is always ready to tx */
	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&
	    !list_empty(&conn->ksnc_tx_queue)) {
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up (&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);

	/* serialise with callbacks */
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_lib_reset_callback(conn->ksnc_sock, conn);

	/* OK, so this conn may not be completely disengaged from its
	 * scheduler yet, but it _has_ committed to terminate... */
	conn->ksnc_scheduler->kss_nconns--;

	if (peer_ni->ksnp_error != 0) {
		/* peer_ni's last conn closed in error */
		LASSERT(list_empty(&peer_ni->ksnp_conns));
		failed = 1;
		peer_ni->ksnp_error = 0;	/* avoid multiple notifications */
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	if (failed)
		ksocknal_peer_failed(peer_ni);

	/* The socket is closed on the final put; either here, or in
	 * ksocknal_{send,recv}msg().  Since we set up the linger2 option
	 * when the connection was established, this will close the socket
	 * immediately, aborting anything buffered in it. Any hung
	 * zero-copy transmits will therefore complete in finite time. */
	ksocknal_connsock_decref(conn);
}
void
ksocknal_queue_zombie_conn (ksock_conn_t *conn)
{
	/* Queue the conn for the reaper to destroy */
	LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
	wake_up(&ksocknal_data.ksnd_reaper_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
ksocknal_destroy_conn (ksock_conn_t *conn)
{
	cfs_time_t last_rcv;

	/* Final coup-de-grace of the reaper */
	CDEBUG (D_NET, "connection %p\n", conn);

	LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
	LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
	LASSERT (conn->ksnc_sock == NULL);
	LASSERT (conn->ksnc_route == NULL);
	LASSERT (!conn->ksnc_tx_scheduled);
	LASSERT (!conn->ksnc_rx_scheduled);
	LASSERT(list_empty(&conn->ksnc_tx_queue));

	/* complete current receive if any */
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_LNET_PAYLOAD:
		last_rcv = conn->ksnc_rx_deadline -
			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
		CERROR("Completing partial receive from %s[%d], "
		       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
		       "last alive is %ld secs ago\n",
		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
		       &conn->ksnc_ipaddr, conn->ksnc_port,
		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
		       cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
						     last_rcv)));
		lnet_finalize(conn->ksnc_cookie, -EIO);
		break;
	case SOCKNAL_RX_LNET_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of lnet header from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of ksock message from %s, "
			       "ip %pI4h:%d, with error, protocol: %d.x.\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port,
			       conn->ksnc_proto->pro_version);
		break;
	case SOCKNAL_RX_SLOP:
		if (conn->ksnc_rx_started)
			CERROR("Incomplete receive of slops from %s, "
			       "ip %pI4h:%d, with error\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       &conn->ksnc_ipaddr, conn->ksnc_port);
		break;
	default:
		LBUG ();
		break;
	}

	ksocknal_peer_decref(conn->ksnc_peer);

	LIBCFS_FREE (conn, sizeof (*conn));
}
int
ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
{
	ksock_conn_t *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;
	int count = 0;

	list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

		if (ipaddr == 0 ||
		    conn->ksnc_ipaddr == ipaddr) {
			count++;
			ksocknal_close_conn_locked (conn, why);
		}
	}

	return count;
}
int
ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
{
	ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
	__u32 ipaddr = conn->ksnc_ipaddr;
	int count;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return count;
}
int
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
	ksock_peer_ni_t *peer_ni;
	struct list_head *ptmp;
	struct list_head *pnxt;
	int lo;
	int hi;
	int i;
	int count = 0;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	if (id.nid != LNET_NID_ANY) {
		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
				ksocknal_data.ksnd_peers);
	} else {
		lo = 0;
		hi = ksocknal_data.ksnd_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {

			peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);

			if (!((id.nid == LNET_NID_ANY ||
			       id.nid == peer_ni->ksnp_id.nid) &&
			      (id.pid == LNET_PID_ANY ||
			       id.pid == peer_ni->ksnp_id.pid)))
				continue;

			count += ksocknal_close_peer_conns_locked(peer_ni,
								  ipaddr, 0);
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	/* wildcards always succeed */
	if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
		return 0;

	return (count == 0 ? -ENOENT : 0);
}
void
ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
{
	/* The router is telling me she's been notified of a change in
	 * gateway state.... */
	struct lnet_process_id id = {
		.nid = gw_nid,
		.pid = LNET_PID_ANY,
	};

	CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
		alive ? "up" : "down");

	if (!alive) {
		/* If the gateway crashed, close all open connections... */
		ksocknal_close_matching_conns (id, 0);
		return;
	}

	/* ...otherwise do nothing.  We can only establish new connections
	 * if we have autoroutes, and these connect on demand. */
}
void
ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
{
	int connect = 1;
	time64_t last_alive = 0;
	time64_t now = ktime_get_real_seconds();
	ksock_peer_ni_t *peer_ni = NULL;
	rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
	struct lnet_process_id id = {
		.nid = nid,
		.pid = LNET_PID_LUSTRE,
	};

	read_lock(glock);

	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL) {
		struct list_head *tmp;
		ksock_conn_t *conn;
		int bufnob;

		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
			bufnob = conn->ksnc_sock->sk->sk_wmem_queued;

			if (bufnob < conn->ksnc_tx_bufnob) {
				/* something got ACKed */
				conn->ksnc_tx_deadline =
					cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
				peer_ni->ksnp_last_alive = now;
				conn->ksnc_tx_bufnob = bufnob;
			}
		}

		last_alive = peer_ni->ksnp_last_alive;
		if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
			connect = 0;
	}

	read_unlock(glock);

	if (last_alive != 0)
		*when = last_alive;

	CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago, connect %d\n",
	       libcfs_nid2str(nid), peer_ni,
	       last_alive ? cfs_duration_sec(now - last_alive) : -1,
	       connect);

	if (!connect)
		return;

	ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());

	write_lock_bh(glock);

	peer_ni = ksocknal_find_peer_locked(ni, id);
	if (peer_ni != NULL)
		ksocknal_launch_all_connections_locked(peer_ni);

	write_unlock_bh(glock);
}
static void
ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
{
	int index;
	int i;
	struct list_head *tmp;
	ksock_conn_t *conn;

	for (index = 0; ; index++) {
		read_lock(&ksocknal_data.ksnd_global_lock);

		i = 0;
		conn = NULL;

		list_for_each(tmp, &peer_ni->ksnp_conns) {
			if (i++ == index) {
				conn = list_entry(tmp, ksock_conn_t,
						  ksnc_list);
				ksocknal_conn_addref(conn);
				break;
			}
		}

		read_unlock(&ksocknal_data.ksnd_global_lock);

		if (conn == NULL)
			break;

		ksocknal_lib_push_conn (conn);
		ksocknal_conn_decref(conn);
	}
}
static int
ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
{
	struct list_head *start;
	struct list_head *end;
	struct list_head *tmp;
	int rc = -ENOENT;
	unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;

	if (id.nid == LNET_NID_ANY) {
		start = &ksocknal_data.ksnd_peers[0];
		end = &ksocknal_data.ksnd_peers[hsize - 1];
	} else {
		start = end = ksocknal_nid2peerlist(id.nid);
	}

	for (tmp = start; tmp <= end; tmp++) {
		int peer_off;	/* searching offset in peer_ni hash table */

		for (peer_off = 0; ; peer_off++) {
			ksock_peer_ni_t *peer_ni;
			int i = 0;

			read_lock(&ksocknal_data.ksnd_global_lock);
			list_for_each_entry(peer_ni, tmp, ksnp_list) {
				if (!((id.nid == LNET_NID_ANY ||
				       id.nid == peer_ni->ksnp_id.nid) &&
				      (id.pid == LNET_PID_ANY ||
				       id.pid == peer_ni->ksnp_id.pid)))
					continue;

				if (i++ == peer_off) {
					ksocknal_peer_addref(peer_ni);
					break;
				}
			}
			read_unlock(&ksocknal_data.ksnd_global_lock);

			if (i <= peer_off) /* no match */
				break;

			rc = 0;
			ksocknal_push_peer(peer_ni);
			ksocknal_peer_decref(peer_ni);
		}
	}
	return rc;
}
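/* Register a local interface (address and netmask) with 'ni', counting any
 * peers and routes already using that address. */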
static int
ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
	ksock_net_t *net = ni->ni_data;
	ksock_interface_t *iface;
	int rc;
	int i;
	int j;
	struct list_head *ptmp;
	ksock_peer_ni_t *peer_ni;
	struct list_head *rtmp;
	ksock_route_t *route;

	if (ipaddress == 0 ||
	    netmask == 0)
		return -EINVAL;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	iface = ksocknal_ip2iface(ni, ipaddress);
	if (iface != NULL) {
		/* silently ignore dups */
		rc = 0;
	} else if (net->ksnn_ninterfaces == LNET_NUM_INTERFACES) {
		rc = -ENOSPC;
	} else {
		iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];

		iface->ksni_ipaddr = ipaddress;
		iface->ksni_netmask = netmask;
		iface->ksni_nroutes = 0;
		iface->ksni_npeers = 0;

		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
			list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
				peer_ni = list_entry(ptmp, ksock_peer_ni_t,
						     ksnp_list);

				for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
					if (peer_ni->ksnp_passive_ips[j] == ipaddress)
						iface->ksni_npeers++;

				list_for_each(rtmp, &peer_ni->ksnp_routes) {
					route = list_entry(rtmp,
							   ksock_route_t,
							   ksnr_list);

					if (route->ksnr_myipaddr == ipaddress)
						iface->ksni_nroutes++;
				}
			}
		}

		rc = 0;
		/* NB only new connections will pay attention to the new interface! */
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return rc;
}
static void
ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
{
	struct list_head *tmp;
	struct list_head *nxt;
	ksock_route_t *route;
	ksock_conn_t *conn;
	int i;
	int j;

	for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
		if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
			for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
				peer_ni->ksnp_passive_ips[j-1] =
					peer_ni->ksnp_passive_ips[j];
			peer_ni->ksnp_n_passive_ips--;
			break;
		}

	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		if (route->ksnr_myipaddr != ipaddr)
			continue;

		if (route->ksnr_share_count != 0) {
			/* Manually created; keep, but unbind */
			route->ksnr_myipaddr = 0;
		} else {
			ksocknal_del_route_locked(route);
		}
	}

	list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
		conn = list_entry(tmp, ksock_conn_t, ksnc_list);

		if (conn->ksnc_myipaddr == ipaddr)
			ksocknal_close_conn_locked (conn, 0);
	}
}
static int
ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
	ksock_net_t *net = ni->ni_data;
	int rc = -ENOENT;
	struct list_head *tmp;
	struct list_head *nxt;
	ksock_peer_ni_t *peer_ni;
	__u32 this_ip;
	int i;
	int j;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		this_ip = net->ksnn_interfaces[i].ksni_ipaddr;

		if (!(ipaddress == 0 ||
		      ipaddress == this_ip))
			continue;

		rc = 0;

		for (j = i+1; j < net->ksnn_ninterfaces; j++)
			net->ksnn_interfaces[j-1] =
				net->ksnn_interfaces[j];

		net->ksnn_ninterfaces--;

		for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
			list_for_each_safe(tmp, nxt,
					   &ksocknal_data.ksnd_peers[j]) {
				peer_ni = list_entry(tmp, ksock_peer_ni_t,
						     ksnp_list);

				if (peer_ni->ksnp_ni != ni)
					continue;

				ksocknal_peer_del_interface_locked(peer_ni, this_ip);
			}
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return rc;
}
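/* Dispatch IOC_LIBCFS_* ioctls for this NI: interface and peer_ni
 * management, connection inspection, close and push. */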
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
	struct lnet_process_id id = {0};
	struct libcfs_ioctl_data *data = arg;
	int rc;

	switch (cmd) {
	case IOC_LIBCFS_GET_INTERFACE: {
		ksock_net_t *net = ni->ni_data;
		ksock_interface_t *iface;

		read_lock(&ksocknal_data.ksnd_global_lock);

		if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
			rc = -ENOENT;
		} else {
			rc = 0;
			iface = &net->ksnn_interfaces[data->ioc_count];

			data->ioc_u32[0] = iface->ksni_ipaddr;
			data->ioc_u32[1] = iface->ksni_netmask;
			data->ioc_u32[2] = iface->ksni_npeers;
			data->ioc_u32[3] = iface->ksni_nroutes;
		}

		read_unlock(&ksocknal_data.ksnd_global_lock);
		return rc;
	}

	case IOC_LIBCFS_ADD_INTERFACE:
		return ksocknal_add_interface(ni,
					      data->ioc_u32[0], /* IP address */
					      data->ioc_u32[1]); /* net mask */

	case IOC_LIBCFS_DEL_INTERFACE:
		return ksocknal_del_interface(ni,
					      data->ioc_u32[0]); /* IP address */

	case IOC_LIBCFS_GET_PEER: {
		__u32 myip = 0;
		__u32 ip = 0;
		int port = 0;
		int conn_count = 0;
		int share_count = 0;

		rc = ksocknal_get_peer_info(ni, data->ioc_count,
					    &id, &myip, &ip, &port,
					    &conn_count, &share_count);
		if (rc != 0)
			return rc;

		data->ioc_nid    = id.nid;
		data->ioc_count  = share_count;
		data->ioc_u32[0] = ip;
		data->ioc_u32[1] = port;
		data->ioc_u32[2] = myip;
		data->ioc_u32[3] = conn_count;
		data->ioc_u32[4] = id.pid;
		return 0;
	}

	case IOC_LIBCFS_ADD_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_LUSTRE;
		return ksocknal_add_peer (ni, id,
					  data->ioc_u32[0], /* IP */
					  data->ioc_u32[1]); /* port */

	case IOC_LIBCFS_DEL_PEER:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_del_peer (ni, id,
					  data->ioc_u32[0]); /* IP */

	case IOC_LIBCFS_GET_CONN: {
		int txmem;
		int rxmem;
		int nagle;
		ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);

		if (conn == NULL)
			return -ENOENT;

		ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);

		data->ioc_count  = txmem;
		data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
		data->ioc_flags  = nagle;
		data->ioc_u32[0] = conn->ksnc_ipaddr;
		data->ioc_u32[1] = conn->ksnc_port;
		data->ioc_u32[2] = conn->ksnc_myipaddr;
		data->ioc_u32[3] = conn->ksnc_type;
		data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
		data->ioc_u32[5] = rxmem;
		data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
		ksocknal_conn_decref(conn);
		return 0;
	}

	case IOC_LIBCFS_CLOSE_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_close_matching_conns (id,
						      data->ioc_u32[0]);

	case IOC_LIBCFS_REGISTER_MYNID:
		/* Ignore if this is a noop */
		if (data->ioc_nid == ni->ni_nid)
			return 0;

		CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
		       libcfs_nid2str(data->ioc_nid),
		       libcfs_nid2str(ni->ni_nid));
		return -EINVAL;

	case IOC_LIBCFS_PUSH_CONNECTION:
		id.nid = data->ioc_nid;
		id.pid = LNET_PID_ANY;
		return ksocknal_push(ni, id);

	default:
		return -EINVAL;
	}
	/* not reached */
}
static void
ksocknal_free_buffers (void)
{
	LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);

	if (ksocknal_data.ksnd_sched_info != NULL) {
		struct ksock_sched_info *info;
		int i;

		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
			if (info->ksi_scheds != NULL) {
				LIBCFS_FREE(info->ksi_scheds,
					    info->ksi_nthreads_max *
					    sizeof(info->ksi_scheds[0]));
			}
		}
		cfs_percpt_free(ksocknal_data.ksnd_sched_info);
	}

	LIBCFS_FREE (ksocknal_data.ksnd_peers,
		     sizeof(struct list_head) *
		     ksocknal_data.ksnd_peer_hash_size);

	spin_lock(&ksocknal_data.ksnd_tx_lock);

	if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
		struct list_head zlist;
		ksock_tx_t *tx;

		list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
		list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
		spin_unlock(&ksocknal_data.ksnd_tx_lock);

		while (!list_empty(&zlist)) {
			tx = list_entry(zlist.next, ksock_tx_t, tx_list);
			list_del(&tx->tx_list);
			LIBCFS_FREE(tx, tx->tx_desc_size);
		}
	} else {
		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}
}
static void
ksocknal_base_shutdown(void)
{
	struct ksock_sched_info *info;
	ksock_sched_t *sched;
	int i;
	int j;

	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read (&libcfs_kmemory));
	LASSERT (ksocknal_data.ksnd_nnets == 0);

	switch (ksocknal_data.ksnd_init) {
	default:
		LASSERT (0);

	case SOCKNAL_INIT_ALL:
	case SOCKNAL_INIT_DATA:
		LASSERT (ksocknal_data.ksnd_peers != NULL);
		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
		}

		LASSERT(list_empty(&ksocknal_data.ksnd_nets));
		LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
		LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {
					sched = &info->ksi_scheds[j];
					LASSERT(list_empty(&sched->kss_tx_conns));
					LASSERT(list_empty(&sched->kss_rx_conns));
					LASSERT(list_empty(&sched->kss_zombie_noop_txs));
					LASSERT(sched->kss_nconns == 0);
				}
			}
		}

		/* flag threads to terminate; wake and wait for them to die */
		ksocknal_data.ksnd_shuttingdown = 1;
		wake_up_all(&ksocknal_data.ksnd_connd_waitq);
		wake_up_all(&ksocknal_data.ksnd_reaper_waitq);

		if (ksocknal_data.ksnd_sched_info != NULL) {
			cfs_percpt_for_each(info, i,
					    ksocknal_data.ksnd_sched_info) {
				if (info->ksi_scheds == NULL)
					continue;

				for (j = 0; j < info->ksi_nthreads_max; j++) {
					sched = &info->ksi_scheds[j];
					wake_up_all(&sched->kss_waitq);
				}
			}
		}

		i = 4;
		read_lock(&ksocknal_data.ksnd_global_lock);
		while (ksocknal_data.ksnd_nthreads != 0) {
			i++;
			/* power of 2? */
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "waiting for %d threads to terminate\n",
			       ksocknal_data.ksnd_nthreads);
			read_unlock(&ksocknal_data.ksnd_global_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
			read_lock(&ksocknal_data.ksnd_global_lock);
		}
		read_unlock(&ksocknal_data.ksnd_global_lock);

		ksocknal_free_buffers();

		ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
		break;
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read (&libcfs_kmemory));

	module_put(THIS_MODULE);
}
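/* Allocate the global socklnd state (peer_ni hash table, per-CPT scheduler
 * pools) and spawn the connection and reaper daemons. */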
2363 ksocknal_base_startup(void)
2365 struct ksock_sched_info *info;
2369 LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2370 LASSERT (ksocknal_data.ksnd_nnets == 0);
2372 memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2374 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2375 LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2376 sizeof(struct list_head) *
2377 ksocknal_data.ksnd_peer_hash_size);
2378 if (ksocknal_data.ksnd_peers == NULL)
2381 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2382 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2384 rwlock_init(&ksocknal_data.ksnd_global_lock);
2385 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2387 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2388 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2389 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2390 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2391 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2393 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2394 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2395 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2396 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2398 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2399 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2401 /* NB memset above zeros whole of ksocknal_data */
2403 /* flag lists/ptrs/locks initialised */
2404 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2405 try_module_get(THIS_MODULE);
2407 ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2409 if (ksocknal_data.ksnd_sched_info == NULL)
2412 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2413 ksock_sched_t *sched;
2416 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2417 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2418 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2420 /* max to half of CPUs, assume another half should be
2421 * reserved for upper layer modules */
2422 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2425 info->ksi_nthreads_max = nthrs;
2428 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2429 info->ksi_nthreads_max * sizeof(*sched));
2430 if (info->ksi_scheds == NULL)
		for (; nthrs > 0; nthrs--) {
			sched = &info->ksi_scheds[nthrs - 1];

			sched->kss_info = info;
			spin_lock_init(&sched->kss_lock);
			INIT_LIST_HEAD(&sched->kss_rx_conns);
			INIT_LIST_HEAD(&sched->kss_tx_conns);
			INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
			init_waitqueue_head(&sched->kss_waitq);
		}
	}
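	/* Connection daemons do the blocking handshake work for incoming
	 * and outgoing connections, so scheduler threads never stall on
	 * connection setup.  ksnd_connd_starting counts daemons that have
	 * been requested but are not yet in their main loop; the stamps
	 * below are used to grow and shrink the pool at runtime. */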
	ksocknal_data.ksnd_connd_starting = 0;
	ksocknal_data.ksnd_connd_failed_stamp = 0;
	ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
	/* must have at least 2 connds to remain responsive to accepts while
	 * connecting */
	if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
		*ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
	if (*ksocknal_tunables.ksnd_nconnds_max <
	    *ksocknal_tunables.ksnd_nconnds) {
		/* the tunables are pointers; aliasing nconnds_max to nconnds
		 * clamps the maximum to the configured minimum */
		ksocknal_tunables.ksnd_nconnds_max =
			ksocknal_tunables.ksnd_nconnds;
	}
	for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
		char name[16];

		spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
		ksocknal_data.ksnd_connd_starting++;
		spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

		snprintf(name, sizeof(name), "socknal_cd%02d", i);
		rc = ksocknal_thread_start(ksocknal_connd,
					   (void *)((uintptr_t)i), name);
		if (rc != 0) {
			spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
			ksocknal_data.ksnd_connd_starting--;
			spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
			CERROR("Can't spawn socknal connd: %d\n", rc);
			goto failed;
		}
	}
	rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
	if (rc != 0) {
		CERROR("Can't spawn socknal reaper: %d\n", rc);
		goto failed;
	}

	/* flag everything initialised */
	ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;

	return 0;

 failed:
	ksocknal_base_shutdown();
	return -ENETDOWN;
}
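/*
 * Dump one peer_ni still attached to @ni, with its routes and conns, to
 * the console.  Called (under the global lock read-side) from
 * ksocknal_shutdown() while it waits for peer counts to drain, to help
 * diagnose a shutdown stuck on a leaked reference.
 */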
static void
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
	ksock_peer_ni_t *peer_ni = NULL;
	struct list_head *tmp;
	int i;
	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
			peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);

			if (peer_ni->ksnp_ni == ni)
				break;

			/* reset so a full scan ends with peer_ni == NULL */
			peer_ni = NULL;
		}
	}
	if (peer_ni != NULL) {
		ksock_route_t *route;
		ksock_conn_t *conn;

		CWARN("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
		      "closing %d, accepting %d, err %d, zcookie %llu, "
		      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
		      atomic_read(&peer_ni->ksnp_refcount),
		      peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
		      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
		      peer_ni->ksnp_zc_next_cookie,
		      !list_empty(&peer_ni->ksnp_tx_queue),
		      !list_empty(&peer_ni->ksnp_zc_req_list));
		list_for_each(tmp, &peer_ni->ksnp_routes) {
			route = list_entry(tmp, ksock_route_t, ksnr_list);
			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
			      "del %d\n", atomic_read(&route->ksnr_refcount),
			      route->ksnr_scheduled, route->ksnr_connecting,
			      route->ksnr_connected, route->ksnr_deleted);
		}
		list_for_each(tmp, &peer_ni->ksnp_conns) {
			conn = list_entry(tmp, ksock_conn_t, ksnc_list);
			CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
			      atomic_read(&conn->ksnc_conn_refcount),
			      atomic_read(&conn->ksnc_sock_refcount),
			      conn->ksnc_type, conn->ksnc_closing);
		}
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
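/*
 * Tear down one network instance: block creation of new peers, delete
 * the existing ones, wait for their state to drain, then free the
 * ksock_net_t.  The global state goes away with the last network.
 */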
void
ksocknal_shutdown(struct lnet_ni *ni)
{
	ksock_net_t *net = ni->ni_data;
	struct lnet_process_id anyid = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};
	int i;

	LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
	LASSERT(ksocknal_data.ksnd_nnets > 0);
	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_shutdown = 1;			/* prevent new peers */
	spin_unlock_bh(&net->ksnn_lock);

	/* Delete all peers */
	ksocknal_del_peer(ni, anyid, 0);
	/* Wait for all peer_ni state to clean up */
	i = 2;
	spin_lock_bh(&net->ksnn_lock);
	while (net->ksnn_npeers != 0) {
		spin_unlock_bh(&net->ksnn_lock);

		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "waiting for %d peers to disconnect\n",
		       net->ksnn_npeers);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));

		ksocknal_debug_peerhash(ni);

		spin_lock_bh(&net->ksnn_lock);
	}
	spin_unlock_bh(&net->ksnn_lock);
	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
		LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
	}
	list_del(&net->ksnn_list);
	LIBCFS_FREE(net, sizeof(*net));

	ksocknal_data.ksnd_nnets--;
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();
}
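/*
 * Autoconfigure: record up to LNET_NUM_INTERFACES IP interfaces that are
 * up (skipping loopback) in @net.  Returns the number of usable
 * interfaces found, or the lnet_ipif_enumerate() error.
 */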
static int
ksocknal_enumerate_interfaces(ksock_net_t *net)
{
	char **names;
	int i;
	int j;
	int rc;
	int n;

	n = lnet_ipif_enumerate(&names);
	if (n <= 0) {
		CERROR("Can't enumerate interfaces: %d\n", n);
		return n;
	}
	for (i = j = 0; i < n; i++) {
		int up;
		__u32 ip;
		__u32 mask;

		if (!strcmp(names[i], "lo")) /* skip the loopback IF */
			continue;

		rc = lnet_ipif_query(names[i], &up, &ip, &mask);
		if (rc != 0) {
			CWARN("Can't get interface %s info: %d\n",
			      names[i], rc);
			continue;
		}

		if (!up) {
			CWARN("Ignoring interface %s (down)\n",
			      names[i]);
			continue;
		}

		if (j == LNET_NUM_INTERFACES) {
			CWARN("Ignoring interface %s (too many interfaces)\n",
			      names[i]);
			continue;
		}

		net->ksnn_interfaces[j].ksni_ipaddr = ip;
		net->ksnn_interfaces[j].ksni_netmask = mask;
		strlcpy(net->ksnn_interfaces[j].ksni_name,
			names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
		j++;
	}
	lnet_ipif_free_enumeration(names, n);

	if (j == 0)
		CERROR("Can't find any usable interfaces\n");

	return j;
}
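/*
 * Count how many of @net's interfaces do not yet appear on any network
 * already in ksocknal_data.ksnd_nets.  Comparison is on the base device
 * name: a ':' alias suffix (e.g. "eth0:1") is temporarily stripped for
 * the comparison and restored afterwards.
 */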
static int
ksocknal_search_new_ipif(ksock_net_t *net)
{
	int new_ipif = 0;
	int i;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
		char *colon = strchr(ifnam, ':');
		int found = 0;
		ksock_net_t *tmp;
		int j;

		if (colon != NULL) /* ignore alias device */
			*colon = 0;

		list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
				    ksnn_list) {
			for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
				char *ifnam2 =
					&tmp->ksnn_interfaces[j].ksni_name[0];
				char *colon2 = strchr(ifnam2, ':');

				if (colon2 != NULL)
					*colon2 = 0;

				found = strcmp(ifnam, ifnam2) == 0;
				if (colon2 != NULL)
					*colon2 = ':';
			}
			if (found)
				break;
		}

		new_ipif += !found;
		if (colon != NULL)
			*colon = ':';
	}

	return new_ipif;
}
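/*
 * Start scheduler threads for one CPT.  Each thread's start argument is
 * its CPT and slot packed into a single long, e.g.:
 *
 *	id = KSOCK_THREAD_ID(cpt, nthread);
 *	sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
 *
 * so the scheduler can recover both from the one value it is handed.
 */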
static int
ksocknal_start_schedulers(struct ksock_sched_info *info)
{
	int nthrs;
	int rc = 0;
	int i;

	if (info->ksi_nthreads == 0) {
		if (*ksocknal_tunables.ksnd_nscheds > 0) {
			nthrs = info->ksi_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       info->ksi_cpt);
			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
		}
		nthrs = min(nthrs, info->ksi_nthreads_max);
	} else {
		LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
		/* increase two threads if there is new interface */
		nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
	}
	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];
		ksock_sched_t *sched;

		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
			 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));

		rc = ksocknal_thread_start(ksocknal_scheduler,
					   (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       info->ksi_cpt, info->ksi_nthreads + i, rc);
		break;
	}

	info->ksi_nthreads += i;
	return rc;
}
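/*
 * Make sure every CPT this network may use has scheduler threads
 * running.  CPTs that already have threads are topped up only when the
 * network brings in an interface not seen before, so a new NI on the
 * same interfaces costs nothing.
 */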
static int
ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
{
	int newif = ksocknal_search_new_ipif(net);
	int rc;
	int i;

	if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
		return -EINVAL;

	for (i = 0; i < ncpts; i++) {
		struct ksock_sched_info *info;
		int cpt = (cpts == NULL) ? i : cpts[i];

		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
		info = ksocknal_data.ksnd_sched_info[cpt];

		if (!newif && info->ksi_nthreads > 0)
			continue;

		rc = ksocknal_start_schedulers(info);
		if (rc != 0)
			return rc;
	}

	return 0;
}
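/*
 * Bring up one socklnd network: allocate the ksock_net_t, resolve the
 * interfaces it will use (those listed in the NI, or autoconfigured),
 * bind the NI to the NUMA node of the first interface, start scheduler
 * threads, and derive the NID from the first interface's IP address.
 */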
int
ksocknal_startup(struct lnet_ni *ni)
{
	ksock_net_t *net;
	int rc;
	int i;
	struct net_device *net_dev;
	int node_id;

	LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);

	if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
		rc = ksocknal_base_startup();
		if (rc != 0)
			return rc;
	}
	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL)
		goto fail_0;

	spin_lock_init(&net->ksnn_lock);
	net->ksnn_incarnation = ktime_get_real_ns();
	ni->ni_data = net;
	if (!ni->ni_net->net_tunables_set) {
		ni->ni_net->net_tunables.lct_peer_timeout =
			*ksocknal_tunables.ksnd_peertimeout;
		ni->ni_net->net_tunables.lct_max_tx_credits =
			*ksocknal_tunables.ksnd_credits;
		ni->ni_net->net_tunables.lct_peer_tx_credits =
			*ksocknal_tunables.ksnd_peertxcredits;
		ni->ni_net->net_tunables.lct_peer_rtr_credits =
			*ksocknal_tunables.ksnd_peerrtrcredits;
		ni->ni_net->net_tunables_set = true;
	}
	if (ni->ni_interfaces[0] == NULL) {
		rc = ksocknal_enumerate_interfaces(net);
		if (rc <= 0)
			goto fail_1;

		net->ksnn_ninterfaces = 1;
	} else {
		for (i = 0; i < LNET_NUM_INTERFACES; i++) {
			int up;

			if (ni->ni_interfaces[i] == NULL)
				break;

			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
				&net->ksnn_interfaces[i].ksni_ipaddr,
				&net->ksnn_interfaces[i].ksni_netmask);
			if (rc != 0) {
				CERROR("Can't get interface %s info: %d\n",
				       ni->ni_interfaces[i], rc);
				goto fail_1;
			}

			if (!up) {
				CERROR("Interface %s is down\n",
				       ni->ni_interfaces[i]);
				goto fail_1;
			}

			strlcpy(net->ksnn_interfaces[i].ksni_name,
				ni->ni_interfaces[i],
				sizeof(net->ksnn_interfaces[i].ksni_name));
		}
		net->ksnn_ninterfaces = i;
	}
	net_dev = dev_get_by_name(&init_net,
				  net->ksnn_interfaces[0].ksni_name);
	if (net_dev != NULL) {
		node_id = dev_to_node(&net_dev->dev);
		ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
		dev_put(net_dev);
	} else {
		ni->ni_dev_cpt = CFS_CPT_ANY;
	}
	/* call this before adding net to ksocknal_data.ksnd_nets */
	rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto fail_1;
	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
				net->ksnn_interfaces[0].ksni_ipaddr);
	list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);

	ksocknal_data.ksnd_nnets++;

	return 0;

 fail_1:
	LIBCFS_FREE(net, sizeof(*net));
 fail_0:
	if (ksocknal_data.ksnd_nnets == 0)
		ksocknal_base_shutdown();

	return -ENETDOWN;
}
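/*
 * Module plumbing: ksocklnd_init() fills in the_ksocklnd's operations
 * and registers it with LNet; ksocklnd_exit() unregisters it.  LNet then
 * calls back through lnd_startup/lnd_shutdown as socklnd NIs are
 * configured and torn down.
 */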
static void __exit ksocklnd_exit(void)
{
	lnet_unregister_lnd(&the_ksocklnd);
}
static int __init ksocklnd_init(void)
{
	int rc;

	/* check the ksnr_connected/connecting bitfields are large enough */
	CLASSERT(SOCKLND_CONN_NTYPES <= 4);
	CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
	/* initialize the_ksocklnd */
	the_ksocklnd.lnd_type     = SOCKLND;
	the_ksocklnd.lnd_startup  = ksocknal_startup;
	the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
	the_ksocklnd.lnd_ctl      = ksocknal_ctl;
	the_ksocklnd.lnd_send     = ksocknal_send;
	the_ksocklnd.lnd_recv     = ksocknal_recv;
	the_ksocklnd.lnd_notify   = ksocknal_notify;
	the_ksocklnd.lnd_query    = ksocknal_query;
	the_ksocklnd.lnd_accept   = ksocknal_accept;
	rc = ksocknal_tunables_init();
	if (rc != 0)
		return rc;

	lnet_register_lnd(&the_ksocklnd);

	return 0;
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");

module_init(ksocklnd_init);
module_exit(ksocklnd_exit);