4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lnet/klnds/socklnd/socklnd.c
33 * Author: Zach Brown <zab@zabbo.net>
34 * Author: Peter J. Braam <braam@clusterfs.com>
35 * Author: Phil Schwan <phil@clusterfs.com>
36 * Author: Eric Barton <eric@bartonsoftware.com>
39 #include <linux/ethtool.h>
40 #include <linux/inetdevice.h>
41 #include <linux/kernel.h>
42 #include <linux/sunrpc/addr.h>
43 #include <net/addrconf.h>
46 static const struct lnet_lnd the_ksocklnd;
47 struct ksock_nal_data ksocknal_data;
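/* Look up the ksock_interface registered for this NI whose ksni_index matches
 * the given interface index; each ksock_net carries a single interface, so
 * only that one is checked.
 */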
49 static struct ksock_interface *
50 ksocknal_index2iface(struct lnet_ni *ni, int index)
52 struct ksock_net *net = ni->ni_data;
53 struct ksock_interface *iface;
55 iface = &net->ksnn_interface;
57 if (iface->ksni_index == index)
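/* Map a local IPv4/IPv6 address to the ifindex of the net_device that owns
 * it, walking the devices in the NI's network namespace and skipping
 * interfaces that are loopback or not up.
 */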
63 static int ksocknal_ip2index(struct sockaddr *addr, struct lnet_ni *ni)
65 struct net_device *dev;
67 DECLARE_CONST_IN_IFADDR(ifa);
69 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
73 for_each_netdev(ni->ni_net_ns, dev) {
74 int flags = dev_get_flags(dev);
75 struct in_device *in_dev;
77 if (flags & IFF_LOOPBACK) /* skip the loopback IF */
80 if (!(flags & IFF_UP))
83 switch (addr->sa_family) {
85 in_dev = __in_dev_get_rcu(dev);
89 in_dev_for_each_ifa_rcu(ifa, in_dev) {
91 ((struct sockaddr_in *)addr)->sin_addr.s_addr)
96 #if IS_ENABLED(CONFIG_IPV6)
98 struct inet6_dev *in6_dev;
99 const struct inet6_ifaddr *ifa6;
100 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;

102 in6_dev = __in6_dev_get(dev);
106 list_for_each_entry_rcu(ifa6, &in6_dev->addr_list, if_list) {
107 if (ipv6_addr_cmp(&ifa6->addr,
108 &addr6->sin6_addr) == 0)
113 #endif /* IS_ENABLED(CONFIG_IPV6) */
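/* Allocate and initialise a connection callback (conn_cb) describing how to
 * connect to the given peer address: one reference for the caller, no local
 * interface bound yet (ksnr_myiface == -1) and all connection counters zero.
 */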
123 static struct ksock_conn_cb *
124 ksocknal_create_conn_cb(struct sockaddr *addr)
126 struct ksock_conn_cb *conn_cb;
128 LIBCFS_ALLOC(conn_cb, sizeof(*conn_cb));
132 refcount_set(&conn_cb->ksnr_refcount, 1);
133 conn_cb->ksnr_peer = NULL;
134 conn_cb->ksnr_retry_interval = 0; /* OK to connect at any time */
135 rpc_copy_addr((struct sockaddr *)&conn_cb->ksnr_addr, addr);
136 rpc_set_port((struct sockaddr *)&conn_cb->ksnr_addr,
138 conn_cb->ksnr_myiface = -1;
139 conn_cb->ksnr_scheduled = 0;
140 conn_cb->ksnr_connecting = 0;
141 conn_cb->ksnr_connected = 0;
142 conn_cb->ksnr_deleted = 0;
143 conn_cb->ksnr_conn_count = 0;
144 conn_cb->ksnr_ctrl_conn_count = 0;
145 conn_cb->ksnr_blki_conn_count = 0;
146 conn_cb->ksnr_blko_conn_count = 0;
147 conn_cb->ksnr_max_conns = 0;
148 conn_cb->ksnr_busy_retry_count = 0;
154 ksocknal_destroy_conn_cb(struct ksock_conn_cb *conn_cb)
156 LASSERT(refcount_read(&conn_cb->ksnr_refcount) == 0);
158 if (conn_cb->ksnr_peer)
159 ksocknal_peer_decref(conn_cb->ksnr_peer);
161 LIBCFS_FREE(conn_cb, sizeof(*conn_cb));
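/* Allocate a peer_ni for @id on @ni, charged against the owning ksock_net's
 * peer count; returns ERR_PTR(-ESHUTDOWN) if the network is shutting down or
 * ERR_PTR(-ENOMEM) if the allocation fails.
 */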
164 static struct ksock_peer_ni *
165 ksocknal_create_peer(struct lnet_ni *ni, struct lnet_processid *id)
167 int cpt = lnet_nid2cpt(&id->nid, ni);
168 struct ksock_net *net = ni->ni_data;
169 struct ksock_peer_ni *peer_ni;
171 LASSERT(!LNET_NID_IS_ANY(&id->nid));
172 LASSERT(id->pid != LNET_PID_ANY);
173 LASSERT(!in_interrupt());
175 if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
176 CERROR("Can't create peer_ni: network shutdown\n");
177 return ERR_PTR(-ESHUTDOWN);
180 LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
182 atomic_dec(&net->ksnn_npeers);
183 return ERR_PTR(-ENOMEM);
186 peer_ni->ksnp_ni = ni;
187 peer_ni->ksnp_id = *id;
188 refcount_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
189 peer_ni->ksnp_closing = 0;
190 peer_ni->ksnp_accepting = 0;
191 peer_ni->ksnp_proto = NULL;
192 peer_ni->ksnp_last_alive = 0;
193 peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
194 peer_ni->ksnp_conn_cb = NULL;
196 INIT_LIST_HEAD(&peer_ni->ksnp_conns);
197 INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
198 INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
199 spin_lock_init(&peer_ni->ksnp_lock);
205 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
207 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
209 CDEBUG(D_NET, "peer_ni %s %p deleted\n",
210 libcfs_idstr(&peer_ni->ksnp_id), peer_ni);
212 LASSERT(refcount_read(&peer_ni->ksnp_refcount) == 0);
213 LASSERT(peer_ni->ksnp_accepting == 0);
214 LASSERT(list_empty(&peer_ni->ksnp_conns));
215 LASSERT(peer_ni->ksnp_conn_cb == NULL);
216 LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
217 LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
219 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
221 /* NB a peer_ni's connections and conn_cb keep a reference on their
222 * peer_ni until they are destroyed, so we can be assured that _all_
223 * state to do with this peer_ni has been cleaned up when its refcount drops to zero. */
226 if (atomic_dec_and_test(&net->ksnn_npeers))
227 wake_up_var(&net->ksnn_npeers);
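/* Find the peer_ni for @id on @ni in the global peer hash; the caller must
 * hold ksnd_global_lock.  No reference is taken here - see ksocknal_find_peer()
 * below for the refcounted variant.
 */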
230 struct ksock_peer_ni *
231 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_processid *id)
233 struct ksock_peer_ni *peer_ni;
234 unsigned long hash = nidhash(&id->nid);
236 hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
238 LASSERT(!peer_ni->ksnp_closing);
240 if (peer_ni->ksnp_ni != ni)
243 if (!nid_same(&peer_ni->ksnp_id.nid, &id->nid) ||
244 peer_ni->ksnp_id.pid != id->pid)
247 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
248 peer_ni, libcfs_idstr(id),
249 refcount_read(&peer_ni->ksnp_refcount));
255 struct ksock_peer_ni *
256 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_processid *id)
258 struct ksock_peer_ni *peer_ni;
260 read_lock(&ksocknal_data.ksnd_global_lock);
261 peer_ni = ksocknal_find_peer_locked(ni, id);
262 if (peer_ni != NULL) /* +1 ref for caller? */
263 ksocknal_peer_addref(peer_ni);
264 read_unlock(&ksocknal_data.ksnd_global_lock);
270 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
272 LASSERT(list_empty(&peer_ni->ksnp_conns));
273 LASSERT(peer_ni->ksnp_conn_cb == NULL);
274 LASSERT(!peer_ni->ksnp_closing);
275 peer_ni->ksnp_closing = 1;
276 hlist_del(&peer_ni->ksnp_list);
277 /* lose peerlist's ref */
278 ksocknal_peer_decref(peer_ni);
283 ksocknal_dump_peer_debug_info(struct ksock_peer_ni *peer_ni)
285 struct ksock_conn *conn;
286 struct list_head *ctmp;
287 struct list_head *txtmp;
291 list_for_each(ctmp, &peer_ni->ksnp_conns) {
292 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
294 if (!list_empty(&conn->ksnc_tx_queue))
295 list_for_each(txtmp, &conn->ksnc_tx_queue) txcount++;
297 CDEBUG(D_CONSOLE, "Conn %d [type, closing, crefcnt, srefcnt]: %d, %d, %d, %d\n",
301 refcount_read(&conn->ksnc_conn_refcount),
302 refcount_read(&conn->ksnc_sock_refcount));
303 CDEBUG(D_CONSOLE, "Conn %d rx [scheduled, ready, state]: %d, %d, %d\n",
305 conn->ksnc_rx_scheduled,
307 conn->ksnc_rx_state);
308 CDEBUG(D_CONSOLE, "Conn %d tx [txqcnt, scheduled, last_post, ready, deadline]: %d, %d, %lld, %d, %lld\n",
311 conn->ksnc_tx_scheduled,
312 conn->ksnc_tx_last_post,
314 conn->ksnc_rx_deadline);
316 if (conn->ksnc_scheduler)
317 CDEBUG(D_CONSOLE, "Conn %d sched [nconns, cpt]: %d, %d\n",
319 conn->ksnc_scheduler->kss_nconns,
320 conn->ksnc_scheduler->kss_cpt);
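/* Return information about the index'th peer_ni on @ni (id, local and peer
 * IPs, port, connection count and share count) for the legacy ioctl interface.
 */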
328 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
329 struct lnet_processid *id, __u32 *myip, __u32 *peer_ip,
330 int *port, int *conn_count, int *share_count)
332 struct ksock_peer_ni *peer_ni;
333 struct ksock_conn_cb *conn_cb;
337 read_lock(&ksocknal_data.ksnd_global_lock);
339 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
341 if (peer_ni->ksnp_ni != ni)
346 *id = peer_ni->ksnp_id;
347 conn_cb = peer_ni->ksnp_conn_cb;
348 if (conn_cb == NULL) {
356 ksocknal_dump_peer_debug_info(peer_ni);
358 if (conn_cb->ksnr_addr.ss_family == AF_INET) {
359 struct sockaddr_in *sa =
360 (void *)&conn_cb->ksnr_addr;
362 rc = choose_ipv4_src(myip,
363 conn_cb->ksnr_myiface,
364 ntohl(sa->sin_addr.s_addr),
366 *peer_ip = ntohl(sa->sin_addr.s_addr);
367 *port = ntohs(sa->sin_port);
371 *peer_ip = 0xFFFFFFFF;
375 *conn_count = conn_cb->ksnr_conn_count;
380 read_unlock(&ksocknal_data.ksnd_global_lock);
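/* Return how many connections of the given socklnd type (control, bulk-in,
 * bulk-out or any) this conn_cb currently has.
 */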
385 ksocknal_get_conn_count_by_type(struct ksock_conn_cb *conn_cb,
388 unsigned int count = 0;
391 case SOCKLND_CONN_CONTROL:
392 count = conn_cb->ksnr_ctrl_conn_count;
394 case SOCKLND_CONN_BULK_IN:
395 count = conn_cb->ksnr_blki_conn_count;
397 case SOCKLND_CONN_BULK_OUT:
398 count = conn_cb->ksnr_blko_conn_count;
400 case SOCKLND_CONN_ANY:
401 count = conn_cb->ksnr_conn_count;
412 ksocknal_get_conns_per_peer(struct ksock_peer_ni *peer_ni)
414 struct lnet_ni *ni = peer_ni->ksnp_ni;
415 struct lnet_ioctl_config_socklnd_tunables *tunables;
419 tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_sock;
421 return tunables->lnd_conns_per_peer;
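/* Bump the per-type and total connection counters on @conn_cb and set the
 * matching bit in ksnr_connected once all connections of that type exist;
 * ksocknal_decr_conn_count() below does the reverse.
 */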
425 ksocknal_incr_conn_count(struct ksock_conn_cb *conn_cb,
428 conn_cb->ksnr_conn_count++;
430 /* check if all connections of the given type got created */
432 case SOCKLND_CONN_CONTROL:
433 conn_cb->ksnr_ctrl_conn_count++;
434 /* there's a single control connection per peer,
435 * two in case of loopback
437 conn_cb->ksnr_connected |= BIT(type);
439 case SOCKLND_CONN_BULK_IN:
440 conn_cb->ksnr_blki_conn_count++;
441 if (conn_cb->ksnr_blki_conn_count >= conn_cb->ksnr_max_conns)
442 conn_cb->ksnr_connected |= BIT(type);
444 case SOCKLND_CONN_BULK_OUT:
445 conn_cb->ksnr_blko_conn_count++;
446 if (conn_cb->ksnr_blko_conn_count >= conn_cb->ksnr_max_conns)
447 conn_cb->ksnr_connected |= BIT(type);
449 case SOCKLND_CONN_ANY:
450 if (conn_cb->ksnr_conn_count >= conn_cb->ksnr_max_conns)
451 conn_cb->ksnr_connected |= BIT(type);
458 CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
459 type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
464 ksocknal_decr_conn_count(struct ksock_conn_cb *conn_cb,
467 conn_cb->ksnr_conn_count--;
469 /* clear the 'connected' bit if we no longer have all connections of the given type */
471 case SOCKLND_CONN_CONTROL:
472 conn_cb->ksnr_ctrl_conn_count--;
473 /* there's a single control connection per peer,
474 * two in case of loopback
476 if (conn_cb->ksnr_ctrl_conn_count == 0)
477 conn_cb->ksnr_connected &= ~BIT(type);
479 case SOCKLND_CONN_BULK_IN:
480 conn_cb->ksnr_blki_conn_count--;
481 if (conn_cb->ksnr_blki_conn_count < conn_cb->ksnr_max_conns)
482 conn_cb->ksnr_connected &= ~BIT(type);
484 case SOCKLND_CONN_BULK_OUT:
485 conn_cb->ksnr_blko_conn_count--;
486 if (conn_cb->ksnr_blko_conn_count < conn_cb->ksnr_max_conns)
487 conn_cb->ksnr_connected &= ~BIT(type);
489 case SOCKLND_CONN_ANY:
490 if (conn_cb->ksnr_conn_count < conn_cb->ksnr_max_conns)
491 conn_cb->ksnr_connected &= ~BIT(type);
498 CDEBUG(D_NET, "Del conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
499 type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
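/* Bind @conn to @conn_cb: take a reference on the conn_cb, (re)bind it to the
 * interface the connection actually used, update the per-type connection
 * counters and reset the reconnection backoff.
 */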
503 ksocknal_associate_cb_conn_locked(struct ksock_conn_cb *conn_cb,
504 struct ksock_conn *conn)
506 struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
507 int type = conn->ksnc_type;
508 struct ksock_interface *iface;
511 conn_iface = ksocknal_ip2index((struct sockaddr *)&conn->ksnc_myaddr,
513 conn->ksnc_conn_cb = conn_cb;
514 ksocknal_conn_cb_addref(conn_cb);
516 if (conn_cb->ksnr_myiface != conn_iface) {
517 if (conn_cb->ksnr_myiface < 0) {
518 /* route wasn't bound locally yet (the initial route) */
519 CDEBUG(D_NET, "Binding %s %pISc to interface %d\n",
520 libcfs_idstr(&peer_ni->ksnp_id),
525 "Rebinding %s %pISc from interface %d to %d\n",
526 libcfs_idstr(&peer_ni->ksnp_id),
528 conn_cb->ksnr_myiface,
531 iface = ksocknal_index2iface(peer_ni->ksnp_ni,
532 conn_cb->ksnr_myiface);
534 iface->ksni_nroutes--;
536 conn_cb->ksnr_myiface = conn_iface;
537 iface = ksocknal_index2iface(peer_ni->ksnp_ni,
538 conn_cb->ksnr_myiface);
540 iface->ksni_nroutes++;
543 ksocknal_incr_conn_count(conn_cb, type);
545 /* Successful connection => further attempts can
546 * proceed immediately
548 conn_cb->ksnr_retry_interval = 0;
552 ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
553 struct ksock_conn_cb *conn_cb)
555 struct ksock_conn *conn;
556 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
558 LASSERT(!peer_ni->ksnp_closing);
559 LASSERT(!conn_cb->ksnr_peer);
560 LASSERT(!conn_cb->ksnr_scheduled);
561 LASSERT(!conn_cb->ksnr_connecting);
562 LASSERT(conn_cb->ksnr_connected == 0);
564 conn_cb->ksnr_peer = peer_ni;
565 ksocknal_peer_addref(peer_ni);
567 /* set the conn_cb's interface to the current net's interface */
568 conn_cb->ksnr_myiface = net->ksnn_interface.ksni_index;
569 net->ksnn_interface.ksni_nroutes++;
571 /* peer_ni takes over my ref on 'conn_cb' */
572 peer_ni->ksnp_conn_cb = conn_cb;
574 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
575 if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
576 (struct sockaddr *)&conn_cb->ksnr_addr))
579 ksocknal_associate_cb_conn_locked(conn_cb, conn);
580 /* keep going (typed conns) */
585 ksocknal_del_conn_cb_locked(struct ksock_conn_cb *conn_cb)
587 struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
588 struct ksock_interface *iface;
589 struct ksock_conn *conn;
590 struct ksock_conn *cnxt;
592 LASSERT(!conn_cb->ksnr_deleted);
594 /* Close associated conns */
595 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
596 if (conn->ksnc_conn_cb != conn_cb)
599 ksocknal_close_conn_locked(conn, 0);
602 if (conn_cb->ksnr_myiface >= 0) {
603 iface = ksocknal_index2iface(peer_ni->ksnp_ni,
604 conn_cb->ksnr_myiface);
606 iface->ksni_nroutes--;
609 conn_cb->ksnr_deleted = 1;
610 ksocknal_conn_cb_decref(conn_cb); /* drop peer_ni's ref */
611 peer_ni->ksnp_conn_cb = NULL;
613 if (list_empty(&peer_ni->ksnp_conns)) {
614 /* I've just removed the last route to a peer_ni with no active connections */
617 ksocknal_unlink_peer_locked(peer_ni);
622 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_processid *id,
623 struct sockaddr *addr)
625 struct ksock_peer_ni *peer_ni;
626 struct ksock_peer_ni *peer2;
627 struct ksock_conn_cb *conn_cb;
629 if (LNET_NID_IS_ANY(&id->nid) ||
630 id->pid == LNET_PID_ANY)
633 /* Have a brand new peer_ni ready... */
634 peer_ni = ksocknal_create_peer(ni, id);
636 return PTR_ERR(peer_ni);
638 conn_cb = ksocknal_create_conn_cb(addr);
640 ksocknal_peer_decref(peer_ni);
644 write_lock_bh(&ksocknal_data.ksnd_global_lock);
646 /* always called with a ref on ni, so shutdown can't have started */
647 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
650 peer2 = ksocknal_find_peer_locked(ni, id);
652 ksocknal_peer_decref(peer_ni);
655 /* peer_ni table takes my ref on peer_ni */
656 hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list,
660 if (peer_ni->ksnp_conn_cb) {
661 ksocknal_conn_cb_decref(conn_cb);
663 ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
664 /* Remember conns_per_peer setting at the time
665 * of connection initiation. It will define the
666 * max number of conns per type for this conn_cb
669 conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
672 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
678 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni)
680 struct ksock_conn *conn;
681 struct ksock_conn *cnxt;
682 struct ksock_conn_cb *conn_cb;
684 LASSERT(!peer_ni->ksnp_closing);
686 /* Extra ref prevents peer_ni disappearing until I'm done with it */
687 ksocknal_peer_addref(peer_ni);
688 conn_cb = peer_ni->ksnp_conn_cb;
690 ksocknal_del_conn_cb_locked(conn_cb);
692 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns,
694 ksocknal_close_conn_locked(conn, 0);
696 ksocknal_peer_decref(peer_ni);
697 /* NB peer_ni unlinks itself when last conn/conn_cb is removed */
701 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_processid *id)
704 struct hlist_node *pnxt;
705 struct ksock_peer_ni *peer_ni;
711 write_lock_bh(&ksocknal_data.ksnd_global_lock);
713 if (id && !LNET_NID_IS_ANY(&id->nid)) {
714 lo = hash_min(nidhash(&id->nid),
715 HASH_BITS(ksocknal_data.ksnd_peers));
719 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
722 for (i = lo; i <= hi; i++) {
723 hlist_for_each_entry_safe(peer_ni, pnxt,
724 &ksocknal_data.ksnd_peers[i],
726 if (peer_ni->ksnp_ni != ni)
729 if (!((!id || LNET_NID_IS_ANY(&id->nid) ||
730 nid_same(&peer_ni->ksnp_id.nid, &id->nid)) &&
731 (!id || id->pid == LNET_PID_ANY ||
732 peer_ni->ksnp_id.pid == id->pid)))
735 ksocknal_peer_addref(peer_ni); /* a ref for me... */
737 ksocknal_del_peer_locked(peer_ni);
739 if (peer_ni->ksnp_closing &&
740 !list_empty(&peer_ni->ksnp_tx_queue)) {
741 LASSERT(list_empty(&peer_ni->ksnp_conns));
742 LASSERT(peer_ni->ksnp_conn_cb == NULL);
744 list_splice_init(&peer_ni->ksnp_tx_queue,
748 ksocknal_peer_decref(peer_ni); /* ...till here */
750 rc = 0; /* matched! */
754 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
756 ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
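/* Return the index'th connection on @ni (counting across all peers) with a
 * reference held, or NULL when no such connection exists.
 */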
761 static struct ksock_conn *
762 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
764 struct ksock_peer_ni *peer_ni;
765 struct ksock_conn *conn;
768 read_lock(&ksocknal_data.ksnd_global_lock);
770 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
771 LASSERT(!peer_ni->ksnp_closing);
773 if (peer_ni->ksnp_ni != ni)
776 list_for_each_entry(conn, &peer_ni->ksnp_conns,
781 ksocknal_conn_addref(conn);
782 read_unlock(&ksocknal_data.ksnd_global_lock);
787 read_unlock(&ksocknal_data.ksnd_global_lock);
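/* Prefer the scheduler for @cpt; if it has no threads (startup failed on that
 * CPT), fall back to any scheduler that does have threads.
 */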
791 static struct ksock_sched *
792 ksocknal_choose_scheduler_locked(unsigned int cpt)
794 struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
797 if (sched->kss_nthreads == 0) {
798 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
799 if (sched->kss_nthreads > 0) {
800 CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
801 cpt, sched->kss_cpt);
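/* lnd_accept handler: a new passive connection has arrived on the acceptor
 * socket.  Record the peer's address, queue a ksock_connreq for the connection
 * daemons and wake them to complete the handshake.
 */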
812 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
814 struct ksock_connreq *cr;
816 struct sockaddr_storage peer;
818 rc = lnet_sock_getaddr(sock, true, &peer);
820 CERROR("Can't determine new connection's address\n");
824 LIBCFS_ALLOC(cr, sizeof(*cr));
826 LCONSOLE_ERROR_MSG(0x12f,
827 "Dropping connection request from %pISc: memory exhausted\n",
834 cr->ksncr_sock = sock;
836 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
838 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
839 wake_up(&ksocknal_data.ksnd_connd_waitq);
841 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
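/* Netlink key list describing the socklnd tunables exposed through the LNet
 * netlink API; only "conns_per_peer" (a signed 32-bit value) is currently
 * published, and ksocknal_nl_set() below clamps incoming values to 1..127.
 */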
845 static const struct ln_key_list ksocknal_tunables_keys = {
846 .lkl_maxattr = LNET_NET_SOCKLND_TUNABLES_ATTR_MAX,
848 [LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER] = {
849 .lkp_value = "conns_per_peer",
850 .lkp_data_type = NLA_S32
856 ksocknal_nl_set(int cmd, struct nlattr *attr, int type, void *data)
858 struct lnet_lnd_tunables *tunables = data;
861 if (cmd != LNET_CMD_NETS)
864 if (type != LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER ||
865 nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
868 num = nla_get_s64(attr);
869 num = clamp_t(s64, num, 1, 127);
870 tunables->lnd_tun_u.lnd_sock.lnd_conns_per_peer = num;
876 ksocknal_connecting(struct ksock_conn_cb *conn_cb, struct sockaddr *sa)
879 rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr, sa))
880 return conn_cb->ksnr_connecting;
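/* Create a new connection for @ni.  @conn_cb is non-NULL (and @type is a real
 * connection type) for active connects; for passive accepts both the peer_ni
 * and the protocol version are discovered from the incoming hello.
 */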
885 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_conn_cb *conn_cb,
886 struct socket *sock, int type)
888 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
890 struct lnet_processid peerid;
892 struct ksock_conn *conn;
893 struct ksock_conn *conn2;
894 struct ksock_peer_ni *peer_ni = NULL;
895 struct ksock_peer_ni *peer2;
896 struct ksock_sched *sched;
897 struct ksock_hello_msg *hello;
900 struct ksock_tx *txtmp;
907 active = (conn_cb != NULL);
909 LASSERT(active == (type != SOCKLND_CONN_NONE));
911 LIBCFS_ALLOC(conn, sizeof(*conn));
917 conn->ksnc_peer = NULL;
918 conn->ksnc_conn_cb = NULL;
919 conn->ksnc_sock = sock;
920 /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
921 * being closed before the connection is established */
922 refcount_set(&conn->ksnc_sock_refcount, 2);
923 conn->ksnc_type = type;
924 ksocknal_lib_save_callback(sock, conn);
925 refcount_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
927 conn->ksnc_rx_ready = 0;
928 conn->ksnc_rx_scheduled = 0;
930 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
931 conn->ksnc_tx_ready = 0;
932 conn->ksnc_tx_scheduled = 0;
933 conn->ksnc_tx_carrier = NULL;
934 atomic_set(&conn->ksnc_tx_nob, 0);
936 LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
937 kshm_ips[LNET_INTERFACES_NUM]));
943 /* stash conn's local and remote addrs */
944 rc = ksocknal_lib_get_conn_addrs(conn);
948 /* Find out/confirm peer_ni's NID and connection type and get the
949 * vector of interfaces she's willing to let me connect to.
950 * Passive connections use the listener timeout since the peer_ni sends eagerly. */
955 peer_ni = conn_cb->ksnr_peer;
956 LASSERT(ni == peer_ni->ksnp_ni);
958 /* Active connection sends HELLO eagerly */
959 hello->kshm_nips = 0;
960 peerid = peer_ni->ksnp_id;
962 write_lock_bh(global_lock);
963 conn->ksnc_proto = peer_ni->ksnp_proto;
964 write_unlock_bh(global_lock);
966 if (conn->ksnc_proto == NULL) {
967 conn->ksnc_proto = &ksocknal_protocol_v3x;
968 #if SOCKNAL_VERSION_DEBUG
969 if (*ksocknal_tunables.ksnd_protocol == 2)
970 conn->ksnc_proto = &ksocknal_protocol_v2x;
971 else if (*ksocknal_tunables.ksnd_protocol == 1)
972 conn->ksnc_proto = &ksocknal_protocol_v1x;
976 rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
980 peerid.nid = LNET_ANY_NID;
981 peerid.pid = LNET_PID_ANY;
983 /* Passive, get protocol from peer_ni */
984 conn->ksnc_proto = NULL;
987 rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
991 LASSERT(rc == 0 || active);
992 LASSERT(conn->ksnc_proto != NULL);
993 LASSERT(!LNET_NID_IS_ANY(&peerid.nid));
995 cpt = lnet_nid2cpt(&peerid.nid, ni);
998 ksocknal_peer_addref(peer_ni);
999 write_lock_bh(global_lock);
1001 peer_ni = ksocknal_create_peer(ni, &peerid);
1002 if (IS_ERR(peer_ni)) {
1003 rc = PTR_ERR(peer_ni);
1007 write_lock_bh(global_lock);
1009 /* called with a ref on ni, so shutdown can't have started */
1010 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);
1012 peer2 = ksocknal_find_peer_locked(ni, &peerid);
1013 if (peer2 == NULL) {
1014 /* NB this puts an "empty" peer_ni in the peer_ni
1015 * table (which takes my ref) */
1016 hash_add(ksocknal_data.ksnd_peers,
1017 &peer_ni->ksnp_list, nidhash(&peerid.nid));
1019 ksocknal_peer_decref(peer_ni);
1024 ksocknal_peer_addref(peer_ni);
1025 peer_ni->ksnp_accepting++;
1027 /* Am I already connecting to this guy? Resolve in
1028 * favour of higher NID...
1030 if (memcmp(&peerid.nid, &ni->ni_nid, sizeof(peerid.nid)) < 0 &&
1031 ksocknal_connecting(peer_ni->ksnp_conn_cb,
1032 ((struct sockaddr *) &conn->ksnc_peeraddr))) {
1034 warn = "connection race resolution";
1039 if (peer_ni->ksnp_closing ||
1040 (active && conn_cb->ksnr_deleted)) {
1041 /* peer_ni/conn_cb got closed under me */
1043 warn = "peer_ni/conn_cb removed";
1047 if (peer_ni->ksnp_proto == NULL) {
1048 /* Never connected before.
1049 * NB recv_hello may have returned EPROTO to signal my peer_ni
1050 * wants a different protocol than the one I asked for.
1052 LASSERT(list_empty(&peer_ni->ksnp_conns));
1054 peer_ni->ksnp_proto = conn->ksnc_proto;
1055 peer_ni->ksnp_incarnation = incarnation;
1058 if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1059 peer_ni->ksnp_incarnation != incarnation) {
1060 /* peer_ni rebooted or I've got the wrong protocol version */
1061 ksocknal_close_peer_conns_locked(peer_ni, NULL, 0);
1063 peer_ni->ksnp_proto = NULL;
1065 warn = peer_ni->ksnp_incarnation != incarnation ?
1066 "peer_ni rebooted" :
1067 "wrong proto version";
1077 warn = "lost conn race";
1080 warn = "retry with different protocol version";
1084 /* Refuse to duplicate an existing connection, unless this is a
1085 * loopback connection */
1086 if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
1087 (struct sockaddr *)&conn->ksnc_myaddr)) {
1088 list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
1090 (struct sockaddr *)&conn2->ksnc_peeraddr,
1091 (struct sockaddr *)&conn->ksnc_peeraddr) ||
1093 (struct sockaddr *)&conn2->ksnc_myaddr,
1094 (struct sockaddr *)&conn->ksnc_myaddr) ||
1095 conn2->ksnc_type != conn->ksnc_type)
1099 /* If max conns per type is not registered in conn_cb
1100 * as ksnr_max_conns, use ni's conns_per_peer
1102 if ((peer_ni->ksnp_conn_cb &&
1103 num_dup < peer_ni->ksnp_conn_cb->ksnr_max_conns) ||
1104 (!peer_ni->ksnp_conn_cb &&
1105 num_dup < ksocknal_get_conns_per_peer(peer_ni)))
1108 /* Reply on a passive connection attempt so the peer_ni
1109 * realises we're connected.
1119 /* If the connection created by this route didn't bind to the IP
1120 * address the route connected to, the connection/route matching
1121 * code below probably isn't going to work.
1124 !rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr,
1125 (struct sockaddr *)&conn->ksnc_peeraddr)) {
1126 CERROR("Route %s %pISc connected to %pISc\n",
1127 libcfs_idstr(&peer_ni->ksnp_id),
1128 &conn_cb->ksnr_addr,
1129 &conn->ksnc_peeraddr);
1132 /* Search for a conn_cb corresponding to the new connection and
1133 * create an association. This allows incoming connections created
1134 * by conn_cbs in my peer_ni to match my own conn_cb entries so I don't
1135 * continually create duplicate conn_cbs.
1137 conn_cb = peer_ni->ksnp_conn_cb;
1139 if (conn_cb && rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
1140 (struct sockaddr *)&conn_cb->ksnr_addr))
1141 ksocknal_associate_cb_conn_locked(conn_cb, conn);
1143 conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
1144 peer_ni->ksnp_last_alive = ktime_get_seconds();
1145 peer_ni->ksnp_send_keepalive = 0;
1146 peer_ni->ksnp_error = 0;
1148 sched = ksocknal_choose_scheduler_locked(cpt);
1150 CERROR("no schedulers available. node is unhealthy\n");
1154 * The cpt might have changed if we ended up selecting a non cpt
1155 * native scheduler. So use the scheduler's cpt instead.
1157 cpt = sched->kss_cpt;
1158 sched->kss_nconns++;
1159 conn->ksnc_scheduler = sched;
1161 conn->ksnc_tx_last_post = ktime_get_seconds();
1162 /* Set the deadline for the outgoing HELLO to drain */
1163 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1164 conn->ksnc_tx_deadline = ktime_get_seconds() +
1166 smp_mb(); /* order with adding to peer_ni's conn list */
1168 list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1169 ksocknal_conn_addref(conn);
1171 ksocknal_new_packet(conn, 0);
1173 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1175 /* Take packets blocking for this connection. */
1176 list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1177 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1181 list_del(&tx->tx_list);
1182 ksocknal_queue_tx_locked(tx, conn);
1185 write_unlock_bh(global_lock);
1186 /* We've now got a new connection. Any errors from here on are just
1187 * like "normal" comms errors and we close the connection normally.
1188 * NB (a) we still have to send the reply HELLO for passive
1190 * (b) normal I/O on the conn is blocked until I setup and call the
1194 CDEBUG(D_NET, "New conn %s p %d.x %pISc -> %pIScp"
1195 " incarnation:%lld sched[%d]\n",
1196 libcfs_idstr(&peerid), conn->ksnc_proto->pro_version,
1197 &conn->ksnc_myaddr, &conn->ksnc_peeraddr,
1201 hello->kshm_nips = 0;
1202 rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1205 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1206 kshm_ips[LNET_INTERFACES_NUM]));
1208 /* setup the socket AFTER I've received hello (it disables
1209 * SO_LINGER). I might call back to the acceptor who may want
1210 * to send a protocol version response and then close the
1211 * socket; this ensures the socket only tears down after the
1212 * response has been sent.
1215 rc = ksocknal_lib_setup_sock(sock);
1217 write_lock_bh(global_lock);
1219 /* NB my callbacks block while I hold ksnd_global_lock */
1220 ksocknal_lib_set_callback(sock, conn);
1223 peer_ni->ksnp_accepting--;
1225 write_unlock_bh(global_lock);
1228 write_lock_bh(global_lock);
1229 if (!conn->ksnc_closing) {
1230 /* could be closed by another thread */
1231 ksocknal_close_conn_locked(conn, rc);
1233 write_unlock_bh(global_lock);
1234 } else if (ksocknal_connsock_addref(conn) == 0) {
1235 /* Allow I/O to proceed. */
1236 ksocknal_read_callback(conn);
1237 ksocknal_write_callback(conn);
1238 ksocknal_connsock_decref(conn);
1241 ksocknal_connsock_decref(conn);
1242 ksocknal_conn_decref(conn);
1247 if (!peer_ni->ksnp_closing &&
1248 list_empty(&peer_ni->ksnp_conns) &&
1249 peer_ni->ksnp_conn_cb == NULL) {
1250 list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
1251 ksocknal_unlink_peer_locked(peer_ni);
1254 write_unlock_bh(global_lock);
1258 CERROR("Not creating conn %s type %d: %s\n",
1259 libcfs_idstr(&peerid), conn->ksnc_type, warn);
1261 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1262 libcfs_idstr(&peerid), conn->ksnc_type, warn);
1267 /* Request retry by replying with CONN_NONE
1268 * ksnc_proto has been set already
1270 conn->ksnc_type = SOCKLND_CONN_NONE;
1271 hello->kshm_nips = 0;
1272 ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1275 write_lock_bh(global_lock);
1276 peer_ni->ksnp_accepting--;
1277 write_unlock_bh(global_lock);
1281 * If we get here without an error code, just use -EALREADY.
1282 * Depending on how we got here, the error may be positive
1283 * or negative. Normalize the value for ksocknal_txlist_done().
1285 rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1286 ksocknal_txlist_done(ni, &zombies, rc2);
1287 ksocknal_peer_decref(peer_ni);
1291 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1292 kshm_ips[LNET_INTERFACES_NUM]));
1294 LIBCFS_FREE(conn, sizeof(*conn));
1303 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1305 /* This just does the immediate housekeeping, and queues the
1306 * connection for the reaper to terminate.
1307 * Caller holds ksnd_global_lock exclusively in irq context */
1308 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1309 struct ksock_conn_cb *conn_cb;
1310 struct ksock_conn *conn2;
1312 int duplicate_count = 0;
1314 LASSERT(peer_ni->ksnp_error == 0);
1315 LASSERT(!conn->ksnc_closing);
1316 conn->ksnc_closing = 1;
1318 /* ksnd_deathrow_conns takes over peer_ni's ref */
1319 list_del(&conn->ksnc_list);
1321 conn_cb = conn->ksnc_conn_cb;
1322 if (conn_cb != NULL) {
1323 /* dissociate conn from cb... */
1324 LASSERT(!conn_cb->ksnr_deleted);
1326 conn_count = ksocknal_get_conn_count_by_type(conn_cb,
1328 /* connected bit is set only if all connections
1329 * of the given type got created
1331 if (conn_count == conn_cb->ksnr_max_conns)
1332 LASSERT((conn_cb->ksnr_connected &
1333 BIT(conn->ksnc_type)) != 0);
1335 if (conn_count == 1) {
1336 list_for_each_entry(conn2, &peer_ni->ksnp_conns,
1338 if (conn2->ksnc_conn_cb == conn_cb &&
1339 conn2->ksnc_type == conn->ksnc_type)
1340 duplicate_count += 1;
1342 if (duplicate_count > 0)
1343 CERROR("Found %d duplicate conns type %d\n",
1347 ksocknal_decr_conn_count(conn_cb, conn->ksnc_type);
1349 conn->ksnc_conn_cb = NULL;
1351 /* drop conn's ref on conn_cb */
1352 ksocknal_conn_cb_decref(conn_cb);
1355 if (list_empty(&peer_ni->ksnp_conns)) {
1356 /* No more connections to this peer_ni */
1358 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1359 struct ksock_tx *tx;
1361 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1363 /* throw them to the last connection...,
1364 * these TXs will be sent to /dev/null by the scheduler */
1365 list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1367 ksocknal_tx_prep(conn, tx);
1369 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1370 list_splice_init(&peer_ni->ksnp_tx_queue,
1371 &conn->ksnc_tx_queue);
1372 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1375 /* renegotiate protocol version */
1376 peer_ni->ksnp_proto = NULL;
1377 /* stash last conn close reason */
1378 peer_ni->ksnp_error = error;
1380 if (peer_ni->ksnp_conn_cb == NULL) {
1381 /* I've just closed last conn belonging to a
1382 * peer_ni with no connections to it
1384 ksocknal_unlink_peer_locked(peer_ni);
1388 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1390 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
1391 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1393 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1397 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1399 bool notify = false;
1400 time64_t last_alive = 0;
1402 /* There has been a connection failure or comms error; but I'll only
1403 * tell LNET I think the peer_ni is dead if it's to another kernel and
1404 * there are no connections or connection attempts in existence. */
1406 read_lock(&ksocknal_data.ksnd_global_lock);
1408 if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1409 list_empty(&peer_ni->ksnp_conns) &&
1410 peer_ni->ksnp_accepting == 0 &&
1411 !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
1413 last_alive = peer_ni->ksnp_last_alive;
1416 read_unlock(&ksocknal_data.ksnd_global_lock);
1419 lnet_notify(peer_ni->ksnp_ni,
1420 &peer_ni->ksnp_id.nid,
1421 false, false, last_alive);
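/* Abort any zero-copy requests still outstanding on @conn: mark them as
 * unacknowledged, move them off the peer_ni's ZC request list and drop their
 * transmit references so they can complete.
 */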
1425 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1427 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1428 struct ksock_tx *tx;
1429 struct ksock_tx *tmp;
1432 /* NB safe to finalize TXs because closing of socket will
1433 * abort all buffered data */
1434 LASSERT(conn->ksnc_sock == NULL);
1436 spin_lock(&peer_ni->ksnp_lock);
1438 list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
1440 if (tx->tx_conn != conn)
1443 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1445 tx->tx_msg.ksm_zc_cookies[0] = 0;
1446 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1447 list_move(&tx->tx_zc_list, &zlist);
1450 spin_unlock(&peer_ni->ksnp_lock);
1452 while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
1453 tx_zc_list)) != NULL) {
1454 list_del(&tx->tx_zc_list);
1455 ksocknal_tx_decref(tx);
1460 ksocknal_terminate_conn(struct ksock_conn *conn)
1462 /* This gets called by the reaper (guaranteed thread context) to
1463 * disengage the socket from its callbacks and close it.
1464 * ksnc_refcount will eventually hit zero, and then the reaper will
1467 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1468 struct ksock_sched *sched = conn->ksnc_scheduler;
1469 bool failed = false;
1471 LASSERT(conn->ksnc_closing);
1473 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1474 spin_lock_bh(&sched->kss_lock);
1476 /* a closing conn is always ready to tx */
1477 conn->ksnc_tx_ready = 1;
1479 if (!conn->ksnc_tx_scheduled &&
1480 !list_empty(&conn->ksnc_tx_queue)) {
1481 list_add_tail(&conn->ksnc_tx_list,
1482 &sched->kss_tx_conns);
1483 conn->ksnc_tx_scheduled = 1;
1484 /* extra ref for scheduler */
1485 ksocknal_conn_addref(conn);
1487 wake_up(&sched->kss_waitq);
1490 spin_unlock_bh(&sched->kss_lock);
1492 /* serialise with callbacks */
1493 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1495 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1497 /* OK, so this conn may not be completely disengaged from its
1498 * scheduler yet, but it _has_ committed to terminate...
1500 conn->ksnc_scheduler->kss_nconns--;
1502 if (peer_ni->ksnp_error != 0) {
1503 /* peer_ni's last conn closed in error */
1504 LASSERT(list_empty(&peer_ni->ksnp_conns));
1506 peer_ni->ksnp_error = 0; /* avoid multiple notifications */
1509 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1512 ksocknal_peer_failed(peer_ni);
1514 /* The socket is closed on the final put; either here, or in
1515 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1516 * when the connection was established, this will close the socket
1517 * immediately, aborting anything buffered in it. Any hung
1518 * zero-copy transmits will therefore complete in finite time.
1520 ksocknal_connsock_decref(conn);
1524 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1526 /* Queue the conn for the reaper to destroy */
1527 LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
1528 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1530 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1531 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1533 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1537 ksocknal_destroy_conn(struct ksock_conn *conn)
1541 /* Final coup-de-grace of the reaper */
1542 CDEBUG(D_NET, "connection %p\n", conn);
1544 LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
1545 LASSERT(refcount_read(&conn->ksnc_sock_refcount) == 0);
1546 LASSERT(conn->ksnc_sock == NULL);
1547 LASSERT(conn->ksnc_conn_cb == NULL);
1548 LASSERT(!conn->ksnc_tx_scheduled);
1549 LASSERT(!conn->ksnc_rx_scheduled);
1550 LASSERT(list_empty(&conn->ksnc_tx_queue));
1552 /* complete current receive if any */
1553 switch (conn->ksnc_rx_state) {
1554 case SOCKNAL_RX_LNET_PAYLOAD:
1555 last_rcv = conn->ksnc_rx_deadline -
1557 CERROR("Completing partial receive from %s[%d], ip %pIScp, with error, wanted: %d, left: %d, last alive is %lld secs ago\n",
1558 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1560 &conn->ksnc_peeraddr,
1561 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1562 ktime_get_seconds() - last_rcv);
1563 if (conn->ksnc_lnet_msg)
1564 conn->ksnc_lnet_msg->msg_health_status =
1565 LNET_MSG_STATUS_REMOTE_ERROR;
1566 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1568 case SOCKNAL_RX_LNET_HEADER:
1569 if (conn->ksnc_rx_started)
1570 CERROR("Incomplete receive of lnet header from %s, ip %pIScp, with error, protocol: %d.x.\n",
1571 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1572 &conn->ksnc_peeraddr,
1573 conn->ksnc_proto->pro_version);
1575 case SOCKNAL_RX_KSM_HEADER:
1576 if (conn->ksnc_rx_started)
1577 CERROR("Incomplete receive of ksock message from %s, ip %pIScp, with error, protocol: %d.x.\n",
1578 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1579 &conn->ksnc_peeraddr,
1580 conn->ksnc_proto->pro_version);
1582 case SOCKNAL_RX_SLOP:
1583 if (conn->ksnc_rx_started)
1584 CERROR("Incomplete receive of slops from %s, ip %pIScp, with error\n",
1585 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1586 &conn->ksnc_peeraddr);
1593 ksocknal_peer_decref(conn->ksnc_peer);
1595 LIBCFS_FREE(conn, sizeof(*conn));
1599 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni,
1600 struct sockaddr *addr, int why)
1602 struct ksock_conn *conn;
1603 struct ksock_conn *cnxt;
1606 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
1609 (struct sockaddr *)&conn->ksnc_peeraddr)) {
1611 ksocknal_close_conn_locked(conn, why);
1619 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1621 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1624 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1626 count = ksocknal_close_peer_conns_locked(
1627 peer_ni, (struct sockaddr *)&conn->ksnc_peeraddr, why);
1629 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
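/* Close all connections of peers matching @id, optionally restricted to
 * @ipaddr.  Wildcard NID/PID always succeeds; otherwise -ENOENT is returned
 * when nothing matched.
 */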
1635 ksocknal_close_matching_conns(struct lnet_processid *id, __u32 ipaddr)
1637 struct ksock_peer_ni *peer_ni;
1638 struct hlist_node *pnxt;
1643 struct sockaddr_in sa = {.sin_family = AF_INET};
1645 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1647 if (!LNET_NID_IS_ANY(&id->nid)) {
1648 lo = hash_min(nidhash(&id->nid),
1649 HASH_BITS(ksocknal_data.ksnd_peers));
1653 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1656 sa.sin_addr.s_addr = htonl(ipaddr);
1657 for (i = lo; i <= hi; i++) {
1658 hlist_for_each_entry_safe(peer_ni, pnxt,
1659 &ksocknal_data.ksnd_peers[i],
1662 if (!((LNET_NID_IS_ANY(&id->nid) ||
1663 nid_same(&id->nid, &peer_ni->ksnp_id.nid)) &&
1664 (id->pid == LNET_PID_ANY ||
1665 id->pid == peer_ni->ksnp_id.pid)))
1668 count += ksocknal_close_peer_conns_locked(
1670 ipaddr ? (struct sockaddr *)&sa : NULL, 0);
1674 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1676 /* wildcards always succeed */
1677 if (LNET_NID_IS_ANY(&id->nid) || id->pid == LNET_PID_ANY ||
1681 return (count == 0 ? -ENOENT : 0);
1685 ksocknal_notify_gw_down(struct lnet_nid *gw_nid)
1687 /* The router is telling me she's been notified of a change in gateway state. */
1690 struct lnet_processid id = {
1691 .pid = LNET_PID_ANY,
1695 CDEBUG(D_NET, "gw %s down\n", libcfs_nidstr(gw_nid));
1697 /* If the gateway crashed, close all open connections... */
1698 ksocknal_close_matching_conns(&id, 0);
1701 /* We can only establish new connections
1702 * if we have autoroutes, and these connect on demand. */
1707 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1711 struct ksock_conn *conn;
1713 for (index = 0; ; index++) {
1714 read_lock(&ksocknal_data.ksnd_global_lock);
1719 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
1721 ksocknal_conn_addref(conn);
1726 read_unlock(&ksocknal_data.ksnd_global_lock);
1731 ksocknal_lib_push_conn(conn);
1732 ksocknal_conn_decref(conn);
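/* Push the connections of every peer_ni matching @id, using
 * ksocknal_push_peer() above on each match.
 */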
1737 ksocknal_push(struct lnet_ni *ni, struct lnet_processid *id)
1744 if (!LNET_NID_IS_ANY(&id->nid)) {
1745 lo = hash_min(nidhash(&id->nid),
1746 HASH_BITS(ksocknal_data.ksnd_peers));
1750 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1753 for (bkt = lo; bkt <= hi; bkt++) {
1754 int peer_off; /* searching offset in peer_ni hash table */
1756 for (peer_off = 0; ; peer_off++) {
1757 struct ksock_peer_ni *peer_ni;
1760 read_lock(&ksocknal_data.ksnd_global_lock);
1761 hlist_for_each_entry(peer_ni,
1762 &ksocknal_data.ksnd_peers[bkt],
1764 if (!((LNET_NID_IS_ANY(&id->nid) ||
1766 &peer_ni->ksnp_id.nid)) &&
1767 (id->pid == LNET_PID_ANY ||
1768 id->pid == peer_ni->ksnp_id.pid)))
1771 if (i++ == peer_off) {
1772 ksocknal_peer_addref(peer_ni);
1776 read_unlock(&ksocknal_data.ksnd_global_lock);
1778 if (i <= peer_off) /* no match */
1782 ksocknal_push_peer(peer_ni);
1783 ksocknal_peer_decref(peer_ni);
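/* lnd_ctl handler: dispatch the legacy libcfs ioctls (get interface/peer/conn
 * info, add or delete peers, close or push connections) for this NI.
 */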
1790 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1792 struct lnet_processid id = {};
1793 struct libcfs_ioctl_data *data = arg;
1797 case IOC_LIBCFS_GET_INTERFACE: {
1798 struct ksock_net *net = ni->ni_data;
1799 struct ksock_interface *iface;
1800 struct sockaddr_in *sa;
1802 read_lock(&ksocknal_data.ksnd_global_lock);
1804 if (data->ioc_count >= 1) {
1808 iface = &net->ksnn_interface;
1810 sa = (void *)&iface->ksni_addr;
1811 if (sa->sin_family == AF_INET) {
1812 data->ioc_u32[0] = ntohl(sa->sin_addr.s_addr);
1813 data->ioc_u32[1] = iface->ksni_netmask;
1815 data->ioc_u32[0] = 0xFFFFFFFF;
1816 data->ioc_u32[1] = 0;
1818 data->ioc_u32[2] = iface->ksni_npeers;
1819 data->ioc_u32[3] = iface->ksni_nroutes;
1822 read_unlock(&ksocknal_data.ksnd_global_lock);
1826 case IOC_LIBCFS_GET_PEER: {
1831 int share_count = 0;
1833 rc = ksocknal_get_peer_info(ni, data->ioc_count,
1834 &id, &myip, &ip, &port,
1835 &conn_count, &share_count);
1839 if (!nid_is_nid4(&id.nid))
1841 data->ioc_nid = lnet_nid_to_nid4(&id.nid);
1842 data->ioc_count = share_count;
1843 data->ioc_u32[0] = ip;
1844 data->ioc_u32[1] = port;
1845 data->ioc_u32[2] = myip;
1846 data->ioc_u32[3] = conn_count;
1847 data->ioc_u32[4] = id.pid;
1851 case IOC_LIBCFS_ADD_PEER: {
1852 struct sockaddr_in sa = {.sin_family = AF_INET};
1854 id.pid = LNET_PID_LUSTRE;
1855 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1856 sa.sin_addr.s_addr = htonl(data->ioc_u32[0]);
1857 sa.sin_port = htons(data->ioc_u32[1]);
1858 return ksocknal_add_peer(ni, &id, (struct sockaddr *)&sa);
1860 case IOC_LIBCFS_DEL_PEER:
1861 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1862 id.pid = LNET_PID_ANY;
1863 return ksocknal_del_peer(ni, &id);
1865 case IOC_LIBCFS_GET_CONN: {
1869 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
1870 struct sockaddr_in *psa = (void *)&conn->ksnc_peeraddr;
1871 struct sockaddr_in *mysa = (void *)&conn->ksnc_myaddr;
1876 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
1878 data->ioc_count = txmem;
1879 data->ioc_nid = lnet_nid_to_nid4(&conn->ksnc_peer->ksnp_id.nid);
1880 data->ioc_flags = nagle;
1881 if (psa->sin_family == AF_INET)
1882 data->ioc_u32[0] = ntohl(psa->sin_addr.s_addr);
1884 data->ioc_u32[0] = 0xFFFFFFFF;
1885 data->ioc_u32[1] = rpc_get_port((struct sockaddr *)
1886 &conn->ksnc_peeraddr);
1887 if (mysa->sin_family == AF_INET)
1888 data->ioc_u32[2] = ntohl(mysa->sin_addr.s_addr);
1890 data->ioc_u32[2] = 0xFFFFFFFF;
1891 data->ioc_u32[3] = conn->ksnc_type;
1892 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
1893 data->ioc_u32[5] = rxmem;
1894 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
1895 ksocknal_conn_decref(conn);
1899 case IOC_LIBCFS_CLOSE_CONNECTION:
1900 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1901 id.pid = LNET_PID_ANY;
1902 return ksocknal_close_matching_conns(&id,
1905 case IOC_LIBCFS_REGISTER_MYNID:
1906 /* Ignore if this is a noop */
1907 if (nid_is_nid4(&ni->ni_nid) &&
1908 data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid))
1911 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1912 libcfs_nid2str(data->ioc_nid),
1913 libcfs_nidstr(&ni->ni_nid));
1916 case IOC_LIBCFS_PUSH_CONNECTION:
1917 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1918 id.pid = LNET_PID_ANY;
1919 return ksocknal_push(ni, &id);
1928 ksocknal_free_buffers(void)
1930 LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
1932 if (ksocknal_data.ksnd_schedulers != NULL)
1933 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
1935 spin_lock(&ksocknal_data.ksnd_tx_lock);
1937 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
1939 struct ksock_tx *tx;
1941 list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
1942 spin_unlock(&ksocknal_data.ksnd_tx_lock);
1944 while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
1945 tx_list)) != NULL) {
1946 list_del(&tx->tx_list);
1947 LIBCFS_FREE(tx, tx->tx_desc_size);
1950 spin_unlock(&ksocknal_data.ksnd_tx_lock);
1954 static int ksocknal_get_link_status(struct net_device *dev)
1960 if (!netif_running(dev)) {
1962 CDEBUG(D_NET, "device not running\n");
1964 /* Some devices may not be providing link settings */
1965 else if (dev->ethtool_ops->get_link) {
1966 ret = dev->ethtool_ops->get_link(dev);
1967 CDEBUG(D_NET, "get_link returns %u\n", ret);
1974 ksocknal_handle_link_state_change(struct net_device *dev,
1975 unsigned char operstate)
1977 struct lnet_ni *ni = NULL;
1978 struct ksock_net *net;
1979 struct ksock_net *cnxt;
1981 unsigned char link_down = !(operstate == IF_OPER_UP);
1982 struct in_device *in_dev;
1983 bool found_ip = false;
1984 struct ksock_interface *ksi = NULL;
1985 struct sockaddr_in *sa;
1986 __u32 ni_state_before;
1987 bool update_ping_buf = false;
1988 DECLARE_CONST_IN_IFADDR(ifa);
1990 ifindex = dev->ifindex;
1992 if (!ksocknal_data.ksnd_nnets)
1995 list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
1998 ksi = &net->ksnn_interface;
1999 sa = (void *)&ksi->ksni_addr;
2002 if (strcmp(ksi->ksni_name, dev->name))
2005 if (ksi->ksni_index == -1) {
2006 if (dev->reg_state != NETREG_REGISTERED)
2008 /* A registration just happened: save the new index for
2010 ksi->ksni_index = ifindex;
2014 if (ksi->ksni_index != ifindex)
2017 if (dev->reg_state == NETREG_UNREGISTERING) {
2018 /* Device is being unregistered; we need to clear the
2019 * index, as it can change when the device comes back */
2020 ksi->ksni_index = -1;
2026 in_dev = __in_dev_get_rtnl(dev);
2028 CDEBUG(D_NET, "Interface %s has no IPv4 status.\n",
2030 CDEBUG(D_NET, "set link fatal state to 1\n");
2031 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
2035 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
2036 if (sa->sin_addr.s_addr == ifa->ifa_local)
2042 CDEBUG(D_NET, "Interface %s has no matching ip\n",
2044 CDEBUG(D_NET, "set link fatal state to 1\n");
2045 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
2051 CDEBUG(D_NET, "set link fatal state to 1\n");
2052 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
2055 CDEBUG(D_NET, "set link fatal state to %u\n",
2056 (ksocknal_get_link_status(dev) == 0));
2057 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
2058 (ksocknal_get_link_status(dev) == 0));
2061 if (!update_ping_buf &&
2062 (atomic_read(&ni->ni_fatal_error_on) != ni_state_before))
2063 update_ping_buf = true;
2066 if (update_ping_buf)
2067 lnet_update_ping_buffer();
2074 ksocknal_handle_inetaddr_change(struct in_ifaddr *ifa, unsigned long event)
2077 struct ksock_net *net;
2078 struct ksock_net *cnxt;
2079 struct net_device *event_netdev = ifa->ifa_dev->dev;
2081 struct ksock_interface *ksi = NULL;
2082 struct sockaddr_in *sa;
2083 __u32 ni_state_before;
2084 bool update_ping_buf = false;
2086 if (!ksocknal_data.ksnd_nnets)
2089 ifindex = event_netdev->ifindex;
2091 list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
2094 ksi = &net->ksnn_interface;
2095 sa = (void *)&ksi->ksni_addr;
2097 if (ksi->ksni_index != ifindex ||
2098 strcmp(ksi->ksni_name, event_netdev->name))
2101 if (sa->sin_addr.s_addr == ifa->ifa_local) {
2102 CDEBUG(D_NET, "set link fatal state to %u\n",
2103 (event == NETDEV_DOWN));
2105 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
2106 (event == NETDEV_DOWN));
2107 if (!update_ping_buf &&
2108 ((event == NETDEV_DOWN) != ni_state_before))
2109 update_ping_buf = true;
2113 if (update_ping_buf)
2114 lnet_update_ping_buffer();
2119 /************************************
2120 * Net device notifier event handler
2121 ************************************/
2122 static int ksocknal_device_event(struct notifier_block *unused,
2123 unsigned long event, void *ptr)
2125 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2126 unsigned char operstate;
2128 operstate = dev->operstate;
2130 CDEBUG(D_NET, "devevent: status=%ld, iface=%s ifindex %d state %u\n",
2131 event, dev->name, dev->ifindex, operstate);
2137 case NETDEV_REGISTER:
2138 case NETDEV_UNREGISTER:
2139 ksocknal_handle_link_state_change(dev, operstate);
2146 /************************************
2147 * Inetaddr notifier event handler
2148 ************************************/
2149 static int ksocknal_inetaddr_event(struct notifier_block *unused,
2150 unsigned long event, void *ptr)
2152 struct in_ifaddr *ifa = ptr;
2154 CDEBUG(D_NET, "addrevent: status %ld ip addr %pI4, netmask %pI4.\n",
2155 event, &ifa->ifa_address, &ifa->ifa_mask);
2161 ksocknal_handle_inetaddr_change(ifa, event);
2168 static struct notifier_block ksocknal_dev_notifier_block = {
2169 .notifier_call = ksocknal_device_event,
2172 static struct notifier_block ksocknal_inetaddr_notifier_block = {
2173 .notifier_call = ksocknal_inetaddr_event,
2177 ksocknal_base_shutdown(void)
2179 struct ksock_sched *sched;
2180 struct ksock_peer_ni *peer_ni;
2183 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
2184 libcfs_kmem_read());
2185 LASSERT(ksocknal_data.ksnd_nnets == 0);
2187 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL) {
2188 unregister_netdevice_notifier(&ksocknal_dev_notifier_block);
2189 unregister_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
2192 switch (ksocknal_data.ksnd_init) {
2197 case SOCKNAL_INIT_ALL:
2198 case SOCKNAL_INIT_DATA:
2199 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
2202 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2203 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2204 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2205 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2206 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2208 if (ksocknal_data.ksnd_schedulers != NULL) {
2209 cfs_percpt_for_each(sched, i,
2210 ksocknal_data.ksnd_schedulers) {
2212 LASSERT(list_empty(&sched->kss_tx_conns));
2213 LASSERT(list_empty(&sched->kss_rx_conns));
2214 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2215 LASSERT(sched->kss_nconns == 0);
2219 /* flag threads to terminate; wake and wait for them to die */
2220 ksocknal_data.ksnd_shuttingdown = 1;
2221 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2222 wake_up(&ksocknal_data.ksnd_reaper_waitq);
2224 if (ksocknal_data.ksnd_schedulers != NULL) {
2225 cfs_percpt_for_each(sched, i,
2226 ksocknal_data.ksnd_schedulers)
2227 wake_up_all(&sched->kss_waitq);
2230 wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
2231 atomic_read(&ksocknal_data.ksnd_nthreads) == 0,
2232 "waiting for %d threads to terminate\n",
2233 atomic_read(&ksocknal_data.ksnd_nthreads));
2235 ksocknal_free_buffers();
2237 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2241 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
2242 libcfs_kmem_read());
2244 module_put(THIS_MODULE);
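/* One-time module-wide initialisation: set up the global tables and locks,
 * create a scheduler block per CPT, start the connection daemons and the
 * reaper, and register the netdev/inetaddr notifiers.
 */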
2248 ksocknal_base_startup(void)
2250 struct ksock_sched *sched;
2254 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2255 LASSERT(ksocknal_data.ksnd_nnets == 0);
2257 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2259 hash_init(ksocknal_data.ksnd_peers);
2261 rwlock_init(&ksocknal_data.ksnd_global_lock);
2262 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2264 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2265 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2266 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2267 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2268 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2270 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2271 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2272 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2273 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2275 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2276 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2278 /* NB memset above zeros whole of ksocknal_data */
2280 /* flag lists/ptrs/locks initialised */
2281 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2282 if (!try_module_get(THIS_MODULE))
2285 /* Create a scheduler block per available CPT */
2286 ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2288 if (ksocknal_data.ksnd_schedulers == NULL)
2291 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2295 * make sure not to allocate more threads than there are
2296 * cores/CPUs in the CPT
2298 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2299 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2300 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2303 * max to half of CPUs, assume another half should be
2304 * reserved for upper layer modules
2306 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2309 sched->kss_nthreads_max = nthrs;
2312 spin_lock_init(&sched->kss_lock);
2313 INIT_LIST_HEAD(&sched->kss_rx_conns);
2314 INIT_LIST_HEAD(&sched->kss_tx_conns);
2315 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2316 init_waitqueue_head(&sched->kss_waitq);
2319 ksocknal_data.ksnd_connd_starting = 0;
2320 ksocknal_data.ksnd_connd_failed_stamp = 0;
2321 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2322 /* must have at least 2 connds to remain responsive to accepts while connecting */
2324 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2325 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2327 if (*ksocknal_tunables.ksnd_nconnds_max <
2328 *ksocknal_tunables.ksnd_nconnds) {
2329 ksocknal_tunables.ksnd_nconnds_max =
2330 ksocknal_tunables.ksnd_nconnds;
2333 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2334 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2335 ksocknal_data.ksnd_connd_starting++;
2336 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2338 rc = ksocknal_thread_start(ksocknal_connd,
2339 (void *)((uintptr_t)i),
2340 "socknal_cd%02d", i);
2342 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2343 ksocknal_data.ksnd_connd_starting--;
2344 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2345 CERROR("Can't spawn socknal connd: %d\n", rc);
2350 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2352 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2356 register_netdevice_notifier(&ksocknal_dev_notifier_block);
2357 register_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
2359 /* flag everything initialised */
2360 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2365 ksocknal_base_shutdown();
2370 ksocknal_debug_peerhash(struct lnet_ni *ni)
2372 struct ksock_peer_ni *peer_ni;
2375 read_lock(&ksocknal_data.ksnd_global_lock);
2377 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
2378 struct ksock_conn_cb *conn_cb;
2379 struct ksock_conn *conn;
2381 if (peer_ni->ksnp_ni != ni)
2384 CWARN("Active peer_ni on shutdown: %s, ref %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2385 libcfs_idstr(&peer_ni->ksnp_id),
2386 refcount_read(&peer_ni->ksnp_refcount),
2387 peer_ni->ksnp_closing,
2388 peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2389 peer_ni->ksnp_zc_next_cookie,
2390 !list_empty(&peer_ni->ksnp_tx_queue),
2391 !list_empty(&peer_ni->ksnp_zc_req_list));
2393 conn_cb = peer_ni->ksnp_conn_cb;
2395 CWARN("ConnCB: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2396 refcount_read(&conn_cb->ksnr_refcount),
2397 conn_cb->ksnr_scheduled, conn_cb->ksnr_connecting,
2398 conn_cb->ksnr_connected, conn_cb->ksnr_deleted);
2401 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
2402 CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2403 refcount_read(&conn->ksnc_conn_refcount),
2404 refcount_read(&conn->ksnc_sock_refcount),
2405 conn->ksnc_type, conn->ksnc_closing);
2410 read_unlock(&ksocknal_data.ksnd_global_lock);
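/* lnd_shutdown handler: bias the peer count to block new peers, delete all
 * peers on this NI, wait for the remaining peer_ni state to drain and free
 * the ksock_net.
 */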
2415 ksocknal_shutdown(struct lnet_ni *ni)
2417 struct ksock_net *net = ni->ni_data;
2419 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2420 LASSERT(ksocknal_data.ksnd_nnets > 0);
2422 /* prevent new peers */
2423 atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);
2425 /* Delete all peers */
2426 ksocknal_del_peer(ni, NULL);
2428 /* Wait for all peer_ni state to clean up */
2429 wait_var_event_warning(&net->ksnn_npeers,
2430 atomic_read(&net->ksnn_npeers) ==
2431 SOCKNAL_SHUTDOWN_BIAS,
2432 "waiting for %d peers to disconnect\n",
2433 ksocknal_debug_peerhash(ni) +
2434 atomic_read(&net->ksnn_npeers) -
2435 SOCKNAL_SHUTDOWN_BIAS);
2437 LASSERT(net->ksnn_interface.ksni_npeers == 0);
2438 LASSERT(net->ksnn_interface.ksni_nroutes == 0);
2440 list_del(&net->ksnn_list);
2441 LIBCFS_FREE(net, sizeof(*net));
2443 ksocknal_data.ksnd_nnets--;
2444 if (ksocknal_data.ksnd_nnets == 0)
2445 ksocknal_base_shutdown();
2449 ksocknal_search_new_ipif(struct ksock_net *net)
2452 char *ifnam = &net->ksnn_interface.ksni_name[0];
2453 char *colon = strchr(ifnam, ':');
2455 struct ksock_net *tmp;
2460 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
2461 char *ifnam2 = &tmp->ksnn_interface.ksni_name[0];
2462 char *colon2 = strchr(ifnam2, ':');
2467 found = strcmp(ifnam, ifnam2) == 0;
2480 ksocknal_start_schedulers(struct ksock_sched *sched)
2486 if (sched->kss_nthreads == 0) {
2487 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2488 nthrs = sched->kss_nthreads_max;
2490 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2492 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2493 nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2495 nthrs = min(nthrs, sched->kss_nthreads_max);
2497 LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
2498 /* start up to two more threads when a new interface is added */
2499 nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
2502 for (i = 0; i < nthrs; i++) {
2505 id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
2506 rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id,
2507 "socknal_sd%02d_%02d",
2509 (int)KSOCK_THREAD_SID(id));
2513 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2514 sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
2518 sched->kss_nthreads += i;
2523 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2525 int newif = ksocknal_search_new_ipif(net);
2529 if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2532 for (i = 0; i < ncpts; i++) {
2533 struct ksock_sched *sched;
2534 int cpt = (cpts == NULL) ? i : cpts[i];
2536 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2537 sched = ksocknal_data.ksnd_schedulers[cpt];
2539 if (!newif && sched->kss_nthreads > 0)
2542 rc = ksocknal_start_schedulers(sched);
2550 ksocknal_startup(struct lnet_ni *ni)
2552 struct ksock_net *net;
2553 struct ksock_interface *ksi = NULL;
2554 struct lnet_inetdev *ifaces = NULL;
2557 LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);
2558 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2559 rc = ksocknal_base_startup();
2563 LIBCFS_ALLOC(net, sizeof(*net));
2567 net->ksnn_incarnation = ktime_get_real_ns();
2570 ksocknal_tunables_setup(ni);
2572 rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns, true);
2576 ksi = &net->ksnn_interface;
2578 /* An interface and/or IP address may be specified; otherwise default to
2579 * the first interface
2581 if_idx = lnet_inet_select(ni, ifaces, rc);
2585 if (!ni->ni_interface) {
2586 rc = lnet_ni_add_interface(ni, ifaces[if_idx].li_name);
2588 CWARN("ksocklnd failed to allocate ni_interface\n");
2591 ni->ni_dev_cpt = ifaces[if_idx].li_cpt;
2592 ksi->ksni_index = ifaces[if_idx].li_index;
2593 if (ifaces[if_idx].li_size == sizeof(struct in6_addr)) {
2594 struct sockaddr_in6 *sa;
2595 sa = (void *)&ksi->ksni_addr;
2596 memset(sa, 0, sizeof(*sa));
2597 sa->sin6_family = AF_INET6;
2598 memcpy(&sa->sin6_addr, ifaces[if_idx].li_ipv6addr,
2599 sizeof(struct in6_addr));
2600 ni->ni_nid.nid_size = sizeof(struct in6_addr) - 4;
2601 memcpy(&ni->ni_nid.nid_addr, ifaces[if_idx].li_ipv6addr,
2602 sizeof(struct in6_addr));
2604 struct sockaddr_in *sa;
2605 sa = (void *)&ksi->ksni_addr;
2606 memset(sa, 0, sizeof(*sa));
2607 sa->sin_family = AF_INET;
2608 sa->sin_addr.s_addr = ifaces[if_idx].li_ipaddr;
2609 ksi->ksni_netmask = ifaces[if_idx].li_netmask;
2610 ni->ni_nid.nid_size = 0;
2611 ni->ni_nid.nid_addr[0] = sa->sin_addr.s_addr;
2613 strlcpy(ksi->ksni_name, ifaces[if_idx].li_name, sizeof(ksi->ksni_name));
2615 /* call it before adding the net to ksocknal_data.ksnd_nets */
2616 rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2620 list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2622 ksocknal_data.ksnd_nnets++;
2627 LIBCFS_FREE(net, sizeof(*net));
2629 if (ksocknal_data.ksnd_nnets == 0)
2630 ksocknal_base_shutdown();
2635 static void __exit ksocklnd_exit(void)
2637 lnet_unregister_lnd(&the_ksocklnd);
2640 static const struct lnet_lnd the_ksocklnd = {
2641 .lnd_type = SOCKLND,
2642 .lnd_startup = ksocknal_startup,
2643 .lnd_shutdown = ksocknal_shutdown,
2644 .lnd_ctl = ksocknal_ctl,
2645 .lnd_send = ksocknal_send,
2646 .lnd_recv = ksocknal_recv,
2647 .lnd_notify_peer_down = ksocknal_notify_gw_down,
2648 .lnd_accept = ksocknal_accept,
2649 .lnd_nl_set = ksocknal_nl_set,
2650 .lnd_keys = &ksocknal_tunables_keys,
2653 static int __init ksocklnd_init(void)
2657 /* check ksnr_connected/connecting field large enough */
2658 BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
2659 BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
2661 rc = ksocknal_tunables_init();
2665 lnet_register_lnd(&the_ksocklnd);
2670 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2671 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2672 MODULE_VERSION("2.8.0");
2673 MODULE_LICENSE("GPL");
2675 module_init(ksocklnd_init);
2676 module_exit(ksocklnd_exit);