4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lnet/klnds/socklnd/socklnd.c
33 * Author: Zach Brown <zab@zabbo.net>
34 * Author: Peter J. Braam <braam@clusterfs.com>
35 * Author: Phil Schwan <phil@clusterfs.com>
36 * Author: Eric Barton <eric@bartonsoftware.com>
39 #include <linux/ethtool.h>
40 #include <linux/inetdevice.h>
41 #include <linux/kernel.h>
42 #include <linux/sunrpc/addr.h>
43 #include <net/addrconf.h>
46 static const struct lnet_lnd the_ksocklnd;
47 struct ksock_nal_data ksocknal_data;
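/* Note on conn_cb lifetime (summary of the code below): a conn_cb is created
 * with a single reference held by the caller; the owning peer_ni and each
 * associated conn take extra references, and ksocknal_destroy_conn_cb() only
 * runs once the refcount has dropped to zero. */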
49 static struct ksock_conn_cb *
50 ksocknal_create_conn_cb(struct sockaddr *addr)
52 struct ksock_conn_cb *conn_cb;
54 LIBCFS_ALLOC(conn_cb, sizeof(*conn_cb));
58 refcount_set(&conn_cb->ksnr_refcount, 1);
59 conn_cb->ksnr_peer = NULL;
60 conn_cb->ksnr_retry_interval = 0; /* OK to connect at any time */
61 rpc_copy_addr((struct sockaddr *)&conn_cb->ksnr_addr, addr);
62 rpc_set_port((struct sockaddr *)&conn_cb->ksnr_addr,
64 conn_cb->ksnr_scheduled = 0;
65 conn_cb->ksnr_connecting = 0;
66 conn_cb->ksnr_connected = 0;
67 conn_cb->ksnr_deleted = 0;
68 conn_cb->ksnr_conn_count = 0;
69 conn_cb->ksnr_ctrl_conn_count = 0;
70 conn_cb->ksnr_blki_conn_count = 0;
71 conn_cb->ksnr_blko_conn_count = 0;
72 conn_cb->ksnr_max_conns = 0;
73 conn_cb->ksnr_busy_retry_count = 0;
79 ksocknal_destroy_conn_cb(struct ksock_conn_cb *conn_cb)
81 LASSERT(refcount_read(&conn_cb->ksnr_refcount) == 0);
83 if (conn_cb->ksnr_peer)
84 ksocknal_peer_decref(conn_cb->ksnr_peer);
86 LIBCFS_FREE(conn_cb, sizeof(*conn_cb));
89 static struct ksock_peer_ni *
90 ksocknal_create_peer(struct lnet_ni *ni, struct lnet_processid *id)
92 int cpt = lnet_nid2cpt(&id->nid, ni);
93 struct ksock_net *net = ni->ni_data;
94 struct ksock_peer_ni *peer_ni;
96 LASSERT(!LNET_NID_IS_ANY(&id->nid));
97 LASSERT(id->pid != LNET_PID_ANY);
98 LASSERT(!in_interrupt());
100 if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
101 CERROR("Can't create peer_ni: network shutdown\n");
102 return ERR_PTR(-ESHUTDOWN);
105 LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
107 atomic_dec(&net->ksnn_npeers);
108 return ERR_PTR(-ENOMEM);
111 peer_ni->ksnp_ni = ni;
112 peer_ni->ksnp_id = *id;
113 refcount_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
114 peer_ni->ksnp_closing = 0;
115 peer_ni->ksnp_accepting = 0;
116 peer_ni->ksnp_proto = NULL;
117 peer_ni->ksnp_last_alive = 0;
118 peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
119 peer_ni->ksnp_conn_cb = NULL;
121 INIT_LIST_HEAD(&peer_ni->ksnp_conns);
122 INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
123 INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
124 spin_lock_init(&peer_ni->ksnp_lock);
130 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
132 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
134 CDEBUG(D_NET, "peer_ni %s %p deleted\n",
135 libcfs_idstr(&peer_ni->ksnp_id), peer_ni);
137 LASSERT(refcount_read(&peer_ni->ksnp_refcount) == 0);
138 LASSERT(peer_ni->ksnp_accepting == 0);
139 LASSERT(list_empty(&peer_ni->ksnp_conns));
140 LASSERT(peer_ni->ksnp_conn_cb == NULL);
141 LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
142 LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
144 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
146 /* NB a peer_ni's connections and conn_cb keep a reference on their
147 * peer_ni until they are destroyed, so we can be assured that _all_
148 * state to do with this peer_ni has been cleaned up when its refcount
151 if (atomic_dec_and_test(&net->ksnn_npeers))
152 wake_up_var(&net->ksnn_npeers);
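/* Peers live in the global ksnd_peers hash table, keyed by a hash of the NID.
 * The _locked lookup below must be called with ksnd_global_lock held (read or
 * write); ksocknal_find_peer() wraps it with the read lock and takes a ref
 * for the caller. */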
155 struct ksock_peer_ni *
156 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_processid *id)
158 struct ksock_peer_ni *peer_ni;
159 unsigned long hash = nidhash(&id->nid);
161 hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
163 LASSERT(!peer_ni->ksnp_closing);
165 if (peer_ni->ksnp_ni != ni)
168 if (!nid_same(&peer_ni->ksnp_id.nid, &id->nid) ||
169 peer_ni->ksnp_id.pid != id->pid)
172 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
173 peer_ni, libcfs_idstr(id),
174 refcount_read(&peer_ni->ksnp_refcount));
180 struct ksock_peer_ni *
181 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_processid *id)
183 struct ksock_peer_ni *peer_ni;
185 read_lock(&ksocknal_data.ksnd_global_lock);
186 peer_ni = ksocknal_find_peer_locked(ni, id);
187 if (peer_ni != NULL) /* +1 ref for caller? */
188 ksocknal_peer_addref(peer_ni);
189 read_unlock(&ksocknal_data.ksnd_global_lock);
195 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
197 LASSERT(list_empty(&peer_ni->ksnp_conns));
198 LASSERT(peer_ni->ksnp_conn_cb == NULL);
199 LASSERT(!peer_ni->ksnp_closing);
200 peer_ni->ksnp_closing = 1;
201 hlist_del(&peer_ni->ksnp_list);
202 /* lose peerlist's ref */
203 ksocknal_peer_decref(peer_ni);
208 ksocknal_dump_peer_debug_info(struct ksock_peer_ni *peer_ni)
210 struct ksock_conn *conn;
211 struct list_head *ctmp;
212 struct list_head *txtmp;
216 list_for_each(ctmp, &peer_ni->ksnp_conns) {
217 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
219 if (!list_empty(&conn->ksnc_tx_queue))
220 list_for_each(txtmp, &conn->ksnc_tx_queue) txcount++;
222 CDEBUG(D_CONSOLE, "Conn %d [type, closing, crefcnt, srefcnt]: %d, %d, %d, %d\n",
226 refcount_read(&conn->ksnc_conn_refcount),
227 refcount_read(&conn->ksnc_sock_refcount));
228 CDEBUG(D_CONSOLE, "Conn %d rx [scheduled, ready, state]: %d, %d, %d\n",
230 conn->ksnc_rx_scheduled,
232 conn->ksnc_rx_state);
233 CDEBUG(D_CONSOLE, "Conn %d tx [txqcnt, scheduled, last_post, ready, deadline]: %d, %d, %lld, %d, %lld\n",
236 conn->ksnc_tx_scheduled,
237 conn->ksnc_tx_last_post,
239 conn->ksnc_rx_deadline);
241 if (conn->ksnc_scheduler)
242 CDEBUG(D_CONSOLE, "Conn %d sched [nconns, cpt]: %d, %d\n",
244 conn->ksnc_scheduler->kss_nconns,
245 conn->ksnc_scheduler->kss_cpt);
253 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
254 struct lnet_processid *id, __u32 *myip, __u32 *peer_ip,
255 int *port, int *conn_count, int *share_count)
257 struct ksock_peer_ni *peer_ni;
258 struct ksock_conn_cb *conn_cb;
261 struct ksock_net *net;
263 read_lock(&ksocknal_data.ksnd_global_lock);
265 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
267 if (peer_ni->ksnp_ni != ni)
272 *id = peer_ni->ksnp_id;
273 conn_cb = peer_ni->ksnp_conn_cb;
274 if (conn_cb == NULL) {
282 ksocknal_dump_peer_debug_info(peer_ni);
284 if (conn_cb->ksnr_addr.ss_family == AF_INET) {
285 struct sockaddr_in *sa =
286 (void *)&conn_cb->ksnr_addr;
288 rc = choose_ipv4_src(myip,
289 net->ksnn_interface.ksni_index,
290 ntohl(sa->sin_addr.s_addr),
292 *peer_ip = ntohl(sa->sin_addr.s_addr);
293 *port = ntohs(sa->sin_port);
297 *peer_ip = 0xFFFFFFFF;
301 *conn_count = conn_cb->ksnr_conn_count;
306 read_unlock(&ksocknal_data.ksnd_global_lock);
311 ksocknal_get_conn_count_by_type(struct ksock_conn_cb *conn_cb,
314 unsigned int count = 0;
317 case SOCKLND_CONN_CONTROL:
318 count = conn_cb->ksnr_ctrl_conn_count;
320 case SOCKLND_CONN_BULK_IN:
321 count = conn_cb->ksnr_blki_conn_count;
323 case SOCKLND_CONN_BULK_OUT:
324 count = conn_cb->ksnr_blko_conn_count;
326 case SOCKLND_CONN_ANY:
327 count = conn_cb->ksnr_conn_count;
338 ksocknal_get_conns_per_peer(struct ksock_peer_ni *peer_ni)
340 struct lnet_ni *ni = peer_ni->ksnp_ni;
341 struct lnet_ioctl_config_socklnd_tunables *tunables;
345 tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_sock;
347 return tunables->lnd_conns_per_peer;
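/* ksnr_connected is a bitmask with BIT(type) set once enough connections of
 * that type exist: one for SOCKLND_CONN_CONTROL, ksnr_max_conns for the bulk
 * types and for SOCKLND_CONN_ANY.  The helpers below keep the per-type
 * counters and this bitmask in sync as connections come and go. */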
351 ksocknal_incr_conn_count(struct ksock_conn_cb *conn_cb,
354 conn_cb->ksnr_conn_count++;
356 /* check if all connections of the given type got created */
358 case SOCKLND_CONN_CONTROL:
359 conn_cb->ksnr_ctrl_conn_count++;
360 /* there's a single control connection per peer,
361 * two in case of loopback
363 conn_cb->ksnr_connected |= BIT(type);
365 case SOCKLND_CONN_BULK_IN:
366 conn_cb->ksnr_blki_conn_count++;
367 if (conn_cb->ksnr_blki_conn_count >= conn_cb->ksnr_max_conns)
368 conn_cb->ksnr_connected |= BIT(type);
370 case SOCKLND_CONN_BULK_OUT:
371 conn_cb->ksnr_blko_conn_count++;
372 if (conn_cb->ksnr_blko_conn_count >= conn_cb->ksnr_max_conns)
373 conn_cb->ksnr_connected |= BIT(type);
375 case SOCKLND_CONN_ANY:
376 if (conn_cb->ksnr_conn_count >= conn_cb->ksnr_max_conns)
377 conn_cb->ksnr_connected |= BIT(type);
384 CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
385 type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
390 ksocknal_decr_conn_count(struct ksock_conn_cb *conn_cb,
393 conn_cb->ksnr_conn_count--;
395 /* clear the 'connected' bit if we dropped below the max for this type */
397 case SOCKLND_CONN_CONTROL:
398 conn_cb->ksnr_ctrl_conn_count--;
399 /* there's a single control connection per peer,
400 * two in case of loopback
402 if (conn_cb->ksnr_ctrl_conn_count == 0)
403 conn_cb->ksnr_connected &= ~BIT(type);
405 case SOCKLND_CONN_BULK_IN:
406 conn_cb->ksnr_blki_conn_count--;
407 if (conn_cb->ksnr_blki_conn_count < conn_cb->ksnr_max_conns)
408 conn_cb->ksnr_connected &= ~BIT(type);
410 case SOCKLND_CONN_BULK_OUT:
411 conn_cb->ksnr_blko_conn_count--;
412 if (conn_cb->ksnr_blko_conn_count < conn_cb->ksnr_max_conns)
413 conn_cb->ksnr_connected &= ~BIT(type);
415 case SOCKLND_CONN_ANY:
416 if (conn_cb->ksnr_conn_count < conn_cb->ksnr_max_conns)
417 conn_cb->ksnr_connected &= ~BIT(type);
424 CDEBUG(D_NET, "Del conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
425 type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
429 ksocknal_associate_cb_conn_locked(struct ksock_conn_cb *conn_cb,
430 struct ksock_conn *conn)
432 int type = conn->ksnc_type;
434 conn->ksnc_conn_cb = conn_cb;
435 ksocknal_conn_cb_addref(conn_cb);
436 ksocknal_incr_conn_count(conn_cb, type);
438 /* Successful connection => further attempts can
439 * proceed immediately
441 conn_cb->ksnr_retry_interval = 0;
445 ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
446 struct ksock_conn_cb *conn_cb)
448 struct ksock_conn *conn;
449 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
451 LASSERT(!peer_ni->ksnp_closing);
452 LASSERT(!conn_cb->ksnr_peer);
453 LASSERT(!conn_cb->ksnr_scheduled);
454 LASSERT(!conn_cb->ksnr_connecting);
455 LASSERT(conn_cb->ksnr_connected == 0);
457 conn_cb->ksnr_peer = peer_ni;
458 ksocknal_peer_addref(peer_ni);
460 /* peer_ni's ksnp_conn_cb takes over my ref on 'conn_cb' */
461 peer_ni->ksnp_conn_cb = conn_cb;
462 net->ksnn_interface.ksni_nroutes++;
464 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
465 if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
466 (struct sockaddr *)&conn_cb->ksnr_addr))
468 CDEBUG(D_NET, "call ksocknal_associate_cb_conn_locked\n");
469 ksocknal_associate_cb_conn_locked(conn_cb, conn);
470 /* keep going (typed conns) */
475 ksocknal_del_conn_cb_locked(struct ksock_conn_cb *conn_cb)
477 struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
478 struct ksock_conn *conn;
479 struct ksock_conn *cnxt;
480 struct ksock_net *net;
482 LASSERT(!conn_cb->ksnr_deleted);
484 /* Close associated conns */
485 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
486 if (conn->ksnc_conn_cb != conn_cb)
489 ksocknal_close_conn_locked(conn, 0);
492 net = (struct ksock_net *)(peer_ni->ksnp_ni->ni_data);
493 net->ksnn_interface.ksni_nroutes--;
494 LASSERT(net->ksnn_interface.ksni_nroutes >= 0);
496 conn_cb->ksnr_deleted = 1;
497 ksocknal_conn_cb_decref(conn_cb); /* drop peer_ni's ref */
498 peer_ni->ksnp_conn_cb = NULL;
500 if (list_empty(&peer_ni->ksnp_conns)) {
501 /* I've just removed the last route to a peer_ni with no active
504 ksocknal_unlink_peer_locked(peer_ni);
509 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_processid *id,
510 struct sockaddr *addr)
512 struct ksock_peer_ni *peer_ni;
513 struct ksock_peer_ni *peer2;
514 struct ksock_conn_cb *conn_cb;
516 if (LNET_NID_IS_ANY(&id->nid) ||
517 id->pid == LNET_PID_ANY)
520 /* Have a brand new peer_ni ready... */
521 peer_ni = ksocknal_create_peer(ni, id);
523 return PTR_ERR(peer_ni);
525 conn_cb = ksocknal_create_conn_cb(addr);
527 ksocknal_peer_decref(peer_ni);
531 write_lock_bh(&ksocknal_data.ksnd_global_lock);
533 /* always called with a ref on ni, so shutdown can't have started */
534 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
537 peer2 = ksocknal_find_peer_locked(ni, id);
539 ksocknal_peer_decref(peer_ni);
542 /* peer_ni table takes my ref on peer_ni */
543 hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list,
547 if (peer_ni->ksnp_conn_cb) {
548 ksocknal_conn_cb_decref(conn_cb);
550 ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
551 /* Remember conns_per_peer setting at the time
552 * of connection initiation. It will define the
553 * max number of conns per type for this conn_cb
556 conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
559 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
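/* ksocknal_add_peer() is driven from the IOC_LIBCFS_ADD_PEER ioctl below,
 * which packs the peer NID, IPv4 address and port into a sockaddr_in.  If the
 * peer already existed, the freshly allocated peer_ni is dropped and the
 * existing entry is used instead. */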
565 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni)
567 struct ksock_conn *conn;
568 struct ksock_conn *cnxt;
569 struct ksock_conn_cb *conn_cb;
571 LASSERT(!peer_ni->ksnp_closing);
573 /* Extra ref prevents peer_ni disappearing until I'm done with it */
574 ksocknal_peer_addref(peer_ni);
575 conn_cb = peer_ni->ksnp_conn_cb;
577 ksocknal_del_conn_cb_locked(conn_cb);
579 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns,
581 ksocknal_close_conn_locked(conn, 0);
583 ksocknal_peer_decref(peer_ni);
584 /* NB peer_ni unlinks itself when last conn/conn_cb is removed */
588 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_processid *id)
591 struct hlist_node *pnxt;
592 struct ksock_peer_ni *peer_ni;
598 write_lock_bh(&ksocknal_data.ksnd_global_lock);
600 if (id && !LNET_NID_IS_ANY(&id->nid)) {
601 lo = hash_min(nidhash(&id->nid),
602 HASH_BITS(ksocknal_data.ksnd_peers));
606 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
609 for (i = lo; i <= hi; i++) {
610 hlist_for_each_entry_safe(peer_ni, pnxt,
611 &ksocknal_data.ksnd_peers[i],
613 if (peer_ni->ksnp_ni != ni)
616 if (!((!id || LNET_NID_IS_ANY(&id->nid) ||
617 nid_same(&peer_ni->ksnp_id.nid, &id->nid)) &&
618 (!id || id->pid == LNET_PID_ANY ||
619 peer_ni->ksnp_id.pid == id->pid)))
622 ksocknal_peer_addref(peer_ni); /* a ref for me... */
624 ksocknal_del_peer_locked(peer_ni);
626 if (peer_ni->ksnp_closing &&
627 !list_empty(&peer_ni->ksnp_tx_queue)) {
628 LASSERT(list_empty(&peer_ni->ksnp_conns));
629 LASSERT(peer_ni->ksnp_conn_cb == NULL);
631 list_splice_init(&peer_ni->ksnp_tx_queue,
635 ksocknal_peer_decref(peer_ni); /* ...till here */
637 rc = 0; /* matched! */
641 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
643 ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
648 static struct ksock_conn *
649 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
651 struct ksock_peer_ni *peer_ni;
652 struct ksock_conn *conn;
655 read_lock(&ksocknal_data.ksnd_global_lock);
657 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
658 LASSERT(!peer_ni->ksnp_closing);
660 if (peer_ni->ksnp_ni != ni)
663 list_for_each_entry(conn, &peer_ni->ksnp_conns,
668 ksocknal_conn_addref(conn);
669 read_unlock(&ksocknal_data.ksnd_global_lock);
674 read_unlock(&ksocknal_data.ksnd_global_lock);
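/* Prefer the scheduler native to the requested CPT; if it has no threads
 * running, fall back to any scheduler that does (see the loop below). */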
678 static struct ksock_sched *
679 ksocknal_choose_scheduler_locked(unsigned int cpt)
681 struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
684 if (sched->kss_nthreads == 0) {
685 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
686 if (sched->kss_nthreads > 0) {
687 CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
688 cpt, sched->kss_cpt);
699 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
701 struct ksock_connreq *cr;
703 struct sockaddr_storage peer;
705 rc = lnet_sock_getaddr(sock, true, &peer);
707 CERROR("Can't determine new connection's address\n");
711 LIBCFS_ALLOC(cr, sizeof(*cr));
713 LCONSOLE_ERROR_MSG(0x12f,
714 "Dropping connection request from %pISc: memory exhausted\n",
721 cr->ksncr_sock = sock;
723 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
725 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
726 wake_up(&ksocknal_data.ksnd_connd_waitq);
728 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
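/* ksocknal_accept() only queues the incoming socket as a connection request
 * on ksnd_connd_connreqs and wakes a connd thread; the handshake and conn
 * setup are done later, in the passive path of ksocknal_create_conn(). */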
732 static const struct ln_key_list ksocknal_tunables_keys = {
733 .lkl_maxattr = LNET_NET_SOCKLND_TUNABLES_ATTR_MAX,
735 [LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER] = {
736 .lkp_value = "conns_per_peer",
737 .lkp_data_type = NLA_S32
743 ksocknal_nl_set(int cmd, struct nlattr *attr, int type, void *data)
745 struct lnet_lnd_tunables *tunables = data;
748 if (cmd != LNET_CMD_NETS)
751 if (type != LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER ||
752 nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
755 num = nla_get_s64(attr);
756 num = clamp_t(s64, num, 1, 127);
757 tunables->lnd_tun_u.lnd_sock.lnd_conns_per_peer = num;
763 ksocknal_connecting(struct ksock_conn_cb *conn_cb, struct sockaddr *sa)
766 rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr, sa))
767 return conn_cb->ksnr_connecting;
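/* ksocknal_create_conn() handles both cases:
 *  - active (conn_cb != NULL): we initiated the connect and send HELLO first;
 *  - passive (conn_cb == NULL): the socket came from the acceptor and the
 *    peer's HELLO tells us its identity and protocol.
 * It then finds or creates the peer_ni, resolves connection races, picks a
 * scheduler and queues any blocked transmits on the new conn. */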
772 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_conn_cb *conn_cb,
773 struct socket *sock, int type)
775 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
777 struct lnet_processid peerid;
779 struct ksock_conn *conn;
780 struct ksock_conn *conn2;
781 struct ksock_peer_ni *peer_ni = NULL;
782 struct ksock_peer_ni *peer2;
783 struct ksock_sched *sched;
784 struct ksock_hello_msg *hello;
787 struct ksock_tx *txtmp;
794 active = (conn_cb != NULL);
796 LASSERT(active == (type != SOCKLND_CONN_NONE));
798 LIBCFS_ALLOC(conn, sizeof(*conn));
804 conn->ksnc_peer = NULL;
805 conn->ksnc_conn_cb = NULL;
806 conn->ksnc_sock = sock;
807 /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
808 * being closed before the connection is established */
809 refcount_set(&conn->ksnc_sock_refcount, 2);
810 conn->ksnc_type = type;
811 ksocknal_lib_save_callback(sock, conn);
812 refcount_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
814 conn->ksnc_rx_ready = 0;
815 conn->ksnc_rx_scheduled = 0;
817 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
818 conn->ksnc_tx_ready = 0;
819 conn->ksnc_tx_scheduled = 0;
820 conn->ksnc_tx_carrier = NULL;
821 atomic_set(&conn->ksnc_tx_nob, 0);
823 LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
824 kshm_ips[LNET_INTERFACES_NUM]));
830 /* stash conn's local and remote addrs */
831 rc = ksocknal_lib_get_conn_addrs(conn);
835 /* Find out/confirm peer_ni's NID and connection type and get the
836 * vector of interfaces she's willing to let me connect to.
837 * Passive connections use the listener timeout since the peer_ni sends
842 peer_ni = conn_cb->ksnr_peer;
843 LASSERT(ni == peer_ni->ksnp_ni);
845 /* Active connection sends HELLO eagerly */
846 hello->kshm_nips = 0;
847 peerid = peer_ni->ksnp_id;
849 write_lock_bh(global_lock);
850 conn->ksnc_proto = peer_ni->ksnp_proto;
851 write_unlock_bh(global_lock);
853 if (conn->ksnc_proto == NULL) {
854 conn->ksnc_proto = &ksocknal_protocol_v3x;
855 #if SOCKNAL_VERSION_DEBUG
856 if (*ksocknal_tunables.ksnd_protocol == 2)
857 conn->ksnc_proto = &ksocknal_protocol_v2x;
858 else if (*ksocknal_tunables.ksnd_protocol == 1)
859 conn->ksnc_proto = &ksocknal_protocol_v1x;
863 rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
867 peerid.nid = LNET_ANY_NID;
868 peerid.pid = LNET_PID_ANY;
870 /* Passive, get protocol from peer_ni */
871 conn->ksnc_proto = NULL;
874 rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
878 LASSERT(rc == 0 || active);
879 LASSERT(conn->ksnc_proto != NULL);
880 LASSERT(!LNET_NID_IS_ANY(&peerid.nid));
882 cpt = lnet_nid2cpt(&peerid.nid, ni);
885 ksocknal_peer_addref(peer_ni);
886 write_lock_bh(global_lock);
888 peer_ni = ksocknal_create_peer(ni, &peerid);
889 if (IS_ERR(peer_ni)) {
890 rc = PTR_ERR(peer_ni);
894 write_lock_bh(global_lock);
896 /* called with a ref on ni, so shutdown can't have started */
897 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);
899 peer2 = ksocknal_find_peer_locked(ni, &peerid);
901 /* NB this puts an "empty" peer_ni in the peer_ni
902 * table (which takes my ref) */
903 hash_add(ksocknal_data.ksnd_peers,
904 &peer_ni->ksnp_list, nidhash(&peerid.nid));
906 ksocknal_peer_decref(peer_ni);
911 ksocknal_peer_addref(peer_ni);
912 peer_ni->ksnp_accepting++;
914 /* Am I already connecting to this guy? Resolve in
915 * favour of higher NID...
917 if (memcmp(&peerid.nid, &ni->ni_nid, sizeof(peerid.nid)) < 0 &&
918 ksocknal_connecting(peer_ni->ksnp_conn_cb,
919 ((struct sockaddr *) &conn->ksnc_peeraddr))) {
921 warn = "connection race resolution";
926 if (peer_ni->ksnp_closing ||
927 (active && conn_cb->ksnr_deleted)) {
928 /* peer_ni/conn_cb got closed under me */
930 warn = "peer_ni/conn_cb removed";
934 if (peer_ni->ksnp_proto == NULL) {
935 /* Never connected before.
936 * NB recv_hello may have returned EPROTO to signal my peer_ni
937 * wants a different protocol than the one I asked for.
939 LASSERT(list_empty(&peer_ni->ksnp_conns));
941 peer_ni->ksnp_proto = conn->ksnc_proto;
942 peer_ni->ksnp_incarnation = incarnation;
945 if (peer_ni->ksnp_proto != conn->ksnc_proto ||
946 peer_ni->ksnp_incarnation != incarnation) {
947 /* peer_ni rebooted or I've got the wrong protocol version */
948 ksocknal_close_peer_conns_locked(peer_ni, NULL, 0);
950 peer_ni->ksnp_proto = NULL;
952 warn = peer_ni->ksnp_incarnation != incarnation ?
954 "wrong proto version";
964 warn = "lost conn race";
967 warn = "retry with different protocol version";
971 /* Refuse to duplicate an existing connection, unless this is a
972 * loopback connection */
973 if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
974 (struct sockaddr *)&conn->ksnc_myaddr)) {
975 list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
977 (struct sockaddr *)&conn2->ksnc_peeraddr,
978 (struct sockaddr *)&conn->ksnc_peeraddr) ||
980 (struct sockaddr *)&conn2->ksnc_myaddr,
981 (struct sockaddr *)&conn->ksnc_myaddr) ||
982 conn2->ksnc_type != conn->ksnc_type)
986 /* If max conns per type is not registered in conn_cb
987 * as ksnr_max_conns, use ni's conns_per_peer
989 if ((peer_ni->ksnp_conn_cb &&
990 num_dup < peer_ni->ksnp_conn_cb->ksnr_max_conns) ||
991 (!peer_ni->ksnp_conn_cb &&
992 num_dup < ksocknal_get_conns_per_peer(peer_ni)))
995 /* Reply on a passive connection attempt so the peer_ni
996 * realises we're connected.
1006 /* If the connection created by this route didn't bind to the IP
1007 * address the route connected to, the connection/route matching
1008 * code below probably isn't going to work.
1011 !rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr,
1012 (struct sockaddr *)&conn->ksnc_peeraddr)) {
1013 CERROR("Route %s %pISc connected to %pISc\n",
1014 libcfs_idstr(&peer_ni->ksnp_id),
1015 &conn_cb->ksnr_addr,
1016 &conn->ksnc_peeraddr);
1019 /* Search for a conn_cb corresponding to the new connection and
1020 * create an association. This allows incoming connections created
1021 * by conn_cbs in my peer_ni to match my own conn_cb entries so I don't
1022 * continually create duplicate conn_cbs.
1024 conn_cb = peer_ni->ksnp_conn_cb;
1026 if (conn_cb && rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
1027 (struct sockaddr *)&conn_cb->ksnr_addr))
1028 ksocknal_associate_cb_conn_locked(conn_cb, conn);
1030 conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
1031 peer_ni->ksnp_last_alive = ktime_get_seconds();
1032 peer_ni->ksnp_send_keepalive = 0;
1033 peer_ni->ksnp_error = 0;
1035 sched = ksocknal_choose_scheduler_locked(cpt);
1037 CERROR("no schedulers available. node is unhealthy\n");
1041 * The cpt might have changed if we ended up selecting a non cpt
1042 * native scheduler. So use the scheduler's cpt instead.
1044 cpt = sched->kss_cpt;
1045 sched->kss_nconns++;
1046 conn->ksnc_scheduler = sched;
1048 conn->ksnc_tx_last_post = ktime_get_seconds();
1049 /* Set the deadline for the outgoing HELLO to drain */
1050 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1051 conn->ksnc_tx_deadline = ktime_get_seconds() +
1053 smp_mb(); /* order with adding to peer_ni's conn list */
1055 list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1056 ksocknal_conn_addref(conn);
1058 ksocknal_new_packet(conn, 0);
1060 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1062 /* Take packets blocking for this connection. */
1063 list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1064 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1068 list_del(&tx->tx_list);
1069 ksocknal_queue_tx_locked(tx, conn);
1072 write_unlock_bh(global_lock);
1073 /* We've now got a new connection. Any errors from here on are just
1074 * like "normal" comms errors and we close the connection normally.
1075 * NB (a) we still have to send the reply HELLO for passive
1077 * (b) normal I/O on the conn is blocked until I setup and call the
1081 CDEBUG(D_NET, "New conn %s p %d.x %pISc -> %pIScp"
1082 " incarnation:%lld sched[%d]\n",
1083 libcfs_idstr(&peerid), conn->ksnc_proto->pro_version,
1084 &conn->ksnc_myaddr, &conn->ksnc_peeraddr,
1088 hello->kshm_nips = 0;
1089 rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1092 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1093 kshm_ips[LNET_INTERFACES_NUM]));
1095 /* setup the socket AFTER I've received hello (it disables
1096 * SO_LINGER). I might call back to the acceptor who may want
1097 * to send a protocol version response and then close the
1098 * socket; this ensures the socket only tears down after the
1099 * response has been sent.
1102 rc = ksocknal_lib_setup_sock(sock);
1104 write_lock_bh(global_lock);
1106 /* NB my callbacks block while I hold ksnd_global_lock */
1107 ksocknal_lib_set_callback(sock, conn);
1110 peer_ni->ksnp_accepting--;
1112 write_unlock_bh(global_lock);
1115 write_lock_bh(global_lock);
1116 if (!conn->ksnc_closing) {
1117 /* could be closed by another thread */
1118 ksocknal_close_conn_locked(conn, rc);
1120 write_unlock_bh(global_lock);
1121 } else if (ksocknal_connsock_addref(conn) == 0) {
1122 /* Allow I/O to proceed. */
1123 ksocknal_read_callback(conn);
1124 ksocknal_write_callback(conn);
1125 ksocknal_connsock_decref(conn);
1128 ksocknal_connsock_decref(conn);
1129 ksocknal_conn_decref(conn);
1134 if (!peer_ni->ksnp_closing &&
1135 list_empty(&peer_ni->ksnp_conns) &&
1136 peer_ni->ksnp_conn_cb == NULL) {
1137 list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
1138 ksocknal_unlink_peer_locked(peer_ni);
1141 write_unlock_bh(global_lock);
1145 CERROR("Not creating conn %s type %d: %s\n",
1146 libcfs_idstr(&peerid), conn->ksnc_type, warn);
1148 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1149 libcfs_idstr(&peerid), conn->ksnc_type, warn);
1154 /* Request retry by replying with CONN_NONE
1155 * ksnc_proto has been set already
1157 conn->ksnc_type = SOCKLND_CONN_NONE;
1158 hello->kshm_nips = 0;
1159 ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1162 write_lock_bh(global_lock);
1163 peer_ni->ksnp_accepting--;
1164 write_unlock_bh(global_lock);
1168 * If we get here without an error code, just use -EALREADY.
1169 * Depending on how we got here, the error may be positive
1170 * or negative. Normalize the value for ksocknal_txlist_done().
1172 rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1173 ksocknal_txlist_done(ni, &zombies, rc2);
1174 ksocknal_peer_decref(peer_ni);
1178 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1179 kshm_ips[LNET_INTERFACES_NUM]));
1181 LIBCFS_FREE(conn, sizeof(*conn));
1190 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1192 /* This just does the immediate housekeeping, and queues the
1193 * connection for the reaper to terminate.
1194 * Caller holds ksnd_global_lock exclusively in irq context */
1195 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1196 struct ksock_conn_cb *conn_cb;
1197 struct ksock_conn *conn2;
1199 int duplicate_count = 0;
1201 LASSERT(peer_ni->ksnp_error == 0);
1202 LASSERT(!conn->ksnc_closing);
1203 conn->ksnc_closing = 1;
1205 /* ksnd_deathrow_conns takes over peer_ni's ref */
1206 list_del(&conn->ksnc_list);
1208 conn_cb = conn->ksnc_conn_cb;
1209 if (conn_cb != NULL) {
1210 /* dissociate conn from cb... */
1211 LASSERT(!conn_cb->ksnr_deleted);
1213 conn_count = ksocknal_get_conn_count_by_type(conn_cb,
1215 /* connected bit is set only if all connections
1216 * of the given type got created
1218 if (conn_count == conn_cb->ksnr_max_conns)
1219 LASSERT((conn_cb->ksnr_connected &
1220 BIT(conn->ksnc_type)) != 0);
1222 if (conn_count == 1) {
1223 list_for_each_entry(conn2, &peer_ni->ksnp_conns,
1225 if (conn2->ksnc_conn_cb == conn_cb &&
1226 conn2->ksnc_type == conn->ksnc_type)
1227 duplicate_count += 1;
1229 if (duplicate_count > 0)
1230 CERROR("Found %d duplicate conns type %d\n",
1234 ksocknal_decr_conn_count(conn_cb, conn->ksnc_type);
1236 conn->ksnc_conn_cb = NULL;
1238 /* drop conn's ref on conn_cb */
1239 ksocknal_conn_cb_decref(conn_cb);
1242 if (list_empty(&peer_ni->ksnp_conns)) {
1243 /* No more connections to this peer_ni */
1245 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1246 struct ksock_tx *tx;
1248 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1250 /* throw them to the last connection...,
1251 * these TXs will be sent to /dev/null by the scheduler */
1252 list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1254 ksocknal_tx_prep(conn, tx);
1256 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1257 list_splice_init(&peer_ni->ksnp_tx_queue,
1258 &conn->ksnc_tx_queue);
1259 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1262 /* renegotiate protocol version */
1263 peer_ni->ksnp_proto = NULL;
1264 /* stash last conn close reason */
1265 peer_ni->ksnp_error = error;
1267 if (peer_ni->ksnp_conn_cb == NULL) {
1268 /* I've just closed last conn belonging to a
1269 * peer_ni with no connections to it
1271 ksocknal_unlink_peer_locked(peer_ni);
1275 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1277 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
1278 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1280 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1284 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1286 bool notify = false;
1287 time64_t last_alive = 0;
1289 /* There has been a connection failure or comms error; but I'll only
1290 * tell LNET I think the peer_ni is dead if it's to another kernel and
1291 * there are no connections or connection attempts in existence. */
1293 read_lock(&ksocknal_data.ksnd_global_lock);
1295 if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1296 list_empty(&peer_ni->ksnp_conns) &&
1297 peer_ni->ksnp_accepting == 0 &&
1298 !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
1300 last_alive = peer_ni->ksnp_last_alive;
1303 read_unlock(&ksocknal_data.ksnd_global_lock);
1306 lnet_notify(peer_ni->ksnp_ni,
1307 &peer_ni->ksnp_id.nid,
1308 false, false, last_alive);
1312 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1314 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1315 struct ksock_tx *tx;
1316 struct ksock_tx *tmp;
1319 /* NB safe to finalize TXs because closing of socket will
1320 * abort all buffered data */
1321 LASSERT(conn->ksnc_sock == NULL);
1323 spin_lock(&peer_ni->ksnp_lock);
1325 list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
1327 if (tx->tx_conn != conn)
1330 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1332 tx->tx_msg.ksm_zc_cookies[0] = 0;
1333 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1334 list_move(&tx->tx_zc_list, &zlist);
1337 spin_unlock(&peer_ni->ksnp_lock);
1339 while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
1340 tx_zc_list)) != NULL) {
1341 list_del(&tx->tx_zc_list);
1342 ksocknal_tx_decref(tx);
1347 ksocknal_terminate_conn(struct ksock_conn *conn)
1349 /* This gets called by the reaper (guaranteed thread context) to
1350 * disengage the socket from its callbacks and close it.
1351 * ksnc_refcount will eventually hit zero, and then the reaper will
1354 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1355 struct ksock_sched *sched = conn->ksnc_scheduler;
1356 bool failed = false;
1358 LASSERT(conn->ksnc_closing);
1360 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1361 spin_lock_bh(&sched->kss_lock);
1363 /* a closing conn is always ready to tx */
1364 conn->ksnc_tx_ready = 1;
1366 if (!conn->ksnc_tx_scheduled &&
1367 !list_empty(&conn->ksnc_tx_queue)) {
1368 list_add_tail(&conn->ksnc_tx_list,
1369 &sched->kss_tx_conns);
1370 conn->ksnc_tx_scheduled = 1;
1371 /* extra ref for scheduler */
1372 ksocknal_conn_addref(conn);
1374 wake_up(&sched->kss_waitq);
1377 spin_unlock_bh(&sched->kss_lock);
1379 /* serialise with callbacks */
1380 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1382 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1384 /* OK, so this conn may not be completely disengaged from its
1385 * scheduler yet, but it _has_ committed to terminate...
1387 conn->ksnc_scheduler->kss_nconns--;
1389 if (peer_ni->ksnp_error != 0) {
1390 /* peer_ni's last conn closed in error */
1391 LASSERT(list_empty(&peer_ni->ksnp_conns));
1393 peer_ni->ksnp_error = 0; /* avoid multiple notifications */
1396 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1399 ksocknal_peer_failed(peer_ni);
1401 /* The socket is closed on the final put; either here, or in
1402 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1403 * when the connection was established, this will close the socket
1404 * immediately, aborting anything buffered in it. Any hung
1405 * zero-copy transmits will therefore complete in finite time.
1407 ksocknal_connsock_decref(conn);
1411 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1413 /* Queue the conn for the reaper to destroy */
1414 LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
1415 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1417 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1418 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1420 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1424 ksocknal_destroy_conn(struct ksock_conn *conn)
1428 /* Final coup-de-grace of the reaper */
1429 CDEBUG(D_NET, "connection %p\n", conn);
1431 LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
1432 LASSERT(refcount_read(&conn->ksnc_sock_refcount) == 0);
1433 LASSERT(conn->ksnc_sock == NULL);
1434 LASSERT(conn->ksnc_conn_cb == NULL);
1435 LASSERT(!conn->ksnc_tx_scheduled);
1436 LASSERT(!conn->ksnc_rx_scheduled);
1437 LASSERT(list_empty(&conn->ksnc_tx_queue));
1439 /* complete current receive if any */
1440 switch (conn->ksnc_rx_state) {
1441 case SOCKNAL_RX_LNET_PAYLOAD:
1442 last_rcv = conn->ksnc_rx_deadline -
1444 CERROR("Completing partial receive from %s[%d], ip %pIScp, with error, wanted: %d, left: %d, last alive is %lld secs ago\n",
1445 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1447 &conn->ksnc_peeraddr,
1448 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1449 ktime_get_seconds() - last_rcv);
1450 if (conn->ksnc_lnet_msg)
1451 conn->ksnc_lnet_msg->msg_health_status =
1452 LNET_MSG_STATUS_REMOTE_ERROR;
1453 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1455 case SOCKNAL_RX_LNET_HEADER:
1456 if (conn->ksnc_rx_started)
1457 CERROR("Incomplete receive of lnet header from %s, ip %pIScp, with error, protocol: %d.x.\n",
1458 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1459 &conn->ksnc_peeraddr,
1460 conn->ksnc_proto->pro_version);
1462 case SOCKNAL_RX_KSM_HEADER:
1463 if (conn->ksnc_rx_started)
1464 CERROR("Incomplete receive of ksock message from %s, ip %pIScp, with error, protocol: %d.x.\n",
1465 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1466 &conn->ksnc_peeraddr,
1467 conn->ksnc_proto->pro_version);
1469 case SOCKNAL_RX_SLOP:
1470 if (conn->ksnc_rx_started)
1471 CERROR("Incomplete receive of slops from %s, ip %pIScp, with error\n",
1472 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1473 &conn->ksnc_peeraddr);
1480 ksocknal_peer_decref(conn->ksnc_peer);
1482 LIBCFS_FREE(conn, sizeof(*conn));
1486 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni,
1487 struct sockaddr *addr, int why)
1489 struct ksock_conn *conn;
1490 struct ksock_conn *cnxt;
1493 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
1496 (struct sockaddr *)&conn->ksnc_peeraddr)) {
1498 ksocknal_close_conn_locked(conn, why);
1506 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1508 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1511 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1513 count = ksocknal_close_peer_conns_locked(
1514 peer_ni, (struct sockaddr *)&conn->ksnc_peeraddr, why);
1516 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1522 ksocknal_close_matching_conns(struct lnet_processid *id, __u32 ipaddr)
1524 struct ksock_peer_ni *peer_ni;
1525 struct hlist_node *pnxt;
1530 struct sockaddr_in sa = {.sin_family = AF_INET};
1532 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1534 if (!LNET_NID_IS_ANY(&id->nid)) {
1535 lo = hash_min(nidhash(&id->nid),
1536 HASH_BITS(ksocknal_data.ksnd_peers));
1540 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1543 sa.sin_addr.s_addr = htonl(ipaddr);
1544 for (i = lo; i <= hi; i++) {
1545 hlist_for_each_entry_safe(peer_ni, pnxt,
1546 &ksocknal_data.ksnd_peers[i],
1549 if (!((LNET_NID_IS_ANY(&id->nid) ||
1550 nid_same(&id->nid, &peer_ni->ksnp_id.nid)) &&
1551 (id->pid == LNET_PID_ANY ||
1552 id->pid == peer_ni->ksnp_id.pid)))
1555 count += ksocknal_close_peer_conns_locked(
1557 ipaddr ? (struct sockaddr *)&sa : NULL, 0);
1561 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1563 /* wildcards always succeed */
1564 if (LNET_NID_IS_ANY(&id->nid) || id->pid == LNET_PID_ANY ||
1568 return (count == 0 ? -ENOENT : 0);
1572 ksocknal_notify_gw_down(struct lnet_nid *gw_nid)
1574 /* The router is telling me she's been notified of a change in
1577 struct lnet_processid id = {
1578 .pid = LNET_PID_ANY,
1582 CDEBUG(D_NET, "gw %s down\n", libcfs_nidstr(gw_nid));
1584 /* If the gateway crashed, close all open connections... */
1585 ksocknal_close_matching_conns(&id, 0);
1588 /* We can only establish new connections
1589 * if we have auto-routes, and these connect on demand.
1594 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1598 struct ksock_conn *conn;
1600 for (index = 0; ; index++) {
1601 read_lock(&ksocknal_data.ksnd_global_lock);
1606 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
1608 ksocknal_conn_addref(conn);
1613 read_unlock(&ksocknal_data.ksnd_global_lock);
1618 ksocknal_lib_push_conn(conn);
1619 ksocknal_conn_decref(conn);
1624 ksocknal_push(struct lnet_ni *ni, struct lnet_processid *id)
1631 if (!LNET_NID_IS_ANY(&id->nid)) {
1632 lo = hash_min(nidhash(&id->nid),
1633 HASH_BITS(ksocknal_data.ksnd_peers));
1637 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1640 for (bkt = lo; bkt <= hi; bkt++) {
1641 int peer_off; /* searching offset in peer_ni hash table */
1643 for (peer_off = 0; ; peer_off++) {
1644 struct ksock_peer_ni *peer_ni;
1647 read_lock(&ksocknal_data.ksnd_global_lock);
1648 hlist_for_each_entry(peer_ni,
1649 &ksocknal_data.ksnd_peers[bkt],
1651 if (!((LNET_NID_IS_ANY(&id->nid) ||
1653 &peer_ni->ksnp_id.nid)) &&
1654 (id->pid == LNET_PID_ANY ||
1655 id->pid == peer_ni->ksnp_id.pid)))
1658 if (i++ == peer_off) {
1659 ksocknal_peer_addref(peer_ni);
1663 read_unlock(&ksocknal_data.ksnd_global_lock);
1665 if (i <= peer_off) /* no match */
1669 ksocknal_push_peer(peer_ni);
1670 ksocknal_peer_decref(peer_ni);
1677 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1679 struct lnet_processid id = {};
1680 struct libcfs_ioctl_data *data = arg;
1684 case IOC_LIBCFS_GET_INTERFACE: {
1685 struct ksock_net *net = ni->ni_data;
1686 struct ksock_interface *iface;
1687 struct sockaddr_in *sa;
1689 read_lock(&ksocknal_data.ksnd_global_lock);
1691 if (data->ioc_count >= 1) {
1695 iface = &net->ksnn_interface;
1697 sa = (void *)&iface->ksni_addr;
1698 if (sa->sin_family == AF_INET) {
1699 data->ioc_u32[0] = ntohl(sa->sin_addr.s_addr);
1700 data->ioc_u32[1] = iface->ksni_netmask;
1702 data->ioc_u32[0] = 0xFFFFFFFF;
1703 data->ioc_u32[1] = 0;
1705 data->ioc_u32[2] = iface->ksni_npeers;
1706 data->ioc_u32[3] = iface->ksni_nroutes;
1709 read_unlock(&ksocknal_data.ksnd_global_lock);
1713 case IOC_LIBCFS_GET_PEER: {
1718 int share_count = 0;
1720 rc = ksocknal_get_peer_info(ni, data->ioc_count,
1721 &id, &myip, &ip, &port,
1722 &conn_count, &share_count);
1726 if (!nid_is_nid4(&id.nid))
1728 data->ioc_nid = lnet_nid_to_nid4(&id.nid);
1729 data->ioc_count = share_count;
1730 data->ioc_u32[0] = ip;
1731 data->ioc_u32[1] = port;
1732 data->ioc_u32[2] = myip;
1733 data->ioc_u32[3] = conn_count;
1734 data->ioc_u32[4] = id.pid;
1738 case IOC_LIBCFS_ADD_PEER: {
1739 struct sockaddr_in sa = {.sin_family = AF_INET};
1741 id.pid = LNET_PID_LUSTRE;
1742 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1743 sa.sin_addr.s_addr = htonl(data->ioc_u32[0]);
1744 sa.sin_port = htons(data->ioc_u32[1]);
1745 return ksocknal_add_peer(ni, &id, (struct sockaddr *)&sa);
1747 case IOC_LIBCFS_DEL_PEER:
1748 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1749 id.pid = LNET_PID_ANY;
1750 return ksocknal_del_peer(ni, &id);
1752 case IOC_LIBCFS_GET_CONN: {
1756 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
1757 struct sockaddr_in *psa = (void *)&conn->ksnc_peeraddr;
1758 struct sockaddr_in *mysa = (void *)&conn->ksnc_myaddr;
1763 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
1765 data->ioc_count = txmem;
1766 data->ioc_nid = lnet_nid_to_nid4(&conn->ksnc_peer->ksnp_id.nid);
1767 data->ioc_flags = nagle;
1768 if (psa->sin_family == AF_INET)
1769 data->ioc_u32[0] = ntohl(psa->sin_addr.s_addr);
1771 data->ioc_u32[0] = 0xFFFFFFFF;
1772 data->ioc_u32[1] = rpc_get_port((struct sockaddr *)
1773 &conn->ksnc_peeraddr);
1774 if (mysa->sin_family == AF_INET)
1775 data->ioc_u32[2] = ntohl(mysa->sin_addr.s_addr);
1777 data->ioc_u32[2] = 0xFFFFFFFF;
1778 data->ioc_u32[3] = conn->ksnc_type;
1779 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
1780 data->ioc_u32[5] = rxmem;
1781 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
1782 ksocknal_conn_decref(conn);
1786 case IOC_LIBCFS_CLOSE_CONNECTION:
1787 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1788 id.pid = LNET_PID_ANY;
1789 return ksocknal_close_matching_conns(&id,
1792 case IOC_LIBCFS_REGISTER_MYNID:
1793 /* Ignore if this is a noop */
1794 if (nid_is_nid4(&ni->ni_nid) &&
1795 data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid))
1798 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1799 libcfs_nid2str(data->ioc_nid),
1800 libcfs_nidstr(&ni->ni_nid));
1803 case IOC_LIBCFS_PUSH_CONNECTION:
1804 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1805 id.pid = LNET_PID_ANY;
1806 return ksocknal_push(ni, &id);
1815 ksocknal_free_buffers(void)
1817 LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
1819 if (ksocknal_data.ksnd_schedulers != NULL)
1820 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
1822 spin_lock(&ksocknal_data.ksnd_tx_lock);
1824 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
1826 struct ksock_tx *tx;
1828 list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
1829 spin_unlock(&ksocknal_data.ksnd_tx_lock);
1831 while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
1832 tx_list)) != NULL) {
1833 list_del(&tx->tx_list);
1834 LIBCFS_FREE(tx, tx->tx_desc_size);
1837 spin_unlock(&ksocknal_data.ksnd_tx_lock);
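/* Link state is considered up only if the device is running and, when the
 * driver implements ethtool get_link(), that callback also reports link up. */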
1841 static int ksocknal_get_link_status(struct net_device *dev)
1847 if (!netif_running(dev)) {
1849 CDEBUG(D_NET, "device not running\n");
1851 /* Some devices may not provide link settings */
1852 else if (dev->ethtool_ops->get_link) {
1853 ret = dev->ethtool_ops->get_link(dev);
1854 CDEBUG(D_NET, "get_link returns %u\n", ret);
1861 ksocknal_handle_link_state_change(struct net_device *dev,
1862 unsigned char operstate)
1864 struct lnet_ni *ni = NULL;
1865 struct ksock_net *net;
1866 struct ksock_net *cnxt;
1868 unsigned char link_down = !(operstate == IF_OPER_UP);
1869 struct in_device *in_dev;
1870 bool found_ip = false;
1871 struct ksock_interface *ksi = NULL;
1872 struct sockaddr_in *sa;
1873 __u32 ni_state_before;
1874 bool update_ping_buf = false;
1875 DECLARE_CONST_IN_IFADDR(ifa);
1877 ifindex = dev->ifindex;
1879 if (!ksocknal_data.ksnd_nnets)
1882 list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
1885 ksi = &net->ksnn_interface;
1886 sa = (void *)&ksi->ksni_addr;
1889 if (strcmp(ksi->ksni_name, dev->name))
1892 if (ksi->ksni_index == -1) {
1893 if (dev->reg_state != NETREG_REGISTERED)
1895 /* A registration just happened: save the new index for
1897 ksi->ksni_index = ifindex;
1901 if (ksi->ksni_index != ifindex)
1904 if (dev->reg_state == NETREG_UNREGISTERING) {
1905 /* Device is being unregistered; we need to clear the
1906 * index, as it can change when the device comes back */
1907 ksi->ksni_index = -1;
1913 in_dev = __in_dev_get_rtnl(dev);
1915 CDEBUG(D_NET, "Interface %s has no IPv4 status.\n",
1917 CDEBUG(D_NET, "set link fatal state to 1\n");
1918 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
1922 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1923 if (sa->sin_addr.s_addr == ifa->ifa_local)
1929 CDEBUG(D_NET, "Interface %s has no matching ip\n",
1931 CDEBUG(D_NET, "set link fatal state to 1\n");
1932 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
1938 CDEBUG(D_NET, "set link fatal state to 1\n");
1939 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
1942 CDEBUG(D_NET, "set link fatal state to %u\n",
1943 (ksocknal_get_link_status(dev) == 0));
1944 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
1945 (ksocknal_get_link_status(dev) == 0));
1948 if (!update_ping_buf &&
1949 (atomic_read(&ni->ni_fatal_error_on) != ni_state_before))
1950 update_ping_buf = true;
1953 if (update_ping_buf)
1954 lnet_update_ping_buffer();
1961 ksocknal_handle_inetaddr_change(struct in_ifaddr *ifa, unsigned long event)
1964 struct ksock_net *net;
1965 struct ksock_net *cnxt;
1966 struct net_device *event_netdev = ifa->ifa_dev->dev;
1968 struct ksock_interface *ksi = NULL;
1969 struct sockaddr_in *sa;
1970 __u32 ni_state_before;
1971 bool update_ping_buf = false;
1973 if (!ksocknal_data.ksnd_nnets)
1976 ifindex = event_netdev->ifindex;
1978 list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
1981 ksi = &net->ksnn_interface;
1982 sa = (void *)&ksi->ksni_addr;
1984 if (ksi->ksni_index != ifindex ||
1985 strcmp(ksi->ksni_name, event_netdev->name))
1988 if (sa->sin_addr.s_addr == ifa->ifa_local) {
1989 CDEBUG(D_NET, "set link fatal state to %u\n",
1990 (event == NETDEV_DOWN));
1992 ni_state_before = atomic_xchg(&ni->ni_fatal_error_on,
1993 (event == NETDEV_DOWN));
1994 if (!update_ping_buf &&
1995 ((event == NETDEV_DOWN) != ni_state_before))
1996 update_ping_buf = true;
2000 if (update_ping_buf)
2001 lnet_update_ping_buffer();
2006 /************************************
2007 * Net device notifier event handler
2008 ************************************/
2009 static int ksocknal_device_event(struct notifier_block *unused,
2010 unsigned long event, void *ptr)
2012 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2013 unsigned char operstate;
2015 operstate = dev->operstate;
2017 CDEBUG(D_NET, "devevent: status=%ld, iface=%s ifindex %d state %u\n",
2018 event, dev->name, dev->ifindex, operstate);
2024 case NETDEV_REGISTER:
2025 case NETDEV_UNREGISTER:
2026 ksocknal_handle_link_state_change(dev, operstate);
2033 /************************************
2034 * Inetaddr notifier event handler
2035 ************************************/
2036 static int ksocknal_inetaddr_event(struct notifier_block *unused,
2037 unsigned long event, void *ptr)
2039 struct in_ifaddr *ifa = ptr;
2041 CDEBUG(D_NET, "addrevent: status %ld ip addr %pI4, netmask %pI4.\n",
2042 event, &ifa->ifa_address, &ifa->ifa_mask);
2048 ksocknal_handle_inetaddr_change(ifa, event);
2055 static struct notifier_block ksocknal_dev_notifier_block = {
2056 .notifier_call = ksocknal_device_event,
2059 static struct notifier_block ksocknal_inetaddr_notifier_block = {
2060 .notifier_call = ksocknal_inetaddr_event,
2064 ksocknal_base_shutdown(void)
2066 struct ksock_sched *sched;
2067 struct ksock_peer_ni *peer_ni;
2070 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
2071 libcfs_kmem_read());
2072 LASSERT(ksocknal_data.ksnd_nnets == 0);
2074 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL) {
2075 unregister_netdevice_notifier(&ksocknal_dev_notifier_block);
2076 unregister_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
2079 switch (ksocknal_data.ksnd_init) {
2084 case SOCKNAL_INIT_ALL:
2085 case SOCKNAL_INIT_DATA:
2086 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
2089 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2090 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2091 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2092 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2093 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2095 if (ksocknal_data.ksnd_schedulers != NULL) {
2096 cfs_percpt_for_each(sched, i,
2097 ksocknal_data.ksnd_schedulers) {
2099 LASSERT(list_empty(&sched->kss_tx_conns));
2100 LASSERT(list_empty(&sched->kss_rx_conns));
2101 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2102 LASSERT(sched->kss_nconns == 0);
2106 /* flag threads to terminate; wake and wait for them to die */
2107 ksocknal_data.ksnd_shuttingdown = 1;
2108 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2109 wake_up(&ksocknal_data.ksnd_reaper_waitq);
2111 if (ksocknal_data.ksnd_schedulers != NULL) {
2112 cfs_percpt_for_each(sched, i,
2113 ksocknal_data.ksnd_schedulers)
2114 wake_up_all(&sched->kss_waitq);
2117 wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
2118 atomic_read(&ksocknal_data.ksnd_nthreads) == 0,
2119 "waiting for %d threads to terminate\n",
2120 atomic_read(&ksocknal_data.ksnd_nthreads));
2122 ksocknal_free_buffers();
2124 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2128 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
2129 libcfs_kmem_read());
2131 module_put(THIS_MODULE);
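/* ksnd_init tracks global startup state: SOCKNAL_INIT_NOTHING (nothing set
 * up yet), SOCKNAL_INIT_DATA (lists and locks initialised) and
 * SOCKNAL_INIT_ALL (threads and notifiers running).  ksocknal_base_shutdown()
 * above unwinds whichever stage was reached. */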
2135 ksocknal_base_startup(void)
2137 struct ksock_sched *sched;
2141 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2142 LASSERT(ksocknal_data.ksnd_nnets == 0);
2144 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2146 hash_init(ksocknal_data.ksnd_peers);
2148 rwlock_init(&ksocknal_data.ksnd_global_lock);
2149 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2151 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2152 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2153 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2154 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2155 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2157 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2158 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2159 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2160 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2162 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2163 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2165 /* NB memset above zeros whole of ksocknal_data */
2167 /* flag lists/ptrs/locks initialised */
2168 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2169 if (!try_module_get(THIS_MODULE))
2172 /* Create a scheduler block per available CPT */
2173 ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2175 if (ksocknal_data.ksnd_schedulers == NULL)
2178 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2182 * make sure not to allocate more threads than there are
2183 * cores/CPUs in the CPT
2185 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2186 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2187 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2190 * cap at half of the CPUs; assume the other half should be
2191 * reserved for upper layer modules
2193 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2196 sched->kss_nthreads_max = nthrs;
2199 spin_lock_init(&sched->kss_lock);
2200 INIT_LIST_HEAD(&sched->kss_rx_conns);
2201 INIT_LIST_HEAD(&sched->kss_tx_conns);
2202 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2203 init_waitqueue_head(&sched->kss_waitq);
2206 ksocknal_data.ksnd_connd_starting = 0;
2207 ksocknal_data.ksnd_connd_failed_stamp = 0;
2208 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2209 /* must have at least 2 connds to remain responsive to accepts while
2211 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2212 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2214 if (*ksocknal_tunables.ksnd_nconnds_max <
2215 *ksocknal_tunables.ksnd_nconnds) {
2216 *ksocknal_tunables.ksnd_nconnds_max =
2217 *ksocknal_tunables.ksnd_nconnds;
2220 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2221 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2222 ksocknal_data.ksnd_connd_starting++;
2223 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2225 rc = ksocknal_thread_start(ksocknal_connd,
2226 (void *)((uintptr_t)i),
2227 "socknal_cd%02d", i);
2229 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2230 ksocknal_data.ksnd_connd_starting--;
2231 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2232 CERROR("Can't spawn socknal connd: %d\n", rc);
2237 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2239 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2243 register_netdevice_notifier(&ksocknal_dev_notifier_block);
2244 register_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
2246 /* flag everything initialised */
2247 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2252 ksocknal_base_shutdown();
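/* Dump any peers still hashed for this NI at shutdown; it returns 0 so it can
 * be folded into the wait_var_event_warning() expression in
 * ksocknal_shutdown() below. */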
2257 ksocknal_debug_peerhash(struct lnet_ni *ni)
2259 struct ksock_peer_ni *peer_ni;
2262 read_lock(&ksocknal_data.ksnd_global_lock);
2264 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
2265 struct ksock_conn_cb *conn_cb;
2266 struct ksock_conn *conn;
2268 if (peer_ni->ksnp_ni != ni)
2271 CWARN("Active peer_ni on shutdown: %s, ref %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2272 libcfs_idstr(&peer_ni->ksnp_id),
2273 refcount_read(&peer_ni->ksnp_refcount),
2274 peer_ni->ksnp_closing,
2275 peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2276 peer_ni->ksnp_zc_next_cookie,
2277 !list_empty(&peer_ni->ksnp_tx_queue),
2278 !list_empty(&peer_ni->ksnp_zc_req_list));
2280 conn_cb = peer_ni->ksnp_conn_cb;
2282 CWARN("ConnCB: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2283 refcount_read(&conn_cb->ksnr_refcount),
2284 conn_cb->ksnr_scheduled, conn_cb->ksnr_connecting,
2285 conn_cb->ksnr_connected, conn_cb->ksnr_deleted);
2288 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
2289 CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2290 refcount_read(&conn->ksnc_conn_refcount),
2291 refcount_read(&conn->ksnc_sock_refcount),
2292 conn->ksnc_type, conn->ksnc_closing);
2297 read_unlock(&ksocknal_data.ksnd_global_lock);
2302 ksocknal_shutdown(struct lnet_ni *ni)
2304 struct ksock_net *net = ni->ni_data;
2306 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2307 LASSERT(ksocknal_data.ksnd_nnets > 0);
2309 /* prevent new peers */
2310 atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);
2312 /* Delete all peers */
2313 ksocknal_del_peer(ni, NULL);
2315 /* Wait for all peer_ni state to clean up */
2316 wait_var_event_warning(&net->ksnn_npeers,
2317 atomic_read(&net->ksnn_npeers) ==
2318 SOCKNAL_SHUTDOWN_BIAS,
2319 "waiting for %d peers to disconnect\n",
2320 ksocknal_debug_peerhash(ni) +
2321 atomic_read(&net->ksnn_npeers) -
2322 SOCKNAL_SHUTDOWN_BIAS);
2324 LASSERT(net->ksnn_interface.ksni_npeers == 0);
2325 LASSERT(net->ksnn_interface.ksni_nroutes == 0);
2327 list_del(&net->ksnn_list);
2328 LIBCFS_FREE(net, sizeof(*net));
2330 ksocknal_data.ksnd_nnets--;
2331 if (ksocknal_data.ksnd_nnets == 0)
2332 ksocknal_base_shutdown();
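/* Shutdown note: adding SOCKNAL_SHUTDOWN_BIAS to ksnn_npeers drives the
 * counter negative, so the atomic_inc_unless_negative() check in
 * ksocknal_create_peer() refuses new peers while existing ones drain; the
 * wait above completes once the count returns to exactly the bias. */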
2336 ksocknal_search_new_ipif(struct ksock_net *net)
2339 char *ifnam = &net->ksnn_interface.ksni_name[0];
2340 char *colon = strchr(ifnam, ':');
2342 struct ksock_net *tmp;
2347 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
2348 char *ifnam2 = &tmp->ksnn_interface.ksni_name[0];
2349 char *colon2 = strchr(ifnam2, ':');
2354 found = strcmp(ifnam, ifnam2) == 0;
2367 ksocknal_start_schedulers(struct ksock_sched *sched)
2373 if (sched->kss_nthreads == 0) {
2374 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2375 nthrs = sched->kss_nthreads_max;
2377 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2379 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2380 nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2382 nthrs = min(nthrs, sched->kss_nthreads_max);
2384 LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
2385 /* add two more threads if there is a new interface */
2386 nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
2389 for (i = 0; i < nthrs; i++) {
2392 id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
2393 rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id,
2394 "socknal_sd%02d_%02d",
2396 (int)KSOCK_THREAD_SID(id));
2400 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2401 sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
2405 sched->kss_nthreads += i;
2410 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2412 int newif = ksocknal_search_new_ipif(net);
2416 if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2419 for (i = 0; i < ncpts; i++) {
2420 struct ksock_sched *sched;
2421 int cpt = (cpts == NULL) ? i : cpts[i];
2423 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2424 sched = ksocknal_data.ksnd_schedulers[cpt];
2426 if (!newif && sched->kss_nthreads > 0)
2429 rc = ksocknal_start_schedulers(sched);
2437 ksocknal_startup(struct lnet_ni *ni)
2439 struct ksock_net *net;
2440 struct ksock_interface *ksi = NULL;
2441 struct lnet_inetdev *ifaces = NULL;
2444 LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);
2445 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2446 rc = ksocknal_base_startup();
2450 LIBCFS_ALLOC(net, sizeof(*net));
2454 net->ksnn_incarnation = ktime_get_real_ns();
2457 ksocknal_tunables_setup(ni);
2459 rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns, true);
2463 ksi = &net->ksnn_interface;
2465 /* An interface and/or IP address may be specified; otherwise default
2466 * to the first interface
2468 if_idx = lnet_inet_select(ni, ifaces, rc);
2472 if (!ni->ni_interface) {
2473 rc = lnet_ni_add_interface(ni, ifaces[if_idx].li_name);
2475 CWARN("ksocklnd failed to allocate ni_interface\n");
2478 ni->ni_dev_cpt = ifaces[if_idx].li_cpt;
2479 ksi->ksni_index = ifaces[if_idx].li_index;
2480 if (ifaces[if_idx].li_size == sizeof(struct in6_addr)) {
2481 struct sockaddr_in6 *sa;
2482 sa = (void *)&ksi->ksni_addr;
2483 memset(sa, 0, sizeof(*sa));
2484 sa->sin6_family = AF_INET6;
2485 memcpy(&sa->sin6_addr, ifaces[if_idx].li_ipv6addr,
2486 sizeof(struct in6_addr));
2487 ni->ni_nid.nid_size = sizeof(struct in6_addr) - 4;
2488 memcpy(&ni->ni_nid.nid_addr, ifaces[if_idx].li_ipv6addr,
2489 sizeof(struct in6_addr));
2491 struct sockaddr_in *sa;
2492 sa = (void *)&ksi->ksni_addr;
2493 memset(sa, 0, sizeof(*sa));
2494 sa->sin_family = AF_INET;
2495 sa->sin_addr.s_addr = ifaces[if_idx].li_ipaddr;
2496 ksi->ksni_netmask = ifaces[if_idx].li_netmask;
2497 ni->ni_nid.nid_size = 0;
2498 ni->ni_nid.nid_addr[0] = sa->sin_addr.s_addr;
2500 strlcpy(ksi->ksni_name, ifaces[if_idx].li_name, sizeof(ksi->ksni_name));
2502 /* call this before adding it to ksocknal_data.ksnd_nets */
2503 rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2507 list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2509 ksocknal_data.ksnd_nnets++;
2514 LIBCFS_FREE(net, sizeof(*net));
2516 if (ksocknal_data.ksnd_nnets == 0)
2517 ksocknal_base_shutdown();
2522 static void __exit ksocklnd_exit(void)
2524 lnet_unregister_lnd(&the_ksocklnd);
2527 static const struct lnet_lnd the_ksocklnd = {
2528 .lnd_type = SOCKLND,
2529 .lnd_startup = ksocknal_startup,
2530 .lnd_shutdown = ksocknal_shutdown,
2531 .lnd_ctl = ksocknal_ctl,
2532 .lnd_send = ksocknal_send,
2533 .lnd_recv = ksocknal_recv,
2534 .lnd_notify_peer_down = ksocknal_notify_gw_down,
2535 .lnd_accept = ksocknal_accept,
2536 .lnd_nl_set = ksocknal_nl_set,
2537 .lnd_keys = &ksocknal_tunables_keys,
2540 static int __init ksocklnd_init(void)
2544 /* check ksnr_connected/connecting field large enough */
2545 BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
2546 BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
2548 rc = ksocknal_tunables_init();
2552 lnet_register_lnd(&the_ksocklnd);
2557 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2558 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2559 MODULE_VERSION("2.8.0");
2560 MODULE_LICENSE("GPL");
2562 module_init(ksocklnd_init);
2563 module_exit(ksocklnd_exit);