1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Use is subject to license terms.
6 * Copyright (c) 2011, 2017, Intel Corporation.
9 /* This file is part of Lustre, http://www.lustre.org/
11 * Author: Zach Brown <zab@zabbo.net>
12 * Author: Peter J. Braam <braam@clusterfs.com>
13 * Author: Phil Schwan <phil@clusterfs.com>
14 * Author: Eric Barton <eric@bartonsoftware.com>
17 #include <linux/ethtool.h>
18 #include <linux/inetdevice.h>
19 #include <linux/kernel.h>
20 #include <linux/sunrpc/addr.h>
21 #include <net/addrconf.h>
24 static const struct lnet_lnd the_ksocklnd;
25 struct ksock_nal_data ksocknal_data;
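/* Map a local interface address to its network device: walk the
 * devices in @ni's namespace, skip loopback and interfaces that are
 * down, and match @addr against each device's IPv4/IPv6 addresses.
 * The registration/operational state checked below is reported back
 * to the caller (see the dev_status use in ksocknal_startup()).
 * Returns the matching ifindex, or a negative errno on failure.
 */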
27 static int ksocknal_ip2index(struct sockaddr *addr, struct lnet_ni *ni,
30 struct net_device *dev;
32 DECLARE_CONST_IN_IFADDR(ifa);
36 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
40 for_each_netdev(ni->ni_net_ns, dev) {
41 int flags = dev_get_flags(dev);
42 struct in_device *in_dev;
44 if (flags & IFF_LOOPBACK) /* skip the loopback IF */
47 if (!(flags & IFF_UP))
50 switch (addr->sa_family) {
52 in_dev = __in_dev_get_rcu(dev);
56 in_dev_for_each_ifa_rcu(ifa, in_dev) {
58 ((struct sockaddr_in *)addr)->sin_addr.s_addr)
63 #if IS_ENABLED(CONFIG_IPV6)
65 struct inet6_dev *in6_dev;
66 const struct inet6_ifaddr *ifa6;
67 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
69 in6_dev = __in6_dev_get(dev);
73 list_for_each_entry_rcu(ifa6, &in6_dev->addr_list, if_list) {
74 if (ipv6_addr_cmp(&ifa6->addr,
75 &addr6->sin6_addr) == 0)
80 #endif /* IS_ENABLED(CONFIG_IPV6) */
91 ((dev->reg_state == NETREG_UNREGISTERING) ||
92 ((dev->operstate != IF_OPER_UP) &&
93 (dev->operstate != IF_OPER_UNKNOWN))) ||
94 (lnet_get_link_status(dev) == 0))
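/* Allocate and initialise a connection callback (the socklnd
 * equivalent of a route) for @addr: one initial reference, no owning
 * peer_ni yet, the address/port stashed in ksnr_addr and every
 * per-type connection counter cleared.
 */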
100 static struct ksock_conn_cb *
101 ksocknal_create_conn_cb(struct sockaddr *addr)
103 struct ksock_conn_cb *conn_cb;
105 LIBCFS_ALLOC(conn_cb, sizeof(*conn_cb));
109 refcount_set(&conn_cb->ksnr_refcount, 1);
110 conn_cb->ksnr_peer = NULL;
111 conn_cb->ksnr_retry_interval = 0; /* OK to connect at any time */
112 rpc_copy_addr((struct sockaddr *)&conn_cb->ksnr_addr, addr);
113 rpc_set_port((struct sockaddr *)&conn_cb->ksnr_addr,
115 conn_cb->ksnr_scheduled = 0;
116 conn_cb->ksnr_connecting = 0;
117 conn_cb->ksnr_connected = 0;
118 conn_cb->ksnr_deleted = 0;
119 conn_cb->ksnr_conn_count = 0;
120 conn_cb->ksnr_ctrl_conn_count = 0;
121 conn_cb->ksnr_blki_conn_count = 0;
122 conn_cb->ksnr_blko_conn_count = 0;
123 conn_cb->ksnr_max_conns = 0;
124 conn_cb->ksnr_busy_retry_count = 0;
130 ksocknal_destroy_conn_cb(struct ksock_conn_cb *conn_cb)
132 LASSERT(refcount_read(&conn_cb->ksnr_refcount) == 0);
134 if (conn_cb->ksnr_peer)
135 ksocknal_peer_decref(conn_cb->ksnr_peer);
137 LIBCFS_FREE(conn_cb, sizeof(*conn_cb));
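/* Create a peer_ni for @id on @ni. ksnn_npeers is biased negative at
 * shutdown, so atomic_inc_unless_negative() below doubles as a
 * "network still up" check; the caller receives the single initial
 * reference on the new peer_ni.
 */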
140 static struct ksock_peer_ni *
141 ksocknal_create_peer(struct lnet_ni *ni, struct lnet_processid *id)
143 int cpt = lnet_nid2cpt(&id->nid, ni);
144 struct ksock_net *net = ni->ni_data;
145 struct ksock_peer_ni *peer_ni;
147 LASSERT(!LNET_NID_IS_ANY(&id->nid));
148 LASSERT(id->pid != LNET_PID_ANY);
149 LASSERT(!in_interrupt());
151 if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
152 CERROR("Can't create peer_ni: network shutdown\n");
153 return ERR_PTR(-ESHUTDOWN);
156 LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
158 atomic_dec(&net->ksnn_npeers);
159 return ERR_PTR(-ENOMEM);
162 peer_ni->ksnp_ni = ni;
163 peer_ni->ksnp_id = *id;
164 refcount_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
165 peer_ni->ksnp_closing = 0;
166 peer_ni->ksnp_accepting = 0;
167 peer_ni->ksnp_proto = NULL;
168 peer_ni->ksnp_last_alive = 0;
169 peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
170 peer_ni->ksnp_conn_cb = NULL;
172 INIT_LIST_HEAD(&peer_ni->ksnp_conns);
173 INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
174 INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
175 spin_lock_init(&peer_ni->ksnp_lock);
181 ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
183 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
185 CDEBUG(D_NET, "peer_ni %s %p deleted\n",
186 libcfs_idstr(&peer_ni->ksnp_id), peer_ni);
188 LASSERT(refcount_read(&peer_ni->ksnp_refcount) == 0);
189 LASSERT(peer_ni->ksnp_accepting == 0);
190 LASSERT(list_empty(&peer_ni->ksnp_conns));
191 LASSERT(peer_ni->ksnp_conn_cb == NULL);
192 LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
193 LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
195 LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
197 /* NB a peer_ni's connections and conn_cb keep a reference on their
198 * peer_ni until they are destroyed, so we can be assured that _all_
199 * state to do with this peer_ni has been cleaned up when its refcount
202 if (atomic_dec_and_test(&net->ksnn_npeers))
203 wake_up_var(&net->ksnn_npeers);
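/* Peer lookup: the _locked variant must be called with
 * ksnd_global_lock held and returns the entry without taking a new
 * reference; ksocknal_find_peer() below wraps it with the read lock
 * and adds a ref for the caller.
 */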
206 struct ksock_peer_ni *
207 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_processid *id)
209 struct ksock_peer_ni *peer_ni;
210 unsigned long hash = nidhash(&id->nid);
212 hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
214 LASSERT(!peer_ni->ksnp_closing);
216 if (peer_ni->ksnp_ni != ni)
219 if (!nid_same(&peer_ni->ksnp_id.nid, &id->nid) ||
220 peer_ni->ksnp_id.pid != id->pid)
223 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
224 peer_ni, libcfs_idstr(id),
225 refcount_read(&peer_ni->ksnp_refcount));
231 struct ksock_peer_ni *
232 ksocknal_find_peer(struct lnet_ni *ni, struct lnet_processid *id)
234 struct ksock_peer_ni *peer_ni;
236 read_lock(&ksocknal_data.ksnd_global_lock);
237 peer_ni = ksocknal_find_peer_locked(ni, id);
238 if (peer_ni != NULL) /* +1 ref for caller? */
239 ksocknal_peer_addref(peer_ni);
240 read_unlock(&ksocknal_data.ksnd_global_lock);
246 ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
248 LASSERT(list_empty(&peer_ni->ksnp_conns));
249 LASSERT(peer_ni->ksnp_conn_cb == NULL);
250 LASSERT(!peer_ni->ksnp_closing);
251 peer_ni->ksnp_closing = 1;
252 hlist_del(&peer_ni->ksnp_list);
253 /* lose peerlist's ref */
254 ksocknal_peer_decref(peer_ni);
259 ksocknal_dump_peer_debug_info(struct ksock_peer_ni *peer_ni)
261 struct ksock_conn *conn;
262 struct list_head *ctmp;
263 struct list_head *txtmp;
267 list_for_each(ctmp, &peer_ni->ksnp_conns) {
268 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
270 if (!list_empty(&conn->ksnc_tx_queue))
271 list_for_each(txtmp, &conn->ksnc_tx_queue) txcount++;
273 CDEBUG(D_CONSOLE, "Conn %d [type, closing, crefcnt, srefcnt]: %d, %d, %d, %d\n",
277 refcount_read(&conn->ksnc_conn_refcount),
278 refcount_read(&conn->ksnc_sock_refcount));
279 CDEBUG(D_CONSOLE, "Conn %d rx [scheduled, ready, state]: %d, %d, %d\n",
281 conn->ksnc_rx_scheduled,
283 conn->ksnc_rx_state);
284 CDEBUG(D_CONSOLE, "Conn %d tx [txqcnt, scheduled, last_post, ready, deadline]: %d, %d, %lld, %d, %lld\n",
287 conn->ksnc_tx_scheduled,
288 conn->ksnc_tx_last_post,
290 conn->ksnc_rx_deadline);
292 if (conn->ksnc_scheduler)
293 CDEBUG(D_CONSOLE, "Conn %d sched [nconns, cpt]: %d, %d\n",
295 conn->ksnc_scheduler->kss_nconns,
296 conn->ksnc_scheduler->kss_cpt);
304 ksocknal_get_peer_info(struct lnet_ni *ni, int index,
305 struct lnet_processid *id, __u32 *myip, __u32 *peer_ip,
306 int *port, int *conn_count, int *share_count)
308 struct ksock_peer_ni *peer_ni;
309 struct ksock_conn_cb *conn_cb;
312 struct ksock_net *net;
314 read_lock(&ksocknal_data.ksnd_global_lock);
316 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
318 if (peer_ni->ksnp_ni != ni)
323 *id = peer_ni->ksnp_id;
324 conn_cb = peer_ni->ksnp_conn_cb;
325 if (conn_cb == NULL) {
333 ksocknal_dump_peer_debug_info(peer_ni);
335 if (conn_cb->ksnr_addr.ss_family == AF_INET) {
336 struct sockaddr_in *sa =
337 (void *)&conn_cb->ksnr_addr;
339 rc = choose_ipv4_src(myip,
340 net->ksnn_interface.ksni_index,
341 ntohl(sa->sin_addr.s_addr),
343 *peer_ip = ntohl(sa->sin_addr.s_addr);
344 *port = ntohs(sa->sin_port);
348 *peer_ip = 0xFFFFFFFF;
352 *conn_count = conn_cb->ksnr_conn_count;
357 read_unlock(&ksocknal_data.ksnd_global_lock);
362 ksocknal_get_conns_per_peer(struct ksock_peer_ni *peer_ni)
364 struct lnet_ni *ni = peer_ni->ksnp_ni;
365 struct lnet_ioctl_config_socklnd_tunables *tunables;
369 tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_sock;
371 return tunables->lnd_conns_per_peer;
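/* Per-type connection accounting on a conn_cb. ksnr_connected is a
 * bitmask: BIT(type) is set once the expected number of connections
 * of that type exists (ksnr_max_conns for bulk/any, a single control
 * connection), and ksocknal_decr_conn_count() clears the bit again
 * when the last connection of that type goes away.
 */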
375 ksocknal_incr_conn_count(struct ksock_conn_cb *conn_cb,
378 conn_cb->ksnr_conn_count++;
380 /* check if all connections of the given type got created */
382 case SOCKLND_CONN_CONTROL:
383 conn_cb->ksnr_ctrl_conn_count++;
384 /* there's a single control connection per peer,
385 * two in case of loopback
387 conn_cb->ksnr_connected |= BIT(type);
389 case SOCKLND_CONN_BULK_IN:
390 conn_cb->ksnr_blki_conn_count++;
391 if (conn_cb->ksnr_blki_conn_count >= conn_cb->ksnr_max_conns)
392 conn_cb->ksnr_connected |= BIT(type);
394 case SOCKLND_CONN_BULK_OUT:
395 conn_cb->ksnr_blko_conn_count++;
396 if (conn_cb->ksnr_blko_conn_count >= conn_cb->ksnr_max_conns)
397 conn_cb->ksnr_connected |= BIT(type);
399 case SOCKLND_CONN_ANY:
400 if (conn_cb->ksnr_conn_count >= conn_cb->ksnr_max_conns)
401 conn_cb->ksnr_connected |= BIT(type);
408 CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
409 type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
414 ksocknal_decr_conn_count(struct ksock_conn_cb *conn_cb,
417 conn_cb->ksnr_conn_count--;
419 /* check if all connections of the given type got created */
421 case SOCKLND_CONN_CONTROL:
422 conn_cb->ksnr_ctrl_conn_count--;
423 /* there's a single control connection per peer,
424 * two in case of loopback
426 if (conn_cb->ksnr_ctrl_conn_count == 0)
427 conn_cb->ksnr_connected &= ~BIT(type);
429 case SOCKLND_CONN_BULK_IN:
430 conn_cb->ksnr_blki_conn_count--;
431 if (conn_cb->ksnr_blki_conn_count == 0)
432 conn_cb->ksnr_connected &= ~BIT(type);
434 case SOCKLND_CONN_BULK_OUT:
435 conn_cb->ksnr_blko_conn_count--;
436 if (conn_cb->ksnr_blko_conn_count == 0)
437 conn_cb->ksnr_connected &= ~BIT(type);
439 case SOCKLND_CONN_ANY:
440 if (conn_cb->ksnr_conn_count == 0)
441 conn_cb->ksnr_connected &= ~BIT(type);
448 CDEBUG(D_NET, "Del conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
449 type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
453 ksocknal_associate_cb_conn_locked(struct ksock_conn_cb *conn_cb,
454 struct ksock_conn *conn)
456 int type = conn->ksnc_type;
458 conn->ksnc_conn_cb = conn_cb;
459 ksocknal_conn_cb_addref(conn_cb);
460 ksocknal_incr_conn_count(conn_cb, type);
462 /* Successful connection => further attempts can
463 * proceed immediately
465 conn_cb->ksnr_retry_interval = 0;
469 ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
470 struct ksock_conn_cb *conn_cb)
472 struct ksock_conn *conn;
473 struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
475 LASSERT(!peer_ni->ksnp_closing);
476 LASSERT(!conn_cb->ksnr_peer);
477 LASSERT(!conn_cb->ksnr_scheduled);
478 LASSERT(!conn_cb->ksnr_connecting);
479 LASSERT(conn_cb->ksnr_connected == 0);
481 conn_cb->ksnr_peer = peer_ni;
482 ksocknal_peer_addref(peer_ni);
484 /* peer_ni's ksnp_conn_cb takes over my ref on 'conn_cb' */
485 peer_ni->ksnp_conn_cb = conn_cb;
486 net->ksnn_interface.ksni_nroutes++;
488 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
489 if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
490 (struct sockaddr *)&conn_cb->ksnr_addr))
492 CDEBUG(D_NET, "call ksocknal_associate_cb_conn_locked\n");
493 ksocknal_associate_cb_conn_locked(conn_cb, conn);
494 /* keep going (typed conns) */
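/* Detach a conn_cb from its peer_ni: close every connection created
 * through it, drop the peer_ni's ref on it and, if the peer_ni is
 * left with no connections at all, unlink the peer_ni as well.
 */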
499 ksocknal_del_conn_cb_locked(struct ksock_conn_cb *conn_cb)
501 struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
502 struct ksock_conn *conn;
503 struct ksock_conn *cnxt;
504 struct ksock_net *net;
506 LASSERT(!conn_cb->ksnr_deleted);
508 /* Close associated conns */
509 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
510 if (conn->ksnc_conn_cb != conn_cb)
513 ksocknal_close_conn_locked(conn, 0);
516 net = (struct ksock_net *)(peer_ni->ksnp_ni->ni_data);
517 net->ksnn_interface.ksni_nroutes--;
518 LASSERT(net->ksnn_interface.ksni_nroutes >= 0);
520 conn_cb->ksnr_deleted = 1;
521 ksocknal_conn_cb_decref(conn_cb); /* drop peer_ni's ref */
522 peer_ni->ksnp_conn_cb = NULL;
524 if (list_empty(&peer_ni->ksnp_conns)) {
525 /* I've just removed the last route to a peer_ni with no active
528 ksocknal_unlink_peer_locked(peer_ni);
533 ksocknal_get_conn_count_by_type(struct ksock_conn_cb *conn_cb,
536 unsigned int count = 0;
539 case SOCKLND_CONN_CONTROL:
540 count = conn_cb->ksnr_ctrl_conn_count;
542 case SOCKLND_CONN_BULK_IN:
543 count = conn_cb->ksnr_blki_conn_count;
545 case SOCKLND_CONN_BULK_OUT:
546 count = conn_cb->ksnr_blko_conn_count;
548 case SOCKLND_CONN_ANY:
549 count = conn_cb->ksnr_conn_count;
560 ksocknal_add_peer(struct lnet_ni *ni, struct lnet_processid *id,
561 struct sockaddr *addr)
563 struct ksock_peer_ni *peer_ni;
564 struct ksock_peer_ni *peer2;
565 struct ksock_conn_cb *conn_cb;
567 if (LNET_NID_IS_ANY(&id->nid) ||
568 id->pid == LNET_PID_ANY)
571 /* Have a brand new peer_ni ready... */
572 peer_ni = ksocknal_create_peer(ni, id);
574 return PTR_ERR(peer_ni);
576 conn_cb = ksocknal_create_conn_cb(addr);
578 ksocknal_peer_decref(peer_ni);
582 write_lock_bh(&ksocknal_data.ksnd_global_lock);
584 /* always called with a ref on ni, so shutdown can't have started */
585 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
588 peer2 = ksocknal_find_peer_locked(ni, id);
590 ksocknal_peer_decref(peer_ni);
593 /* peer_ni table takes my ref on peer_ni */
594 hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list,
598 if (peer_ni->ksnp_conn_cb) {
599 ksocknal_conn_cb_decref(conn_cb);
601 /* Remember conns_per_peer setting at the time
602 * of connection initiation. It will define the
603 * max number of conns per type for this conn_cb
606 conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
607 ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
610 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
616 ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni)
618 struct ksock_conn *conn;
619 struct ksock_conn *cnxt;
620 struct ksock_conn_cb *conn_cb;
622 LASSERT(!peer_ni->ksnp_closing);
624 /* Extra ref prevents peer_ni disappearing until I'm done with it */
625 ksocknal_peer_addref(peer_ni);
626 conn_cb = peer_ni->ksnp_conn_cb;
628 ksocknal_del_conn_cb_locked(conn_cb);
630 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns,
632 ksocknal_close_conn_locked(conn, 0);
634 ksocknal_peer_decref(peer_ni);
635 /* NB peer_ni unlinks itself when last conn/conn_cb is removed */
639 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_processid *id)
642 struct hlist_node *pnxt;
643 struct ksock_peer_ni *peer_ni;
649 write_lock_bh(&ksocknal_data.ksnd_global_lock);
651 if (id && !LNET_NID_IS_ANY(&id->nid)) {
652 lo = hash_min(nidhash(&id->nid),
653 HASH_BITS(ksocknal_data.ksnd_peers));
657 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
660 for (i = lo; i <= hi; i++) {
661 hlist_for_each_entry_safe(peer_ni, pnxt,
662 &ksocknal_data.ksnd_peers[i],
664 if (peer_ni->ksnp_ni != ni)
667 if (!((!id || LNET_NID_IS_ANY(&id->nid) ||
668 nid_same(&peer_ni->ksnp_id.nid, &id->nid)) &&
669 (!id || id->pid == LNET_PID_ANY ||
670 peer_ni->ksnp_id.pid == id->pid)))
673 ksocknal_peer_addref(peer_ni); /* a ref for me... */
675 ksocknal_del_peer_locked(peer_ni);
677 if (peer_ni->ksnp_closing &&
678 !list_empty(&peer_ni->ksnp_tx_queue)) {
679 LASSERT(list_empty(&peer_ni->ksnp_conns));
680 LASSERT(peer_ni->ksnp_conn_cb == NULL);
682 list_splice_init(&peer_ni->ksnp_tx_queue,
686 ksocknal_peer_decref(peer_ni); /* ...till here */
688 rc = 0; /* matched! */
692 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
694 ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
699 static struct ksock_conn *
700 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
702 struct ksock_peer_ni *peer_ni;
703 struct ksock_conn *conn;
706 read_lock(&ksocknal_data.ksnd_global_lock);
708 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
709 LASSERT(!peer_ni->ksnp_closing);
711 if (peer_ni->ksnp_ni != ni)
714 list_for_each_entry(conn, &peer_ni->ksnp_conns,
719 ksocknal_conn_addref(conn);
720 read_unlock(&ksocknal_data.ksnd_global_lock);
725 read_unlock(&ksocknal_data.ksnd_global_lock);
729 static struct ksock_sched *
730 ksocknal_choose_scheduler_locked(unsigned int cpt)
732 struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
735 if (sched->kss_nthreads == 0) {
736 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
737 if (sched->kss_nthreads > 0) {
738 CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
739 cpt, sched->kss_cpt);
750 ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
752 struct ksock_connreq *cr;
754 struct sockaddr_storage peer;
756 rc = lnet_sock_getaddr(sock, true, &peer);
758 CERROR("Can't determine new connection's address\n");
762 LIBCFS_ALLOC(cr, sizeof(*cr));
764 LCONSOLE_ERROR("Dropping connection request from %pISc: memory exhausted\n",
771 cr->ksncr_sock = sock;
773 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
775 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
776 wake_up(&ksocknal_data.ksnd_connd_waitq);
778 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
782 static const struct ln_key_list ksocknal_tunables_keys = {
783 .lkl_maxattr = LNET_NET_SOCKLND_TUNABLES_ATTR_MAX,
785 [LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER] = {
786 .lkp_value = "conns_per_peer",
787 .lkp_data_type = NLA_U16
789 [LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TIMEOUT] = {
790 .lkp_value = "timeout",
791 .lkp_data_type = NLA_U32
793 [LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TOS] = {
795 .lkp_data_type = NLA_S16,
801 ksocknal_nl_get(int cmd, struct sk_buff *msg, int type, void *data)
803 struct lnet_lnd_tunables *tun;
804 struct lnet_ni *ni = data;
809 if (cmd != LNET_CMD_NETS || type != LNET_NET_LOCAL_NI_ATTR_LND_TUNABLES)
812 tun = &ni->ni_lnd_tunables;
813 nla_put_u16(msg, LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER,
814 tun->lnd_tun_u.lnd_sock.lnd_conns_per_peer);
815 nla_put_u32(msg, LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TIMEOUT,
817 nla_put_s16(msg, LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TOS,
818 tun->lnd_tun_u.lnd_sock.lnd_tos);
824 ksocknal_nl_set_default(int cmd, int type, void *data)
826 struct lnet_lnd_tunables *tunables = data;
827 struct lnet_ioctl_config_socklnd_tunables *lt;
828 struct lnet_ioctl_config_socklnd_tunables *df;
830 lt = &tunables->lnd_tun_u.lnd_sock;
831 df = &ksock_default_tunables;
833 case LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER:
834 lt->lnd_conns_per_peer = df->lnd_conns_per_peer;
836 case LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TIMEOUT:
837 lt->lnd_timeout = df->lnd_timeout;
845 ksocknal_nl_set(int cmd, struct nlattr *attr, int type, void *data)
847 struct lnet_lnd_tunables *tunables = data;
851 if (cmd != LNET_CMD_NETS)
855 ksocknal_nl_set_default(cmd, type, data);
859 if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
863 case LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER:
864 /* Valid values are 1 to 127. Zero means calculate the value */
865 num = nla_get_s64(attr);
866 if (num > -1 && num < 128)
867 tunables->lnd_tun_u.lnd_sock.lnd_conns_per_peer = num;
871 case LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TIMEOUT:
872 num = nla_get_s64(attr);
873 tunables->lnd_tun_u.lnd_sock.lnd_timeout = num;
875 case LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TOS:
876 num = nla_get_s64(attr);
877 num = clamp_t(s64, num, -1, 0xff);
878 tunables->lnd_tun_u.lnd_sock.lnd_tos = num;
888 ksocknal_connecting(struct ksock_conn_cb *conn_cb, struct sockaddr *sa)
891 rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr, sa))
892 return conn_cb->ksnr_connecting;
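/* Establish a new connection over @sock. Active connects pass a
 * conn_cb and a connection type; passive accepts pass conn_cb == NULL
 * and learn the peer and type from the HELLO exchange. On success the
 * conn is attached to its peer_ni and a scheduler, and any TXs
 * blocked on the peer_ni are queued on it.
 */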
897 ksocknal_create_conn(struct lnet_ni *ni, struct ksock_conn_cb *conn_cb,
898 struct socket *sock, int type)
900 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
902 struct lnet_processid peerid;
904 struct ksock_conn *conn;
905 struct ksock_conn *conn2;
906 struct ksock_peer_ni *peer_ni = NULL;
907 struct ksock_peer_ni *peer2;
908 struct ksock_sched *sched;
909 struct ksock_hello_msg *hello;
912 struct ksock_tx *txtmp;
919 active = (conn_cb != NULL);
921 LASSERT(active == (type != SOCKLND_CONN_NONE));
923 LIBCFS_ALLOC(conn, sizeof(*conn));
929 conn->ksnc_peer = NULL;
930 conn->ksnc_conn_cb = NULL;
931 conn->ksnc_sock = sock;
932 /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
933 * from being closed before the connection is established */
934 refcount_set(&conn->ksnc_sock_refcount, 2);
935 conn->ksnc_type = type;
936 ksocknal_lib_save_callback(sock, conn);
937 refcount_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
939 conn->ksnc_rx_ready = 0;
940 conn->ksnc_rx_scheduled = 0;
942 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
943 conn->ksnc_tx_ready = 0;
944 conn->ksnc_tx_scheduled = 0;
945 conn->ksnc_tx_carrier = NULL;
946 atomic_set(&conn->ksnc_tx_nob, 0);
948 LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
949 kshm_ips[LNET_INTERFACES_NUM]));
955 /* stash conn's local and remote addrs */
956 rc = ksocknal_lib_get_conn_addrs(conn);
960 /* Find out/confirm peer_ni's NID and connection type and get the
961 * vector of interfaces she's willing to let me connect to.
962 * Passive connections use the listener timeout since the peer_ni sends
966 struct sockaddr_in *psa = (void *)&conn->ksnc_peeraddr;
968 peer_ni = conn_cb->ksnr_peer;
969 LASSERT(ni == peer_ni->ksnp_ni);
971 /* Active connection sends HELLO eagerly */
972 hello->kshm_nips = 0;
973 peerid = peer_ni->ksnp_id;
975 write_lock_bh(global_lock);
976 conn->ksnc_proto = peer_ni->ksnp_proto;
977 write_unlock_bh(global_lock);
979 if (conn->ksnc_proto == NULL) {
980 if (psa->sin_family == AF_INET6)
981 conn->ksnc_proto = &ksocknal_protocol_v4x;
982 else if (psa->sin_family == AF_INET)
983 conn->ksnc_proto = &ksocknal_protocol_v3x;
984 #if SOCKNAL_VERSION_DEBUG
985 if (*ksocknal_tunables.ksnd_protocol == 2)
986 conn->ksnc_proto = &ksocknal_protocol_v2x;
987 else if (*ksocknal_tunables.ksnd_protocol == 1)
988 conn->ksnc_proto = &ksocknal_protocol_v1x;
991 if (!conn->ksnc_proto) {
996 rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1000 peerid.nid = LNET_ANY_NID;
1001 peerid.pid = LNET_PID_ANY;
1003 /* Passive, get protocol from peer_ni */
1004 conn->ksnc_proto = NULL;
1007 rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
1011 LASSERT(rc == 0 || active);
1012 LASSERT(conn->ksnc_proto != NULL);
1013 LASSERT(!LNET_NID_IS_ANY(&peerid.nid));
1015 cpt = lnet_nid2cpt(&peerid.nid, ni);
1018 ksocknal_peer_addref(peer_ni);
1019 write_lock_bh(global_lock);
1021 peer_ni = ksocknal_create_peer(ni, &peerid);
1022 if (IS_ERR(peer_ni)) {
1023 rc = PTR_ERR(peer_ni);
1027 write_lock_bh(global_lock);
1029 /* called with a ref on ni, so shutdown can't have started */
1030 LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);
1032 peer2 = ksocknal_find_peer_locked(ni, &peerid);
1033 if (peer2 == NULL) {
1034 /* NB this puts an "empty" peer_ni in the peer_ni
1035 * table (which takes my ref) */
1036 hash_add(ksocknal_data.ksnd_peers,
1037 &peer_ni->ksnp_list, nidhash(&peerid.nid));
1039 ksocknal_peer_decref(peer_ni);
1044 ksocknal_peer_addref(peer_ni);
1045 peer_ni->ksnp_accepting++;
1047 /* Am I already connecting to this guy? Resolve in
1048 * favour of higher NID...
1050 if (memcmp(&peerid.nid, &ni->ni_nid, sizeof(peerid.nid)) < 0 &&
1051 ksocknal_connecting(peer_ni->ksnp_conn_cb,
1052 ((struct sockaddr *) &conn->ksnc_peeraddr))) {
1054 warn = "connection race resolution";
1059 if (peer_ni->ksnp_closing ||
1060 (active && conn_cb->ksnr_deleted)) {
1061 /* peer_ni/conn_cb got closed under me */
1063 warn = "peer_ni/conn_cb removed";
1067 if (peer_ni->ksnp_proto == NULL) {
1068 /* Never connected before.
1069 * NB recv_hello may have returned EPROTO to signal my peer_ni
1070 * wants a different protocol than the one I asked for.
1072 LASSERT(list_empty(&peer_ni->ksnp_conns));
1074 peer_ni->ksnp_proto = conn->ksnc_proto;
1075 peer_ni->ksnp_incarnation = incarnation;
1078 if (peer_ni->ksnp_proto != conn->ksnc_proto ||
1079 peer_ni->ksnp_incarnation != incarnation) {
1080 /* peer_ni rebooted or I've got the wrong protocol version */
1081 ksocknal_close_peer_conns_locked(peer_ni, NULL, 0);
1083 peer_ni->ksnp_proto = NULL;
1085 warn = peer_ni->ksnp_incarnation != incarnation ?
1086 "peer_ni rebooted" :
1087 "wrong proto version";
1097 warn = "lost conn race";
1100 warn = "retry with different protocol version";
1104 /* Refuse to duplicate an existing connection, unless this is a
1105 * loopback connection */
1106 if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
1107 (struct sockaddr *)&conn->ksnc_myaddr)) {
1108 list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
1110 (struct sockaddr *)&conn2->ksnc_peeraddr,
1111 (struct sockaddr *)&conn->ksnc_peeraddr) ||
1113 (struct sockaddr *)&conn2->ksnc_myaddr,
1114 (struct sockaddr *)&conn->ksnc_myaddr) ||
1115 conn2->ksnc_type != conn->ksnc_type)
1119 /* If max conns per type is not registered in conn_cb
1120 * as ksnr_max_conns, use ni's conns_per_peer
1122 if ((peer_ni->ksnp_conn_cb &&
1123 num_dup < peer_ni->ksnp_conn_cb->ksnr_max_conns) ||
1124 (!peer_ni->ksnp_conn_cb &&
1125 num_dup < ksocknal_get_conns_per_peer(peer_ni)))
1128 /* Reply on a passive connection attempt so the peer_ni
1129 * realises we're connected.
1139 /* If the connection created by this route didn't bind to the IP
1140 * address the route connected to, the connection/route matching
1141 * code below probably isn't going to work.
1144 !rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr,
1145 (struct sockaddr *)&conn->ksnc_peeraddr)) {
1146 CERROR("Route %s %pISc connected to %pISc\n",
1147 libcfs_idstr(&peer_ni->ksnp_id),
1148 &conn_cb->ksnr_addr,
1149 &conn->ksnc_peeraddr);
1152 /* Search for a conn_cb corresponding to the new connection and
1153 * create an association. This allows incoming connections created
1154 * by conn_cbs in my peer_ni to match my own conn_cb entries so I don't
1155 * continually create duplicate conn_cbs.
1157 conn_cb = peer_ni->ksnp_conn_cb;
1159 if (conn_cb && rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
1160 (struct sockaddr *)&conn_cb->ksnr_addr))
1161 ksocknal_associate_cb_conn_locked(conn_cb, conn);
1163 conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
1164 peer_ni->ksnp_last_alive = ktime_get_seconds();
1165 peer_ni->ksnp_send_keepalive = 0;
1166 peer_ni->ksnp_error = 0;
1168 sched = ksocknal_choose_scheduler_locked(cpt);
1170 CERROR("no schedulers available. node is unhealthy\n");
1174 * The cpt might have changed if we ended up selecting a non cpt
1175 * native scheduler. So use the scheduler's cpt instead.
1177 cpt = sched->kss_cpt;
1178 sched->kss_nconns++;
1179 conn->ksnc_scheduler = sched;
1181 conn->ksnc_tx_last_post = ktime_get_seconds();
1182 /* Set the deadline for the outgoing HELLO to drain */
1183 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1184 conn->ksnc_tx_deadline = ktime_get_seconds() +
1186 smp_mb(); /* order with adding to peer_ni's conn list */
1188 list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
1189 ksocknal_conn_addref(conn);
1191 ksocknal_new_packet(conn, 0);
1193 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1195 /* Take packets blocking for this connection. */
1196 list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
1197 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
1201 list_del(&tx->tx_list);
1202 ksocknal_queue_tx_locked(tx, conn);
1205 write_unlock_bh(global_lock);
1206 /* We've now got a new connection. Any errors from here on are just
1207 * like "normal" comms errors and we close the connection normally.
1208 * NB (a) we still have to send the reply HELLO for passive
1210 * (b) normal I/O on the conn is blocked until I setup and call the
1214 CDEBUG(D_NET, "New conn %s p %d.x %pISc -> %pIScp"
1215 " incarnation:%lld sched[%d]\n",
1216 libcfs_idstr(&peerid), conn->ksnc_proto->pro_version,
1217 &conn->ksnc_myaddr, &conn->ksnc_peeraddr,
1221 hello->kshm_nips = 0;
1222 rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1225 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1226 kshm_ips[LNET_INTERFACES_NUM]));
1228 /* setup the socket AFTER I've received hello (it disables
1229 * SO_LINGER). I might call back to the acceptor who may want
1230 * to send a protocol version response and then close the
1231 * socket; this ensures the socket only tears down after the
1232 * response has been sent.
1235 rc = ksocknal_lib_setup_sock(sock, ni);
1237 write_lock_bh(global_lock);
1239 /* NB my callbacks block while I hold ksnd_global_lock */
1240 ksocknal_lib_set_callback(sock, conn);
1243 peer_ni->ksnp_accepting--;
1245 write_unlock_bh(global_lock);
1248 write_lock_bh(global_lock);
1249 if (!conn->ksnc_closing) {
1250 /* could be closed by another thread */
1251 ksocknal_close_conn_locked(conn, rc);
1253 write_unlock_bh(global_lock);
1254 } else if (ksocknal_connsock_addref(conn) == 0) {
1255 /* Allow I/O to proceed. */
1256 ksocknal_read_callback(conn);
1257 ksocknal_write_callback(conn);
1258 ksocknal_connsock_decref(conn);
1261 ksocknal_connsock_decref(conn);
1262 ksocknal_conn_decref(conn);
1267 if (!peer_ni->ksnp_closing &&
1268 list_empty(&peer_ni->ksnp_conns) &&
1269 peer_ni->ksnp_conn_cb == NULL) {
1270 list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
1271 ksocknal_unlink_peer_locked(peer_ni);
1274 write_unlock_bh(global_lock);
1278 CERROR("Not creating conn %s type %d: %s\n",
1279 libcfs_idstr(&peerid), conn->ksnc_type, warn);
1281 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1282 libcfs_idstr(&peerid), conn->ksnc_type, warn);
1287 /* Request retry by replying with CONN_NONE
1288 * ksnc_proto has been set already
1290 conn->ksnc_type = SOCKLND_CONN_NONE;
1291 hello->kshm_nips = 0;
1292 ksocknal_send_hello(ni, conn, &peerid.nid, hello);
1295 write_lock_bh(global_lock);
1296 peer_ni->ksnp_accepting--;
1297 write_unlock_bh(global_lock);
1301 * If we get here without an error code, just use -EALREADY.
1302 * Depending on how we got here, the error may be positive
1303 * or negative. Normalize the value for ksocknal_txlist_done().
1305 rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
1306 ksocknal_txlist_done(ni, &zombies, rc2);
1307 ksocknal_peer_decref(peer_ni);
1311 LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
1312 kshm_ips[LNET_INTERFACES_NUM]));
1314 LIBCFS_FREE(conn, sizeof(*conn));
1323 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1325 /* This just does the immediate housekeeping, and queues the
1326 * connection for the reaper to terminate.
1327 * Caller holds ksnd_global_lock exclusively in irq context */
1328 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1329 struct ksock_conn_cb *conn_cb;
1330 struct ksock_conn *conn2;
1332 int duplicate_count = 0;
1334 LASSERT(peer_ni->ksnp_error == 0);
1335 LASSERT(!conn->ksnc_closing);
1336 conn->ksnc_closing = 1;
1338 /* ksnd_deathrow_conns takes over peer_ni's ref */
1339 list_del(&conn->ksnc_list);
1341 conn_cb = conn->ksnc_conn_cb;
1342 if (conn_cb != NULL) {
1343 /* dissociate conn from cb... */
1344 LASSERT(!conn_cb->ksnr_deleted);
1346 conn_count = ksocknal_get_conn_count_by_type(conn_cb,
1348 /* connected bit is set only if all connections
1349 * of the given type got created
1351 if (conn_count == conn_cb->ksnr_max_conns)
1352 LASSERT((conn_cb->ksnr_connected &
1353 BIT(conn->ksnc_type)) != 0);
1355 if (conn_count == 1) {
1356 list_for_each_entry(conn2, &peer_ni->ksnp_conns,
1358 if (conn2->ksnc_conn_cb == conn_cb &&
1359 conn2->ksnc_type == conn->ksnc_type)
1360 duplicate_count += 1;
1362 if (duplicate_count > 0)
1363 CERROR("Found %d duplicate conns type %d\n",
1367 ksocknal_decr_conn_count(conn_cb, conn->ksnc_type);
1369 conn->ksnc_conn_cb = NULL;
1371 /* drop conn's ref on conn_cb */
1372 ksocknal_conn_cb_decref(conn_cb);
1375 if (list_empty(&peer_ni->ksnp_conns)) {
1376 /* No more connections to this peer_ni */
1378 if (!list_empty(&peer_ni->ksnp_tx_queue)) {
1379 struct ksock_tx *tx;
1381 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1383 /* throw them to the last connection...,
1384 * these TXs will be sent to /dev/null by the scheduler */
1385 list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
1387 ksocknal_tx_prep(conn, tx);
1389 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1390 list_splice_init(&peer_ni->ksnp_tx_queue,
1391 &conn->ksnc_tx_queue);
1392 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1395 /* renegotiate protocol version */
1396 peer_ni->ksnp_proto = NULL;
1397 /* stash last conn close reason */
1398 peer_ni->ksnp_error = error;
1400 if (peer_ni->ksnp_conn_cb == NULL) {
1401 /* I've just closed last conn belonging to a
1402 * peer_ni with no connections to it
1404 ksocknal_unlink_peer_locked(peer_ni);
1408 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1410 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
1411 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1413 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1417 ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
1419 bool notify = false;
1420 time64_t last_alive = 0;
1422 /* There has been a connection failure or comms error; but I'll only
1423 * tell LNET I think the peer_ni is dead if it's to another kernel and
1424 * there are no connections or connection attempts in existence. */
1426 read_lock(&ksocknal_data.ksnd_global_lock);
1428 if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1429 list_empty(&peer_ni->ksnp_conns) &&
1430 peer_ni->ksnp_accepting == 0 &&
1431 !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
1433 last_alive = peer_ni->ksnp_last_alive;
1436 read_unlock(&ksocknal_data.ksnd_global_lock);
1439 lnet_notify(peer_ni->ksnp_ni,
1440 &peer_ni->ksnp_id.nid,
1441 false, false, last_alive);
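/* Abort the zero-copy requests still outstanding on a dying conn:
 * clear their cookies, mark them un-acked and drop their references
 * once ksnp_lock has been released.
 */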
1445 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1447 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1448 struct ksock_tx *tx;
1449 struct ksock_tx *tmp;
1452 /* NB safe to finalize TXs because closing of socket will
1453 * abort all buffered data */
1454 LASSERT(conn->ksnc_sock == NULL);
1456 spin_lock(&peer_ni->ksnp_lock);
1458 list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
1460 if (tx->tx_conn != conn)
1463 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1465 tx->tx_msg.ksm_zc_cookies[0] = 0;
1466 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1467 list_move(&tx->tx_zc_list, &zlist);
1470 spin_unlock(&peer_ni->ksnp_lock);
1472 while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
1473 tx_zc_list)) != NULL) {
1474 list_del(&tx->tx_zc_list);
1475 ksocknal_tx_decref(tx);
1480 ksocknal_terminate_conn(struct ksock_conn *conn)
1482 /* This gets called by the reaper (guaranteed thread context) to
1483 * disengage the socket from its callbacks and close it.
1484 * ksnc_refcount will eventually hit zero, and then the reaper will
1487 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1488 struct ksock_sched *sched = conn->ksnc_scheduler;
1489 bool failed = false;
1491 LASSERT(conn->ksnc_closing);
1493 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1494 spin_lock_bh(&sched->kss_lock);
1496 /* a closing conn is always ready to tx */
1497 conn->ksnc_tx_ready = 1;
1499 if (!conn->ksnc_tx_scheduled &&
1500 !list_empty(&conn->ksnc_tx_queue)) {
1501 list_add_tail(&conn->ksnc_tx_list,
1502 &sched->kss_tx_conns);
1503 conn->ksnc_tx_scheduled = 1;
1504 /* extra ref for scheduler */
1505 ksocknal_conn_addref(conn);
1507 wake_up(&sched->kss_waitq);
1510 spin_unlock_bh(&sched->kss_lock);
1512 /* serialise with callbacks */
1513 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1515 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1517 /* OK, so this conn may not be completely disengaged from its
1518 * scheduler yet, but it _has_ committed to terminate...
1520 conn->ksnc_scheduler->kss_nconns--;
1522 if (peer_ni->ksnp_error != 0) {
1523 /* peer_ni's last conn closed in error */
1524 LASSERT(list_empty(&peer_ni->ksnp_conns));
1526 peer_ni->ksnp_error = 0; /* avoid multiple notifications */
1529 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1532 ksocknal_peer_failed(peer_ni);
1534 /* The socket is closed on the final put; either here, or in
1535 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1536 * when the connection was established, this will close the socket
1537 * immediately, aborting anything buffered in it. Any hung
1538 * zero-copy transmits will therefore complete in finite time.
1540 ksocknal_connsock_decref(conn);
1544 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1546 /* Queue the conn for the reaper to destroy */
1547 LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
1548 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1550 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1551 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1553 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1557 ksocknal_destroy_conn(struct ksock_conn *conn)
1561 /* Final coup-de-grace of the reaper */
1562 CDEBUG(D_NET, "connection %p\n", conn);
1564 LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
1565 LASSERT(refcount_read(&conn->ksnc_sock_refcount) == 0);
1566 LASSERT(conn->ksnc_sock == NULL);
1567 LASSERT(conn->ksnc_conn_cb == NULL);
1568 LASSERT(!conn->ksnc_tx_scheduled);
1569 LASSERT(!conn->ksnc_rx_scheduled);
1570 LASSERT(list_empty(&conn->ksnc_tx_queue));
1572 /* complete current receive if any */
1573 switch (conn->ksnc_rx_state) {
1574 case SOCKNAL_RX_LNET_PAYLOAD:
1575 last_rcv = conn->ksnc_rx_deadline -
1577 CERROR("Completing partial receive from %s[%d], ip %pIScp, with error, wanted: %d, left: %d, last alive is %lld secs ago\n",
1578 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1580 &conn->ksnc_peeraddr,
1581 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1582 ktime_get_seconds() - last_rcv);
1583 if (conn->ksnc_lnet_msg)
1584 conn->ksnc_lnet_msg->msg_health_status =
1585 LNET_MSG_STATUS_REMOTE_ERROR;
1586 lnet_finalize(conn->ksnc_lnet_msg, -EIO);
1588 case SOCKNAL_RX_LNET_HEADER:
1589 if (conn->ksnc_rx_started)
1590 CERROR("Incomplete receive of lnet header from %s, ip %pIScp, with error, protocol: %d.x.\n",
1591 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1592 &conn->ksnc_peeraddr,
1593 conn->ksnc_proto->pro_version);
1595 case SOCKNAL_RX_KSM_HEADER:
1596 if (conn->ksnc_rx_started)
1597 CERROR("Incomplete receive of ksock message from %s, ip %pIScp, with error, protocol: %d.x.\n",
1598 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1599 &conn->ksnc_peeraddr,
1600 conn->ksnc_proto->pro_version);
1602 case SOCKNAL_RX_SLOP:
1603 if (conn->ksnc_rx_started)
1604 CERROR("Incomplete receive of slops from %s, ip %pIScp, with error\n",
1605 libcfs_idstr(&conn->ksnc_peer->ksnp_id),
1606 &conn->ksnc_peeraddr);
1613 ksocknal_peer_decref(conn->ksnc_peer);
1615 LIBCFS_FREE(conn, sizeof(*conn));
1619 ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni,
1620 struct sockaddr *addr, int why)
1622 struct ksock_conn *conn;
1623 struct ksock_conn *cnxt;
1626 list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
1629 (struct sockaddr *)&conn->ksnc_peeraddr)) {
1631 ksocknal_close_conn_locked(conn, why);
1639 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1641 struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
1644 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1646 count = ksocknal_close_peer_conns_locked(
1647 peer_ni, (struct sockaddr *)&conn->ksnc_peeraddr, why);
1649 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1655 ksocknal_close_matching_conns(struct lnet_processid *id, __u32 ipaddr)
1657 struct ksock_peer_ni *peer_ni;
1658 struct hlist_node *pnxt;
1663 struct sockaddr_in sa = {.sin_family = AF_INET};
1665 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1667 if (!LNET_NID_IS_ANY(&id->nid)) {
1668 lo = hash_min(nidhash(&id->nid),
1669 HASH_BITS(ksocknal_data.ksnd_peers));
1673 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1676 sa.sin_addr.s_addr = htonl(ipaddr);
1677 for (i = lo; i <= hi; i++) {
1678 hlist_for_each_entry_safe(peer_ni, pnxt,
1679 &ksocknal_data.ksnd_peers[i],
1682 if (!((LNET_NID_IS_ANY(&id->nid) ||
1683 nid_same(&id->nid, &peer_ni->ksnp_id.nid)) &&
1684 (id->pid == LNET_PID_ANY ||
1685 id->pid == peer_ni->ksnp_id.pid)))
1688 count += ksocknal_close_peer_conns_locked(
1690 ipaddr ? (struct sockaddr *)&sa : NULL, 0);
1694 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1696 /* wildcards always succeed */
1697 if (LNET_NID_IS_ANY(&id->nid) || id->pid == LNET_PID_ANY ||
1701 return (count == 0 ? -ENOENT : 0);
1705 ksocknal_notify_gw_down(struct lnet_nid *gw_nid)
1707 /* The router is telling me she's been notified of a change in
1710 struct lnet_processid id = {
1711 .pid = LNET_PID_ANY,
1715 CDEBUG(D_NET, "gw %s down\n", libcfs_nidstr(gw_nid));
1717 /* If the gateway crashed, close all open connections... */
1718 ksocknal_close_matching_conns(&id, 0);
1721 /* We can only establish new connections
1722 * if we have autoroutes, and these connect on demand.
1727 ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
1731 struct ksock_conn *conn;
1733 for (index = 0; ; index++) {
1734 read_lock(&ksocknal_data.ksnd_global_lock);
1739 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
1741 ksocknal_conn_addref(conn);
1746 read_unlock(&ksocknal_data.ksnd_global_lock);
1751 ksocknal_lib_push_conn(conn);
1752 ksocknal_conn_decref(conn);
1757 ksocknal_push(struct lnet_ni *ni, struct lnet_processid *id)
1764 if (!LNET_NID_IS_ANY(&id->nid)) {
1765 lo = hash_min(nidhash(&id->nid),
1766 HASH_BITS(ksocknal_data.ksnd_peers));
1770 hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
1773 for (bkt = lo; bkt <= hi; bkt++) {
1774 int peer_off; /* searching offset in peer_ni hash table */
1776 for (peer_off = 0; ; peer_off++) {
1777 struct ksock_peer_ni *peer_ni;
1780 read_lock(&ksocknal_data.ksnd_global_lock);
1781 hlist_for_each_entry(peer_ni,
1782 &ksocknal_data.ksnd_peers[bkt],
1784 if (!((LNET_NID_IS_ANY(&id->nid) ||
1786 &peer_ni->ksnp_id.nid)) &&
1787 (id->pid == LNET_PID_ANY ||
1788 id->pid == peer_ni->ksnp_id.pid)))
1791 if (i++ == peer_off) {
1792 ksocknal_peer_addref(peer_ni);
1796 read_unlock(&ksocknal_data.ksnd_global_lock);
1798 if (i <= peer_off) /* no match */
1802 ksocknal_push_peer(peer_ni);
1803 ksocknal_peer_decref(peer_ni);
1810 ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1812 struct lnet_processid id = {};
1813 struct libcfs_ioctl_data *data = arg;
1817 case IOC_LIBCFS_GET_INTERFACE: {
1818 struct ksock_net *net = ni->ni_data;
1819 struct ksock_interface *iface;
1820 struct sockaddr_in *sa;
1822 read_lock(&ksocknal_data.ksnd_global_lock);
1824 if (data->ioc_count >= 1) {
1828 iface = &net->ksnn_interface;
1830 sa = (void *)&iface->ksni_addr;
1831 if (sa->sin_family == AF_INET) {
1832 data->ioc_u32[0] = ntohl(sa->sin_addr.s_addr);
1833 data->ioc_u32[1] = iface->ksni_netmask;
1835 data->ioc_u32[0] = 0xFFFFFFFF;
1836 data->ioc_u32[1] = 0;
1838 data->ioc_u32[2] = iface->ksni_npeers;
1839 data->ioc_u32[3] = iface->ksni_nroutes;
1842 read_unlock(&ksocknal_data.ksnd_global_lock);
1846 case IOC_LIBCFS_GET_PEER: {
1851 int share_count = 0;
1853 rc = ksocknal_get_peer_info(ni, data->ioc_count,
1854 &id, &myip, &ip, &port,
1855 &conn_count, &share_count);
1859 if (!nid_is_nid4(&id.nid))
1861 data->ioc_nid = lnet_nid_to_nid4(&id.nid);
1862 data->ioc_count = share_count;
1863 data->ioc_u32[0] = ip;
1864 data->ioc_u32[1] = port;
1865 data->ioc_u32[2] = myip;
1866 data->ioc_u32[3] = conn_count;
1867 data->ioc_u32[4] = id.pid;
1871 case IOC_LIBCFS_ADD_PEER: {
1872 struct sockaddr_in sa = {.sin_family = AF_INET};
1874 id.pid = LNET_PID_LUSTRE;
1875 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1876 sa.sin_addr.s_addr = htonl(data->ioc_u32[0]);
1877 sa.sin_port = htons(data->ioc_u32[1]);
1878 return ksocknal_add_peer(ni, &id, (struct sockaddr *)&sa);
1880 case IOC_LIBCFS_DEL_PEER:
1881 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1882 id.pid = LNET_PID_ANY;
1883 return ksocknal_del_peer(ni, &id);
1885 case IOC_LIBCFS_GET_CONN: {
1889 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
1890 struct sockaddr_in *psa = (void *)&conn->ksnc_peeraddr;
1891 struct sockaddr_in *mysa = (void *)&conn->ksnc_myaddr;
1896 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
1898 data->ioc_count = txmem;
1899 data->ioc_nid = lnet_nid_to_nid4(&conn->ksnc_peer->ksnp_id.nid);
1900 data->ioc_flags = nagle;
1901 if (psa->sin_family == AF_INET)
1902 data->ioc_u32[0] = ntohl(psa->sin_addr.s_addr);
1904 data->ioc_u32[0] = 0xFFFFFFFF;
1905 data->ioc_u32[1] = rpc_get_port((struct sockaddr *)
1906 &conn->ksnc_peeraddr);
1907 if (mysa->sin_family == AF_INET)
1908 data->ioc_u32[2] = ntohl(mysa->sin_addr.s_addr);
1910 data->ioc_u32[2] = 0xFFFFFFFF;
1911 data->ioc_u32[3] = conn->ksnc_type;
1912 data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
1913 data->ioc_u32[5] = rxmem;
1914 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
1915 ksocknal_conn_decref(conn);
1919 case IOC_LIBCFS_CLOSE_CONNECTION:
1920 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1921 id.pid = LNET_PID_ANY;
1922 return ksocknal_close_matching_conns(&id,
1925 case IOC_LIBCFS_REGISTER_MYNID:
1926 /* Ignore if this is a noop */
1927 if (nid_is_nid4(&ni->ni_nid) &&
1928 data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid))
1931 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1932 libcfs_nid2str(data->ioc_nid),
1933 libcfs_nidstr(&ni->ni_nid));
1936 case IOC_LIBCFS_PUSH_CONNECTION:
1937 lnet_nid4_to_nid(data->ioc_nid, &id.nid);
1938 id.pid = LNET_PID_ANY;
1939 return ksocknal_push(ni, &id);
1948 ksocknal_free_buffers (void)
1950 LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
1952 if (ksocknal_data.ksnd_schedulers != NULL)
1953 cfs_percpt_free(ksocknal_data.ksnd_schedulers);
1955 spin_lock(&ksocknal_data.ksnd_tx_lock);
1957 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
1959 struct ksock_tx *tx;
1961 list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
1962 spin_unlock(&ksocknal_data.ksnd_tx_lock);
1964 while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
1965 tx_list)) != NULL) {
1966 list_del(&tx->tx_list);
1967 LIBCFS_FREE(tx, tx->tx_desc_size);
1970 spin_unlock(&ksocknal_data.ksnd_tx_lock);
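/* Handle an operstate change on a net device backing one of our
 * interfaces: set or clear the NI's fatal link state and, when an
 * active NI actually changed state, ask LNet to refresh its ping
 * buffer.
 */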
1975 ksocknal_handle_link_state_change(struct net_device *dev,
1976 unsigned char operstate)
1978 struct lnet_ni *ni = NULL;
1979 struct ksock_net *net;
1980 struct ksock_net *cnxt;
1982 unsigned char link_down;
1983 struct in_device *in_dev;
1984 bool found_ip = false;
1985 struct ksock_interface *ksi = NULL;
1986 struct sockaddr_in *sa;
1987 __u32 ni_state_before;
1988 bool update_ping_buf = false;
1990 DECLARE_CONST_IN_IFADDR(ifa);
1992 link_down = !((operstate == IF_OPER_UP) || (operstate == IF_OPER_UNKNOWN));
1993 ifindex = dev->ifindex;
1995 if (!ksocknal_data.ksnd_nnets)
1998 list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
2001 ksi = &net->ksnn_interface;
2002 sa = (void *)&ksi->ksni_addr;
2005 if (strcmp(ksi->ksni_name, dev->name))
2008 if (ksi->ksni_index == -1) {
2009 if (dev->reg_state != NETREG_REGISTERED)
2011 /* A registration just happened: save the new index for
2013 ksi->ksni_index = ifindex;
2017 if (ksi->ksni_index != ifindex)
2020 if (dev->reg_state == NETREG_UNREGISTERING) {
2021 /* Device is being unregistered, we need to clear the
2022 * index; it can change when the device comes back */
2023 ksi->ksni_index = -1;
2029 in_dev = __in_dev_get_rtnl(dev);
2031 CDEBUG(D_NET, "Interface %s has no IPv4 status.\n",
2033 ni_state_before = lnet_set_link_fatal_state(ni, 1);
2036 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
2037 if (sa->sin_addr.s_addr == ifa->ifa_local)
2043 CDEBUG(D_NET, "Interface %s has no matching ip\n",
2045 ni_state_before = lnet_set_link_fatal_state(ni, 1);
2050 ni_state_before = lnet_set_link_fatal_state(ni, 1);
2052 state = (lnet_get_link_status(dev) == 0);
2053 ni_state_before = lnet_set_link_fatal_state(ni,
2057 if (!update_ping_buf &&
2058 (ni->ni_state == LNET_NI_STATE_ACTIVE) &&
2059 (atomic_read(&ni->ni_fatal_error_on) != ni_state_before))
2060 update_ping_buf = true;
2063 if (update_ping_buf)
2064 lnet_mark_ping_buffer_for_update();
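/* Handle IPv4 address add/remove events on an interface we are bound
 * to: NETDEV_DOWN marks the NI's link state as fatal, NETDEV_UP
 * clears it, and active NIs that changed state trigger a ping buffer
 * update.
 */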
2071 ksocknal_handle_inetaddr_change(struct in_ifaddr *ifa, unsigned long event)
2073 struct lnet_ni *ni = NULL;
2074 struct ksock_net *net;
2075 struct ksock_net *cnxt;
2076 struct net_device *event_netdev = ifa->ifa_dev->dev;
2078 struct ksock_interface *ksi = NULL;
2079 struct sockaddr_in *sa;
2080 __u32 ni_state_before;
2081 bool update_ping_buf = false;
2084 if (!ksocknal_data.ksnd_nnets)
2087 ifindex = event_netdev->ifindex;
2089 list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
2092 ksi = &net->ksnn_interface;
2093 sa = (void *)&ksi->ksni_addr;
2095 if (ksi->ksni_index != ifindex ||
2096 strcmp(ksi->ksni_name, event_netdev->name))
2099 if (sa->sin_addr.s_addr == ifa->ifa_local) {
2101 link_down = (event == NETDEV_DOWN);
2102 ni_state_before = lnet_set_link_fatal_state(ni,
2105 if (!update_ping_buf &&
2106 (ni->ni_state == LNET_NI_STATE_ACTIVE) &&
2107 ((event == NETDEV_DOWN) != ni_state_before))
2108 update_ping_buf = true;
2112 if (update_ping_buf)
2113 lnet_mark_ping_buffer_for_update();
2118 /************************************
2119 * Net device notifier event handler
2120 ************************************/
2121 static int ksocknal_device_event(struct notifier_block *unused,
2122 unsigned long event, void *ptr)
2124 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2125 unsigned char operstate;
2127 operstate = dev->operstate;
2129 CDEBUG(D_NET, "devevent: status=%ld, iface=%s ifindex %d state %u\n",
2130 event, dev->name, dev->ifindex, operstate);
2136 case NETDEV_REGISTER:
2137 case NETDEV_UNREGISTER:
2138 ksocknal_handle_link_state_change(dev, operstate);
2145 /************************************
2146 * Inetaddr notifier event handler
2147 ************************************/
2148 static int ksocknal_inetaddr_event(struct notifier_block *unused,
2149 unsigned long event, void *ptr)
2151 struct in_ifaddr *ifa = ptr;
2153 CDEBUG(D_NET, "addrevent: status %ld ip addr %pI4, netmask %pI4.\n",
2154 event, &ifa->ifa_address, &ifa->ifa_mask);
2160 ksocknal_handle_inetaddr_change(ifa, event);
2167 static struct notifier_block ksocknal_dev_notifier_block = {
2168 .notifier_call = ksocknal_device_event,
2171 static struct notifier_block ksocknal_inetaddr_notifier_block = {
2172 .notifier_call = ksocknal_inetaddr_event,
2176 ksocknal_base_shutdown(void)
2178 struct ksock_sched *sched;
2179 struct ksock_peer_ni *peer_ni;
2182 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
2183 libcfs_kmem_read());
2184 LASSERT(ksocknal_data.ksnd_nnets == 0);
2186 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL) {
2187 unregister_netdevice_notifier(&ksocknal_dev_notifier_block);
2188 unregister_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
2191 switch (ksocknal_data.ksnd_init) {
2196 case SOCKNAL_INIT_ALL:
2197 case SOCKNAL_INIT_DATA:
2198 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
2201 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2202 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2203 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2204 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2205 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2207 if (ksocknal_data.ksnd_schedulers != NULL) {
2208 cfs_percpt_for_each(sched, i,
2209 ksocknal_data.ksnd_schedulers) {
2211 LASSERT(list_empty(&sched->kss_tx_conns));
2212 LASSERT(list_empty(&sched->kss_rx_conns));
2213 LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2214 LASSERT(sched->kss_nconns == 0);
2218 /* flag threads to terminate; wake and wait for them to die */
2219 ksocknal_data.ksnd_shuttingdown = 1;
2220 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2221 wake_up(&ksocknal_data.ksnd_reaper_waitq);
2223 if (ksocknal_data.ksnd_schedulers != NULL) {
2224 cfs_percpt_for_each(sched, i,
2225 ksocknal_data.ksnd_schedulers)
2226 wake_up_all(&sched->kss_waitq);
2229 wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
2230 atomic_read(&ksocknal_data.ksnd_nthreads) == 0,
2231 "waiting for %d threads to terminate\n",
2232 atomic_read(&ksocknal_data.ksnd_nthreads));
2234 ksocknal_free_buffers();
2236 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2240 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
2241 libcfs_kmem_read());
2243 module_put(THIS_MODULE);
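/* Module-wide initialisation: zero ksocknal_data, set up the peer
 * hash, locks and queues, create one scheduler block per CPT, start
 * the connd and reaper threads and register the netdev/inetaddr
 * notifiers. Failures unwind through ksocknal_base_shutdown().
 */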
2247 ksocknal_base_startup(void)
2249 struct ksock_sched *sched;
2253 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2254 LASSERT(ksocknal_data.ksnd_nnets == 0);
2256 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2258 hash_init(ksocknal_data.ksnd_peers);
2260 rwlock_init(&ksocknal_data.ksnd_global_lock);
2261 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2263 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2264 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2265 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2266 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2267 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2269 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2270 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2271 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2272 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2274 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2275 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2277 /* NB memset above zeros whole of ksocknal_data */
2279 /* flag lists/ptrs/locks initialised */
2280 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2281 if (!try_module_get(THIS_MODULE))
2284 /* Create a scheduler block per available CPT */
2285 ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
2287 if (ksocknal_data.ksnd_schedulers == NULL)
2290 cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
2294 * make sure not to allocate more threads than there are
2295 * cores/CPUs in the CPT
2297 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2298 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2299 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2302 * max to half of CPUs, assume another half should be
2303 * reserved for upper layer modules
2305 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2308 sched->kss_nthreads_max = nthrs;
2311 spin_lock_init(&sched->kss_lock);
2312 INIT_LIST_HEAD(&sched->kss_rx_conns);
2313 INIT_LIST_HEAD(&sched->kss_tx_conns);
2314 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2315 init_waitqueue_head(&sched->kss_waitq);
2318 ksocknal_data.ksnd_connd_starting = 0;
2319 ksocknal_data.ksnd_connd_failed_stamp = 0;
2320 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2321 /* must have at least 2 connds to remain responsive to accepts while
2323 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2324 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2326 if (*ksocknal_tunables.ksnd_nconnds_max <
2327 *ksocknal_tunables.ksnd_nconnds) {
2328 ksocknal_tunables.ksnd_nconnds_max =
2329 ksocknal_tunables.ksnd_nconnds;
2332 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2333 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2334 ksocknal_data.ksnd_connd_starting++;
2335 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2337 rc = ksocknal_thread_start(ksocknal_connd,
2338 (void *)((uintptr_t)i),
2339 "socknal_cd%02d", i);
2341 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2342 ksocknal_data.ksnd_connd_starting--;
2343 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2344 CERROR("Can't spawn socknal connd: %d\n", rc);
2349 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2351 CERROR ("Can't spawn socknal reaper: %d\n", rc);
2355 register_netdevice_notifier(&ksocknal_dev_notifier_block);
2356 register_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
2358 /* flag everything initialised */
2359 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2364 ksocknal_base_shutdown();
2369 ksocknal_debug_peerhash(struct lnet_ni *ni)
2371 struct ksock_peer_ni *peer_ni;
2374 read_lock(&ksocknal_data.ksnd_global_lock);
2376 hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
2377 struct ksock_conn_cb *conn_cb;
2378 struct ksock_conn *conn;
2380 if (peer_ni->ksnp_ni != ni)
2383 CWARN("Active peer_ni on shutdown: %s, ref %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2384 libcfs_idstr(&peer_ni->ksnp_id),
2385 refcount_read(&peer_ni->ksnp_refcount),
2386 peer_ni->ksnp_closing,
2387 peer_ni->ksnp_accepting, peer_ni->ksnp_error,
2388 peer_ni->ksnp_zc_next_cookie,
2389 !list_empty(&peer_ni->ksnp_tx_queue),
2390 !list_empty(&peer_ni->ksnp_zc_req_list));
2392 conn_cb = peer_ni->ksnp_conn_cb;
2394 CWARN("ConnCB: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2395 refcount_read(&conn_cb->ksnr_refcount),
2396 conn_cb->ksnr_scheduled, conn_cb->ksnr_connecting,
2397 conn_cb->ksnr_connected, conn_cb->ksnr_deleted);
2400 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
2401 CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2402 refcount_read(&conn->ksnc_conn_refcount),
2403 refcount_read(&conn->ksnc_sock_refcount),
2404 conn->ksnc_type, conn->ksnc_closing);
2409 read_unlock(&ksocknal_data.ksnd_global_lock);
2414 ksocknal_shutdown(struct lnet_ni *ni)
2416 struct ksock_net *net = ni->ni_data;
2418 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2419 LASSERT(ksocknal_data.ksnd_nnets > 0);
2421 /* prevent new peers */
2422 atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);
2424 /* Delete all peers */
2425 ksocknal_del_peer(ni, NULL);
2427 /* Wait for all peer_ni state to clean up */
2428 wait_var_event_warning(&net->ksnn_npeers,
2429 atomic_read(&net->ksnn_npeers) ==
2430 SOCKNAL_SHUTDOWN_BIAS,
2431 "waiting for %d peers to disconnect\n",
2432 ksocknal_debug_peerhash(ni) +
2433 atomic_read(&net->ksnn_npeers) -
2434 SOCKNAL_SHUTDOWN_BIAS);
2436 LASSERT(net->ksnn_interface.ksni_npeers == 0);
2437 LASSERT(net->ksnn_interface.ksni_nroutes == 0);
2439 list_del(&net->ksnn_list);
2440 LIBCFS_FREE(net, sizeof(*net));
2442 ksocknal_data.ksnd_nnets--;
2443 if (ksocknal_data.ksnd_nnets == 0)
2444 ksocknal_base_shutdown();
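/* Check whether @net's interface (ignoring any ":" alias suffix) is
 * already in use by another configured net; scheduler threads are
 * only grown for genuinely new interfaces.
 */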
2448 ksocknal_search_new_ipif(struct ksock_net *net)
2451 char *ifnam = &net->ksnn_interface.ksni_name[0];
2452 char *colon = strchr(ifnam, ':');
2454 struct ksock_net *tmp;
2459 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
2460 char *ifnam2 = &tmp->ksnn_interface.ksni_name[0];
2461 char *colon2 = strchr(ifnam2, ':');
2466 found = strcmp(ifnam, ifnam2) == 0;
2479 ksocknal_start_schedulers(struct ksock_sched *sched)
2485 if (sched->kss_nthreads == 0) {
2486 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2487 nthrs = sched->kss_nthreads_max;
2489 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2491 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2492 nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2494 nthrs = min(nthrs, sched->kss_nthreads_max);
2496 LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
2497 /* add up to two threads when a new interface appears */
2498 nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
2501 for (i = 0; i < nthrs; i++) {
2504 id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
2505 rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id,
2506 "socknal_sd%02d_%02d",
2508 (int)KSOCK_THREAD_SID(id));
2512 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2513 sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
2517 sched->kss_nthreads += i;
2522 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2524 int newif = ksocknal_search_new_ipif(net);
2528 if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
2531 for (i = 0; i < ncpts; i++) {
2532 struct ksock_sched *sched;
2533 int cpt = (cpts == NULL) ? i : cpts[i];
2535 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2536 sched = ksocknal_data.ksnd_schedulers[cpt];
2538 if (!newif && sched->kss_nthreads > 0)
2541 rc = ksocknal_start_schedulers(sched);
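/* Per-NI startup: allocate the ksock_net, bind the NI to an interface
 * and address (IPv4 or IPv6), start scheduler threads on the
 * requested CPTs and derive the initial link-fatal state from the
 * device status.
 */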
2549 ksocknal_startup(struct lnet_ni *ni)
2551 struct ksock_net *net;
2552 struct ksock_interface *ksi = NULL;
2553 struct lnet_inetdev *ifaces = NULL;
2557 LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);
2558 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2559 rc = ksocknal_base_startup();
2563 LIBCFS_ALLOC(net, sizeof(*net));
2567 net->ksnn_incarnation = ktime_get_real_ns();
2570 ksocknal_tunables_setup(ni);
2572 rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns,
2573 the_lnet.ln_nis_use_large_nids);
2577 ksi = &net->ksnn_interface;
2579 /* An interface and/or IP address may be specified; otherwise default to
2580 * the first interface
2582 if_idx = lnet_inet_select(ni, ifaces, rc);
2586 if (!ni->ni_interface) {
2587 rc = lnet_ni_add_interface(ni, ifaces[if_idx].li_name);
2589 CWARN("ksocklnd failed to allocate ni_interface\n");
2592 ni->ni_dev_cpt = ifaces[if_idx].li_cpt;
2593 ksi->ksni_index = ifaces[if_idx].li_index;
2594 if (ifaces[if_idx].li_size == sizeof(struct in6_addr)) {
2595 struct sockaddr_in6 *sa;
2596 sa = (void *)&ksi->ksni_addr;
2597 memset(sa, 0, sizeof(*sa));
2598 sa->sin6_family = AF_INET6;
2599 memcpy(&sa->sin6_addr, ifaces[if_idx].li_ipv6addr,
2600 sizeof(struct in6_addr));
2601 ni->ni_nid.nid_size = sizeof(struct in6_addr) - 4;
2602 memcpy(&ni->ni_nid.nid_addr, ifaces[if_idx].li_ipv6addr,
2603 sizeof(struct in6_addr));
2605 struct sockaddr_in *sa;
2606 sa = (void *)&ksi->ksni_addr;
2607 memset(sa, 0, sizeof(*sa));
2608 sa->sin_family = AF_INET;
2609 sa->sin_addr.s_addr = ifaces[if_idx].li_ipaddr;
2610 ksi->ksni_netmask = ifaces[if_idx].li_netmask;
2611 ni->ni_nid.nid_size = 0;
2612 ni->ni_nid.nid_addr[0] = sa->sin_addr.s_addr;
2614 strscpy(ksi->ksni_name, ifaces[if_idx].li_name, sizeof(ksi->ksni_name));
2616 /* call this before adding the net to ksocknal_data.ksnd_nets */
2617 rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2621 if ((ksocknal_ip2index((struct sockaddr *)&ksi->ksni_addr,
2623 &dev_status) < 0) ||
2625 lnet_set_link_fatal_state(ni, 1);
2627 list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2629 ksocknal_data.ksnd_nnets++;
2635 LIBCFS_FREE(net, sizeof(*net));
2637 if (ksocknal_data.ksnd_nnets == 0)
2638 ksocknal_base_shutdown();
2644 static void __exit ksocklnd_exit(void)
2646 lnet_unregister_lnd(&the_ksocklnd);
2649 static const struct lnet_lnd the_ksocklnd = {
2650 .lnd_type = SOCKLND,
2651 .lnd_startup = ksocknal_startup,
2652 .lnd_shutdown = ksocknal_shutdown,
2653 .lnd_ctl = ksocknal_ctl,
2654 .lnd_send = ksocknal_send,
2655 .lnd_recv = ksocknal_recv,
2656 .lnd_notify_peer_down = ksocknal_notify_gw_down,
2657 .lnd_accept = ksocknal_accept,
2658 .lnd_nl_get = ksocknal_nl_get,
2659 .lnd_nl_set = ksocknal_nl_set,
2660 .lnd_keys = &ksocknal_tunables_keys,
2663 static int __init ksocklnd_init(void)
2667 /* check that the ksnr_connected/connecting fields are large enough */
2668 BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
2669 BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
2671 rc = ksocknal_tunables_init();
2675 rc = libcfs_setup();
2679 lnet_register_lnd(&the_ksocklnd);
2684 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2685 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2686 MODULE_VERSION("2.8.0");
2687 MODULE_LICENSE("GPL");
2689 module_init(ksocklnd_init);
2690 module_exit(ksocklnd_exit);