* Author: Eric Barton <eric@bartonsoftware.com>
*/
+#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include "socklnd.h"
#include <linux/sunrpc/addr.h>
conn_cb->ksnr_ctrl_conn_count = 0;
conn_cb->ksnr_blki_conn_count = 0;
conn_cb->ksnr_blko_conn_count = 0;
+ conn_cb->ksnr_max_conns = 0;
return conn_cb;
}
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct ksock_peer_ni *peer_ni;
+ unsigned long hash = nidhash(id.nid);
hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
- ksnp_list, id.nid) {
+ ksnp_list, hash) {
LASSERT(!peer_ni->ksnp_closing);
if (peer_ni->ksnp_ni != ni)
return count;
}
+/* Return the conns_per_peer LND tunable configured on the NI that owns
+ * @peer_ni.  The value is captured into ksnr_max_conns when a conn_cb is
+ * bound to the peer, fixing the per-type connection limit for the life
+ * of that conn_cb.
+ */
+static unsigned int
+ksocknal_get_conns_per_peer(struct ksock_peer_ni *peer_ni)
+{
+ struct lnet_ni *ni = peer_ni->ksnp_ni;
+ struct lnet_ioctl_config_socklnd_tunables *tunables;
+
+ LASSERT(ni);
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_sock;
+
+ return tunables->lnd_conns_per_peer;
+}
+
static void
ksocknal_incr_conn_count(struct ksock_conn_cb *conn_cb,
int type)
break;
case SOCKLND_CONN_BULK_IN:
conn_cb->ksnr_blki_conn_count++;
- if (conn_cb->ksnr_blki_conn_count >=
- *ksocknal_tunables.ksnd_conns_per_peer)
+ if (conn_cb->ksnr_blki_conn_count >= conn_cb->ksnr_max_conns)
conn_cb->ksnr_connected |= BIT(type);
break;
case SOCKLND_CONN_BULK_OUT:
conn_cb->ksnr_blko_conn_count++;
- if (conn_cb->ksnr_blko_conn_count >=
- *ksocknal_tunables.ksnd_conns_per_peer)
+ if (conn_cb->ksnr_blko_conn_count >= conn_cb->ksnr_max_conns)
conn_cb->ksnr_connected |= BIT(type);
break;
case SOCKLND_CONN_ANY:
- if (conn_cb->ksnr_conn_count >=
- *ksocknal_tunables.ksnd_conns_per_peer)
+ if (conn_cb->ksnr_conn_count >= conn_cb->ksnr_max_conns)
conn_cb->ksnr_connected |= BIT(type);
break;
default:
LBUG();
break;
-
}
- CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x conns_per_peer %d\n",
- type, conn_cb->ksnr_connected, *ksocknal_tunables.ksnd_conns_per_peer);
+ CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
+ type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
}
static void
ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
struct ksock_conn_cb *conn_cb)
{
- struct list_head *tmp;
struct ksock_conn *conn;
struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
/* peer_ni's route list takes over my ref on 'route' */
peer_ni->ksnp_conn_cb = conn_cb;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
(struct sockaddr *)&conn_cb->ksnr_addr))
continue;
peer_ni = peer2;
} else {
/* peer_ni table takes my ref on peer_ni */
- hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid);
+ hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list,
+ nidhash(id.nid));
}
ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
+ /* Remember conns_per_peer setting at the time
+ * of connection initiation. It will define the
+ * max number of conns per type for this conn_cb
+ * while it's in use.
+ */
+ conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
+
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return 0;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY) {
- lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ lo = hash_min(nidhash(id.nid),
+ HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
lo = 0;
{
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
- struct list_head *ctmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
if (peer_ni->ksnp_ni != ni)
continue;
- list_for_each(ctmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns,
+ ksnc_list) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, struct ksock_conn,
- ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
return conn;
struct sockaddr_storage peer;
rc = lnet_sock_getaddr(sock, true, &peer);
- LASSERT(rc == 0); /* we succeeded before */
+ if (rc != 0) {
+ CERROR("Can't determine new connection's address\n");
+ return rc;
+ }
LIBCFS_ALLOC(cr, sizeof(*cr));
if (cr == NULL) {
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
struct lnet_process_id peerid;
- struct list_head *tmp;
u64 incarnation;
struct ksock_conn *conn;
struct ksock_conn *conn2;
/* NB this puts an "empty" peer_ni in the peer_ni
* table (which takes my ref) */
hash_add(ksocknal_data.ksnd_peers,
- &peer_ni->ksnp_list, peerid.nid);
+ &peer_ni->ksnp_list, nidhash(peerid.nid));
} else {
ksocknal_peer_decref(peer_ni);
peer_ni = peer2;
/* Am I already connecting to this guy? Resolve in
* favour of higher NID...
*/
- if (peerid.nid < ni->ni_nid &&
+ if (peerid.nid < lnet_nid_to_nid4(&ni->ni_nid) &&
ksocknal_connecting(peer_ni->ksnp_conn_cb,
((struct sockaddr *) &conn->ksnc_peeraddr))) {
rc = EALREADY;
* loopback connection */
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_myaddr)) {
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
if (!rpc_cmp_addr(
(struct sockaddr *)&conn2->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_peeraddr) ||
continue;
num_dup++;
- if (num_dup < *ksocknal_tunables.ksnd_conns_per_peer)
+ /* If max conns per type is not registered in conn_cb
+ * as ksnr_max_conns, use ni's conns_per_peer
+ */
+ if ((peer_ni->ksnp_conn_cb &&
+ num_dup < peer_ni->ksnp_conn_cb->ksnr_max_conns) ||
+ (!peer_ni->ksnp_conn_cb &&
+ num_dup < ksocknal_get_conns_per_peer(peer_ni)))
continue;
/* Reply on a passive connection attempt so the peer_ni
struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
struct ksock_conn_cb *conn_cb;
struct ksock_conn *conn2;
- struct list_head *tmp;
LASSERT(peer_ni->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
* of the given type got created
*/
if (ksocknal_get_conn_count_by_type(conn_cb, conn->ksnc_type) ==
- *ksocknal_tunables.ksnd_conns_per_peer)
+ conn_cb->ksnr_max_conns)
LASSERT((conn_cb->ksnr_connected &
BIT(conn->ksnc_type)) != 0);
- conn2 = NULL;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
if (conn2->ksnc_conn_cb == conn_cb &&
conn2->ksnc_type == conn->ksnc_type)
- break;
-
- conn2 = NULL;
+ goto conn2_found;
}
- if (conn2 == NULL)
- conn_cb->ksnr_connected &= ~BIT(conn->ksnc_type);
-
+ conn_cb->ksnr_connected &= ~BIT(conn->ksnc_type);
+conn2_found:
conn->ksnc_conn_cb = NULL;
/* drop conn's ref on conn_cb */
spin_lock(&peer_ni->ksnp_lock);
- list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
+ list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
+ tx_zc_list) {
if (tx->tx_conn != conn)
continue;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY) {
- lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ lo = hash_min(nidhash(id.nid),
+ HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
lo = 0;
{
int index;
int i;
- struct list_head *tmp;
struct ksock_conn *conn;
for (index = 0; ; index++) {
i = 0;
conn = NULL;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
if (i++ == index) {
- conn = list_entry(tmp, struct ksock_conn,
- ksnc_list);
ksocknal_conn_addref(conn);
break;
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (conn == NULL)
+ if (i <= index)
break;
ksocknal_lib_push_conn (conn);
int rc = -ENOENT;
if (id.nid != LNET_NID_ANY) {
- lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ lo = hash_min(nidhash(id.nid),
+ HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
lo = 0;
return ksocknal_close_matching_conns (id,
data->ioc_u32[0]);
- case IOC_LIBCFS_REGISTER_MYNID:
- /* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid)
- return 0;
+ case IOC_LIBCFS_REGISTER_MYNID:
+ /* Ignore if this is a noop */
+ if (nid_is_nid4(&ni->ni_nid) &&
+ data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid))
+ return 0;
- CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
- libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
+ CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+ libcfs_nid2str(data->ioc_nid),
+ libcfs_nidstr(&ni->ni_nid));
+ return -EINVAL;
case IOC_LIBCFS_PUSH_CONNECTION:
id.nid = data->ioc_nid;
}
}
+/* Query the link state of @dev.
+ * Returns 0 if the interface is not running or ethtool reports no
+ * carrier, 1 (driver-dependent non-zero) if ethtool reports carrier,
+ * and -1 when the driver supplies no get_link operation, i.e. the
+ * link state is unknown.
+ */
+static int ksocknal_get_link_status(struct net_device *dev)
+{
+ int ret = -1;
+
+ LASSERT(dev);
+
+ if (!netif_running(dev))
+ ret = 0;
+ /* Some devices may not be providing link settings */
+ else if (dev->ethtool_ops->get_link)
+ ret = dev->ethtool_ops->get_link(dev);
+
+ return ret;
+}
+
+/* Propagate an operstate change on @dev to every socklnd net bound to
+ * that interface (matched by ifindex).  On link-down, raise the NI's
+ * fatal-error flag immediately; otherwise set the flag only if
+ * ksocknal_get_link_status() returns exactly 0 (interface down or
+ * carrier lost) — an unknown state (-1) leaves the NI usable.
+ * Always returns 0.
+ */
+static int
+ksocknal_handle_link_state_change(struct net_device *dev,
+ unsigned char operstate)
+{
+ struct lnet_ni *ni;
+ struct ksock_net *net;
+ struct ksock_net *cnxt;
+ int ifindex;
+ unsigned char link_down = !(operstate == IF_OPER_UP);
+
+ ifindex = dev->ifindex;
+
+ /* Nothing to update before any socklnd net is configured */
+ if (!ksocknal_data.ksnd_nnets)
+ goto out;
+
+ list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
+ ksnn_list) {
+ if (net->ksnn_interface.ksni_index != ifindex)
+ continue;
+ ni = net->ksnn_ni;
+ if (link_down)
+ atomic_set(&ni->ni_fatal_error_on, link_down);
+ else
+ atomic_set(&ni->ni_fatal_error_on,
+ (ksocknal_get_link_status(dev) == 0));
+ }
+out:
+ return 0;
+}
+
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+/* Netdevice notifier callback: react to interface up/down/change events
+ * by refreshing the fatal-error state of any matching socklnd NI.
+ * NOTE(review): dev->operstate is read without READ_ONCE() or any lock
+ * here — presumably a benign race, but confirm against current kernel
+ * notifier conventions.
+ */
+static int ksocknal_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ unsigned char operstate;
+
+ operstate = dev->operstate;
+
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ ksocknal_handle_link_state_change(dev, operstate);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/* Registered at startup (SOCKNAL_INIT_ALL) and unregistered in
+ * ksocknal_base_shutdown() to track interface link state.
+ */
+static struct notifier_block ksocknal_notifier_block = {
+ .notifier_call = ksocknal_device_event,
+};
+
static void
ksocknal_base_shutdown(void)
{
libcfs_kmem_read());
LASSERT (ksocknal_data.ksnd_nnets == 0);
+ if (ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL)
+ unregister_netdevice_notifier(&ksocknal_notifier_block);
+
switch (ksocknal_data.ksnd_init) {
default:
LASSERT(0);
}
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- char name[16];
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
- (void *)((uintptr_t)i), name);
+ (void *)((uintptr_t)i),
+ "socknal_cd%02d", i);
if (rc != 0) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
goto failed;
}
+ register_netdevice_notifier(&ksocknal_notifier_block);
+
/* flag everything initialised */
ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
for (i = 0; i < nthrs; i++) {
long id;
- char name[20];
id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
- snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
-
- rc = ksocknal_thread_start(ksocknal_scheduler,
- (void *)id, name);
+ rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id,
+ "socknal_sd%02d_%02d",
+ sched->kss_cpt,
+ (int)KSOCK_THREAD_SID(id));
if (rc == 0)
continue;
ksocknal_startup(struct lnet_ni *ni)
{
struct ksock_net *net;
- struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
struct ksock_interface *ksi = NULL;
struct lnet_inetdev *ifaces = NULL;
struct sockaddr_in *sa;
goto fail_0;
net->ksnn_incarnation = ktime_get_real_ns();
ni->ni_data = net;
- net_tunables = &ni->ni_net->net_tunables;
- if (net_tunables->lct_peer_timeout == -1)
- net_tunables->lct_peer_timeout =
- *ksocknal_tunables.ksnd_peertimeout;
- if (net_tunables->lct_max_tx_credits == -1)
- net_tunables->lct_max_tx_credits =
- *ksocknal_tunables.ksnd_credits;
-
- if (net_tunables->lct_peer_tx_credits == -1)
- net_tunables->lct_peer_tx_credits =
- *ksocknal_tunables.ksnd_peertxcredits;
-
- if (net_tunables->lct_peer_tx_credits >
- net_tunables->lct_max_tx_credits)
- net_tunables->lct_peer_tx_credits =
- net_tunables->lct_max_tx_credits;
-
- if (net_tunables->lct_peer_rtr_credits == -1)
- net_tunables->lct_peer_rtr_credits =
- *ksocknal_tunables.ksnd_peerrtrcredits;
+ ksocknal_tunables_setup(ni);
rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
if (rc < 0)
LASSERT(ksi);
LASSERT(ksi->ksni_addr.ss_family == AF_INET);
- ni->ni_nid = LNET_MKNID(
- LNET_NIDNET(ni->ni_nid),
- ntohl(((struct sockaddr_in *)
- &ksi->ksni_addr)->sin_addr.s_addr));
+ ni->ni_nid.nid_addr[0] =
+ ((struct sockaddr_in *)&ksi->ksni_addr)->sin_addr.s_addr;
list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
+ net->ksnn_ni = ni;
ksocknal_data.ksnd_nnets++;
return 0;
return -ENETDOWN;
}
-
static void __exit ksocklnd_exit(void)
{
lnet_unregister_lnd(&the_ksocklnd);