*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/socklnd/socklnd.c
*
* Author: Eric Barton <eric@bartonsoftware.com>
*/
+#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include "socklnd.h"
#include <linux/sunrpc/addr.h>
return ret;
}
-static struct ksock_route *
-ksocknal_create_route(struct sockaddr *addr)
+/* Allocate and initialise a connection control block (conn_cb) for
+ * the peer at @addr.  The returned conn_cb carries a single reference
+ * owned by the caller (ksnr_refcount == 1); returns NULL if the
+ * allocation fails.
+ */
+static struct ksock_conn_cb *
+ksocknal_create_conn_cb(struct sockaddr *addr)
{
- struct ksock_route *route;
-
- LIBCFS_ALLOC (route, sizeof (*route));
- if (route == NULL)
- return (NULL);
-
- refcount_set(&route->ksnr_refcount, 1);
- route->ksnr_peer = NULL;
- route->ksnr_retry_interval = 0; /* OK to connect at any time */
- rpc_copy_addr((struct sockaddr *)&route->ksnr_addr, addr);
- rpc_set_port((struct sockaddr *)&route->ksnr_addr, rpc_get_port(addr));
- route->ksnr_myiface = -1;
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
- route->ksnr_connected = 0;
- route->ksnr_deleted = 0;
- route->ksnr_conn_count = 0;
- route->ksnr_share_count = 0;
-
- return route;
+ struct ksock_conn_cb *conn_cb;
+
+ LIBCFS_ALLOC(conn_cb, sizeof(*conn_cb));
+ if (!conn_cb)
+ return NULL;
+
+ refcount_set(&conn_cb->ksnr_refcount, 1);
+ conn_cb->ksnr_peer = NULL;
+ conn_cb->ksnr_retry_interval = 0; /* OK to connect at any time */
+ rpc_copy_addr((struct sockaddr *)&conn_cb->ksnr_addr, addr);
+ rpc_set_port((struct sockaddr *)&conn_cb->ksnr_addr,
+ rpc_get_port(addr));
+ conn_cb->ksnr_myiface = -1;
+ conn_cb->ksnr_scheduled = 0;
+ conn_cb->ksnr_connecting = 0;
+ conn_cb->ksnr_connected = 0;
+ conn_cb->ksnr_deleted = 0;
+ conn_cb->ksnr_conn_count = 0;
+ conn_cb->ksnr_ctrl_conn_count = 0;
+ conn_cb->ksnr_blki_conn_count = 0;
+ conn_cb->ksnr_blko_conn_count = 0;
+ /* ksnr_max_conns stays 0 until the conn_cb is bound to a
+ * peer_ni, where it is set from the NI's conns_per_peer tunable
+ */
+ conn_cb->ksnr_max_conns = 0;
+
+ return conn_cb;
}
+/* Free a conn_cb once its refcount has dropped to zero, releasing
+ * the reference it holds on its peer_ni (if it was ever attached).
+ */
void
-ksocknal_destroy_route(struct ksock_route *route)
+ksocknal_destroy_conn_cb(struct ksock_conn_cb *conn_cb)
{
- LASSERT(refcount_read(&route->ksnr_refcount) == 0);
+ LASSERT(refcount_read(&conn_cb->ksnr_refcount) == 0);
- if (route->ksnr_peer != NULL)
- ksocknal_peer_decref(route->ksnr_peer);
+ if (conn_cb->ksnr_peer)
+ ksocknal_peer_decref(conn_cb->ksnr_peer);
- LIBCFS_FREE (route, sizeof (*route));
+ LIBCFS_FREE(conn_cb, sizeof(*conn_cb));
}
static struct ksock_peer_ni *
-ksocknal_create_peer(struct lnet_ni *ni, struct lnet_process_id id)
+ksocknal_create_peer(struct lnet_ni *ni, struct lnet_processid *id)
{
- int cpt = lnet_cpt_of_nid(id.nid, ni);
+ int cpt = lnet_nid2cpt(&id->nid, ni);
struct ksock_net *net = ni->ni_data;
struct ksock_peer_ni *peer_ni;
- LASSERT(id.nid != LNET_NID_ANY);
- LASSERT(id.pid != LNET_PID_ANY);
+ LASSERT(!LNET_NID_IS_ANY(&id->nid));
+ LASSERT(id->pid != LNET_PID_ANY);
LASSERT(!in_interrupt());
if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
}
peer_ni->ksnp_ni = ni;
- peer_ni->ksnp_id = id;
+ peer_ni->ksnp_id = *id;
refcount_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
peer_ni->ksnp_closing = 0;
peer_ni->ksnp_accepting = 0;
peer_ni->ksnp_proto = NULL;
peer_ni->ksnp_last_alive = 0;
peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+ peer_ni->ksnp_conn_cb = NULL;
INIT_LIST_HEAD(&peer_ni->ksnp_conns);
- INIT_LIST_HEAD(&peer_ni->ksnp_routes);
INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
spin_lock_init(&peer_ni->ksnp_lock);
struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
CDEBUG (D_NET, "peer_ni %s %p deleted\n",
- libcfs_id2str(peer_ni->ksnp_id), peer_ni);
+ libcfs_idstr(&peer_ni->ksnp_id), peer_ni);
LASSERT(refcount_read(&peer_ni->ksnp_refcount) == 0);
LASSERT(peer_ni->ksnp_accepting == 0);
LASSERT(list_empty(&peer_ni->ksnp_conns));
- LASSERT(list_empty(&peer_ni->ksnp_routes));
+ LASSERT(peer_ni->ksnp_conn_cb == NULL);
LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
- /* NB a peer_ni's connections and routes keep a reference on their
+ /* NB a peer_ni's connections and conn_cb keep a reference on their
* peer_ni until they are destroyed, so we can be assured that _all_
* state to do with this peer_ni has been cleaned up when its refcount
* drops to zero.
}
struct ksock_peer_ni *
-ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
+ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_processid *id)
{
struct ksock_peer_ni *peer_ni;
+ unsigned long hash = nidhash(&id->nid);
hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
- ksnp_list, id.nid) {
+ ksnp_list, hash) {
LASSERT(!peer_ni->ksnp_closing);
if (peer_ni->ksnp_ni != ni)
continue;
- if (peer_ni->ksnp_id.nid != id.nid ||
- peer_ni->ksnp_id.pid != id.pid)
+ if (!nid_same(&peer_ni->ksnp_id.nid, &id->nid) ||
+ peer_ni->ksnp_id.pid != id->pid)
continue;
CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
- peer_ni, libcfs_id2str(id),
+ peer_ni, libcfs_idstr(id),
refcount_read(&peer_ni->ksnp_refcount));
return peer_ni;
}
}
struct ksock_peer_ni *
-ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
+ksocknal_find_peer(struct lnet_ni *ni, struct lnet_processid *id)
{
struct ksock_peer_ni *peer_ni;
ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
- return (peer_ni);
+ return peer_ni;
}
static void
}
LASSERT(list_empty(&peer_ni->ksnp_conns));
- LASSERT(list_empty(&peer_ni->ksnp_routes));
+ LASSERT(peer_ni->ksnp_conn_cb == NULL);
LASSERT(!peer_ni->ksnp_closing);
peer_ni->ksnp_closing = 1;
hlist_del(&peer_ni->ksnp_list);
int *port, int *conn_count, int *share_count)
{
struct ksock_peer_ni *peer_ni;
- struct ksock_route *route;
- struct list_head *rtmp;
+ struct ksock_conn_cb *conn_cb;
int i;
int j;
int rc = -ENOENT;
continue;
if (peer_ni->ksnp_n_passive_ips == 0 &&
- list_empty(&peer_ni->ksnp_routes)) {
+ peer_ni->ksnp_conn_cb == NULL) {
if (index-- > 0)
continue;
- *id = peer_ni->ksnp_id;
+ id->pid = peer_ni->ksnp_id.pid;
+ id->nid = lnet_nid_to_nid4(&peer_ni->ksnp_id.nid);
*myip = 0;
*peer_ip = 0;
*port = 0;
if (index-- > 0)
continue;
- *id = peer_ni->ksnp_id;
+ id->pid = peer_ni->ksnp_id.pid;
+ id->nid = lnet_nid_to_nid4(&peer_ni->ksnp_id.nid);
*myip = peer_ni->ksnp_passive_ips[j];
*peer_ip = 0;
*port = 0;
goto out;
}
- list_for_each(rtmp, &peer_ni->ksnp_routes) {
+ if (peer_ni->ksnp_conn_cb) {
if (index-- > 0)
continue;
- route = list_entry(rtmp, struct ksock_route,
- ksnr_list);
+ conn_cb = peer_ni->ksnp_conn_cb;
- *id = peer_ni->ksnp_id;
- if (route->ksnr_addr.ss_family == AF_INET) {
+ id->pid = peer_ni->ksnp_id.pid;
+ id->nid = lnet_nid_to_nid4(&peer_ni->ksnp_id.nid);
+ if (conn_cb->ksnr_addr.ss_family == AF_INET) {
struct sockaddr_in *sa =
- (void *)&route->ksnr_addr;
- rc = choose_ipv4_src(
- myip,
- route->ksnr_myiface,
- ntohl(sa->sin_addr.s_addr),
- ni->ni_net_ns);
+ (void *)&conn_cb->ksnr_addr;
+
+ rc = choose_ipv4_src(myip,
+ conn_cb->ksnr_myiface,
+ ntohl(sa->sin_addr.s_addr),
+ ni->ni_net_ns);
*peer_ip = ntohl(sa->sin_addr.s_addr);
*port = ntohs(sa->sin_port);
} else {
*port = 0;
rc = -ENOTSUPP;
}
- *conn_count = route->ksnr_conn_count;
- *share_count = route->ksnr_share_count;
+ *conn_count = conn_cb->ksnr_conn_count;
+ *share_count = 1;
goto out;
}
}
return rc;
}
+/* Return the number of established connections of the given @type
+ * currently accounted in @conn_cb.  SOCKLND_CONN_ANY reports the
+ * total across all types; any other unknown type is a bug (LBUG).
+ */
+static unsigned int
+ksocknal_get_conn_count_by_type(struct ksock_conn_cb *conn_cb,
+ int type)
+{
+ unsigned int count = 0;
+
+ switch (type) {
+ case SOCKLND_CONN_CONTROL:
+ count = conn_cb->ksnr_ctrl_conn_count;
+ break;
+ case SOCKLND_CONN_BULK_IN:
+ count = conn_cb->ksnr_blki_conn_count;
+ break;
+ case SOCKLND_CONN_BULK_OUT:
+ count = conn_cb->ksnr_blko_conn_count;
+ break;
+ case SOCKLND_CONN_ANY:
+ count = conn_cb->ksnr_conn_count;
+ break;
+ default:
+ LBUG();
+ break;
+ }
+
+ return count;
+}
+
+/* Return the conns_per_peer tunable of the NI that owns @peer_ni.
+ * This is the configured number of connections per type allowed
+ * towards a single peer.
+ */
+static unsigned int
+ksocknal_get_conns_per_peer(struct ksock_peer_ni *peer_ni)
+{
+ struct lnet_ni *ni = peer_ni->ksnp_ni;
+ struct lnet_ioctl_config_socklnd_tunables *tunables;
+
+ LASSERT(ni);
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_sock;
+
+ return tunables->lnd_conns_per_peer;
+}
+
+/* Account a newly established connection of @type on @conn_cb:
+ * bump the total and the per-type counter, and set BIT(type) in
+ * ksnr_connected once the quota for that type (ksnr_max_conns)
+ * has been reached, i.e. no more connections of that type are
+ * needed.
+ */
+static void
+ksocknal_incr_conn_count(struct ksock_conn_cb *conn_cb,
+ int type)
+{
+ conn_cb->ksnr_conn_count++;
+
+ /* check if all connections of the given type got created */
+ switch (type) {
+ case SOCKLND_CONN_CONTROL:
+ conn_cb->ksnr_ctrl_conn_count++;
+ /* there's a single control connection per peer,
+ * two in case of loopback
+ */
+ conn_cb->ksnr_connected |= BIT(type);
+ break;
+ case SOCKLND_CONN_BULK_IN:
+ conn_cb->ksnr_blki_conn_count++;
+ if (conn_cb->ksnr_blki_conn_count >= conn_cb->ksnr_max_conns)
+ conn_cb->ksnr_connected |= BIT(type);
+ break;
+ case SOCKLND_CONN_BULK_OUT:
+ conn_cb->ksnr_blko_conn_count++;
+ if (conn_cb->ksnr_blko_conn_count >= conn_cb->ksnr_max_conns)
+ conn_cb->ksnr_connected |= BIT(type);
+ break;
+ case SOCKLND_CONN_ANY:
+ if (conn_cb->ksnr_conn_count >= conn_cb->ksnr_max_conns)
+ conn_cb->ksnr_connected |= BIT(type);
+ break;
+ default:
+ LBUG();
+ break;
+ }
+
+ CDEBUG(D_NET, "Add conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
+ type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
+}
+
+
+/* Mirror of ksocknal_incr_conn_count(): account a closed connection
+ * of @type on @conn_cb.  Drops the total and per-type counters and
+ * clears BIT(type) in ksnr_connected once the count falls below the
+ * per-type quota, re-enabling connection attempts of that type.
+ */
+static void
+ksocknal_decr_conn_count(struct ksock_conn_cb *conn_cb,
+ int type)
+{
+ conn_cb->ksnr_conn_count--;
+
+ /* check if all connections of the given type got created */
+ switch (type) {
+ case SOCKLND_CONN_CONTROL:
+ conn_cb->ksnr_ctrl_conn_count--;
+ /* there's a single control connection per peer,
+ * two in case of loopback
+ */
+ if (conn_cb->ksnr_ctrl_conn_count == 0)
+ conn_cb->ksnr_connected &= ~BIT(type);
+ break;
+ case SOCKLND_CONN_BULK_IN:
+ conn_cb->ksnr_blki_conn_count--;
+ if (conn_cb->ksnr_blki_conn_count < conn_cb->ksnr_max_conns)
+ conn_cb->ksnr_connected &= ~BIT(type);
+ break;
+ case SOCKLND_CONN_BULK_OUT:
+ conn_cb->ksnr_blko_conn_count--;
+ if (conn_cb->ksnr_blko_conn_count < conn_cb->ksnr_max_conns)
+ conn_cb->ksnr_connected &= ~BIT(type);
+ break;
+ case SOCKLND_CONN_ANY:
+ if (conn_cb->ksnr_conn_count < conn_cb->ksnr_max_conns)
+ conn_cb->ksnr_connected &= ~BIT(type);
+ break;
+ default:
+ LBUG();
+ break;
+ }
+
+ CDEBUG(D_NET, "Del conn type %d, ksnr_connected %x ksnr_max_conns %d\n",
+ type, conn_cb->ksnr_connected, conn_cb->ksnr_max_conns);
+}
+
static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route,
- struct ksock_conn *conn)
+ksocknal_associate_cb_conn_locked(struct ksock_conn_cb *conn_cb,
+ struct ksock_conn *conn)
{
- struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
int type = conn->ksnc_type;
struct ksock_interface *iface;
- int conn_iface =
- ksocknal_ip2index((struct sockaddr *)&conn->ksnc_myaddr,
- route->ksnr_peer->ksnp_ni);
+ int conn_iface;
- conn->ksnc_route = route;
- ksocknal_route_addref(route);
+ conn_iface = ksocknal_ip2index((struct sockaddr *)&conn->ksnc_myaddr,
+ peer_ni->ksnp_ni);
+ conn->ksnc_conn_cb = conn_cb;
+ ksocknal_conn_cb_addref(conn_cb);
- if (route->ksnr_myiface != conn_iface) {
- if (route->ksnr_myiface < 0) {
+ if (conn_cb->ksnr_myiface != conn_iface) {
+ if (conn_cb->ksnr_myiface < 0) {
/* route wasn't bound locally yet (the initial route) */
CDEBUG(D_NET, "Binding %s %pIS to interface %d\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &route->ksnr_addr,
+ libcfs_idstr(&peer_ni->ksnp_id),
+ &conn_cb->ksnr_addr,
conn_iface);
} else {
CDEBUG(D_NET,
"Rebinding %s %pIS from interface %d to %d\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &route->ksnr_addr,
- route->ksnr_myiface,
+ libcfs_idstr(&peer_ni->ksnp_id),
+ &conn_cb->ksnr_addr,
+ conn_cb->ksnr_myiface,
conn_iface);
- iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myiface);
+ iface = ksocknal_index2iface(peer_ni->ksnp_ni,
+ conn_cb->ksnr_myiface);
if (iface)
iface->ksni_nroutes--;
}
- route->ksnr_myiface = conn_iface;
- iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myiface);
+ conn_cb->ksnr_myiface = conn_iface;
+ iface = ksocknal_index2iface(peer_ni->ksnp_ni,
+ conn_cb->ksnr_myiface);
if (iface)
iface->ksni_nroutes++;
}
- route->ksnr_connected |= (1<<type);
- route->ksnr_conn_count++;
+ ksocknal_incr_conn_count(conn_cb, type);
/* Successful connection => further attempts can
* proceed immediately
*/
- route->ksnr_retry_interval = 0;
+ conn_cb->ksnr_retry_interval = 0;
}
static void
-ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
+ksocknal_add_conn_cb_locked(struct ksock_peer_ni *peer_ni,
+ struct ksock_conn_cb *conn_cb)
{
- struct list_head *tmp;
struct ksock_conn *conn;
- struct ksock_route *route2;
struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
LASSERT(!peer_ni->ksnp_closing);
- LASSERT(route->ksnr_peer == NULL);
- LASSERT(!route->ksnr_scheduled);
- LASSERT(!route->ksnr_connecting);
- LASSERT(route->ksnr_connected == 0);
-
- /* LASSERT(unique) */
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route2 = list_entry(tmp, struct ksock_route, ksnr_list);
-
- if (rpc_cmp_addr((struct sockaddr *)&route2->ksnr_addr,
- (struct sockaddr *)&route->ksnr_addr)) {
- CERROR("Duplicate route %s %pI4h\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &route->ksnr_addr);
- LBUG();
- }
- }
+ LASSERT(!conn_cb->ksnr_peer);
+ LASSERT(!conn_cb->ksnr_scheduled);
+ LASSERT(!conn_cb->ksnr_connecting);
+ LASSERT(conn_cb->ksnr_connected == 0);
- route->ksnr_peer = peer_ni;
+ conn_cb->ksnr_peer = peer_ni;
ksocknal_peer_addref(peer_ni);
- /* set the route's interface to the current net's interface */
- route->ksnr_myiface = net->ksnn_interface.ksni_index;
+ /* set the conn_cb's interface to the current net's interface */
+ conn_cb->ksnr_myiface = net->ksnn_interface.ksni_index;
net->ksnn_interface.ksni_nroutes++;
- /* peer_ni's routelist takes over my ref on 'route' */
- list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
-
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
+ /* peer_ni's route list takes over my ref on 'route' */
+ peer_ni->ksnp_conn_cb = conn_cb;
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
- (struct sockaddr *)&route->ksnr_addr))
+ (struct sockaddr *)&conn_cb->ksnr_addr))
continue;
- ksocknal_associate_route_conn_locked(route, conn);
- /* keep going (typed routes) */
+ ksocknal_associate_cb_conn_locked(conn_cb, conn);
+ /* keep going (typed conns) */
}
}
static void
-ksocknal_del_route_locked(struct ksock_route *route)
+ksocknal_del_conn_cb_locked(struct ksock_conn_cb *conn_cb)
{
- struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
struct ksock_interface *iface;
struct ksock_conn *conn;
struct ksock_conn *cnxt;
- LASSERT(!route->ksnr_deleted);
+ LASSERT(!conn_cb->ksnr_deleted);
/* Close associated conns */
list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns, ksnc_list) {
- if (conn->ksnc_route != route)
+ if (conn->ksnc_conn_cb != conn_cb)
continue;
ksocknal_close_conn_locked(conn, 0);
}
- if (route->ksnr_myiface >= 0) {
- iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
- route->ksnr_myiface);
+ if (conn_cb->ksnr_myiface >= 0) {
+ iface = ksocknal_index2iface(peer_ni->ksnp_ni,
+ conn_cb->ksnr_myiface);
if (iface)
iface->ksni_nroutes--;
}
- route->ksnr_deleted = 1;
- list_del(&route->ksnr_list);
- ksocknal_route_decref(route); /* drop peer_ni's ref */
+ conn_cb->ksnr_deleted = 1;
+ ksocknal_conn_cb_decref(conn_cb); /* drop peer_ni's ref */
+ peer_ni->ksnp_conn_cb = NULL;
- if (list_empty(&peer_ni->ksnp_routes) &&
- list_empty(&peer_ni->ksnp_conns)) {
+ if (list_empty(&peer_ni->ksnp_conns)) {
/* I've just removed the last route to a peer_ni with no active
- * connections */
+ * connections
+ */
ksocknal_unlink_peer_locked(peer_ni);
}
}
int
-ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id,
+ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id4,
struct sockaddr *addr)
{
- struct list_head *tmp;
struct ksock_peer_ni *peer_ni;
struct ksock_peer_ni *peer2;
- struct ksock_route *route;
- struct ksock_route *route2;
+ struct ksock_conn_cb *conn_cb;
+ struct lnet_processid id;
- if (id.nid == LNET_NID_ANY ||
- id.pid == LNET_PID_ANY)
+ if (id4.nid == LNET_NID_ANY ||
+ id4.pid == LNET_PID_ANY)
return (-EINVAL);
+ id.pid = id4.pid;
+ lnet_nid4_to_nid(id4.nid, &id.nid);
+
/* Have a brand new peer_ni ready... */
- peer_ni = ksocknal_create_peer(ni, id);
+ peer_ni = ksocknal_create_peer(ni, &id);
if (IS_ERR(peer_ni))
return PTR_ERR(peer_ni);
- route = ksocknal_create_route(addr);
- if (route == NULL) {
+ conn_cb = ksocknal_create_conn_cb(addr);
+ if (!conn_cb) {
ksocknal_peer_decref(peer_ni);
- return (-ENOMEM);
+ return -ENOMEM;
}
write_lock_bh(&ksocknal_data.ksnd_global_lock);
LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
>= 0);
- peer2 = ksocknal_find_peer_locked(ni, id);
+ peer2 = ksocknal_find_peer_locked(ni, &id);
if (peer2 != NULL) {
ksocknal_peer_decref(peer_ni);
peer_ni = peer2;
} else {
/* peer_ni table takes my ref on peer_ni */
- hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid);
+ hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list,
+ nidhash(&id.nid));
}
- route2 = NULL;
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route2 = list_entry(tmp, struct ksock_route, ksnr_list);
-
- if (rpc_cmp_addr(addr, (struct sockaddr *)&route2->ksnr_addr))
- break;
+ ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
- route2 = NULL;
- }
- if (route2 == NULL) {
- ksocknal_add_route_locked(peer_ni, route);
- route->ksnr_share_count++;
- } else {
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
- }
+ /* Remember conns_per_peer setting at the time
+ * of connection initiation. It will define the
+ * max number of conns per type for this conn_cb
+ * while it's in use.
+ */
+ conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
{
struct ksock_conn *conn;
struct ksock_conn *cnxt;
- struct ksock_route *route;
- struct ksock_route *rnxt;
- int nshared;
+ struct ksock_conn_cb *conn_cb;
LASSERT(!peer_ni->ksnp_closing);
/* Extra ref prevents peer_ni disappearing until I'm done with it */
ksocknal_peer_addref(peer_ni);
+ conn_cb = peer_ni->ksnp_conn_cb;
+ if (conn_cb)
+ ksocknal_del_conn_cb_locked(conn_cb);
- list_for_each_entry_safe(route, rnxt, &peer_ni->ksnp_routes,
- ksnr_list) {
- /* no match */
- if (ip) {
- if (route->ksnr_addr.ss_family != AF_INET)
- continue;
- if (((struct sockaddr_in *)&route->ksnr_addr)
- ->sin_addr.s_addr != htonl(ip))
- continue;
- }
-
- route->ksnr_share_count = 0;
- /* This deletes associated conns too */
- ksocknal_del_route_locked(route);
- }
-
- nshared = 0;
- list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list)
- nshared += route->ksnr_share_count;
-
- if (nshared == 0) {
- /* remove everything else if there are no explicit entries
- * left
- */
- list_for_each_entry_safe(route, rnxt, &peer_ni->ksnp_routes,
- ksnr_list) {
- /* we should only be removing auto-entries */
- LASSERT(route->ksnr_share_count == 0);
- ksocknal_del_route_locked(route);
- }
-
- list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns,
- ksnc_list)
- ksocknal_close_conn_locked(conn, 0);
- }
+ list_for_each_entry_safe(conn, cnxt, &peer_ni->ksnp_conns,
+ ksnc_list)
+ ksocknal_close_conn_locked(conn, 0);
ksocknal_peer_decref(peer_ni);
- /* NB peer_ni unlinks itself when last conn/route is removed */
+ /* NB peer_ni unlinks itself when last conn/conn_cb is removed */
}
static int
-ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
+ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id4, __u32 ip)
{
LIST_HEAD(zombies);
struct hlist_node *pnxt;
int hi;
int i;
int rc = -ENOENT;
+ struct lnet_processid id;
+
+ id.pid = id4.pid;
+ lnet_nid4_to_nid(id4.nid, &id.nid);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY) {
- lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ if (!LNET_NID_IS_ANY(&id.nid)) {
+ lo = hash_min(nidhash(&id.nid),
+ HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
lo = 0;
if (peer_ni->ksnp_ni != ni)
continue;
- if (!((id.nid == LNET_NID_ANY ||
- peer_ni->ksnp_id.nid == id.nid) &&
+ if (!((LNET_NID_IS_ANY(&id.nid) ||
+ nid_same(&peer_ni->ksnp_id.nid, &id.nid)) &&
(id.pid == LNET_PID_ANY ||
peer_ni->ksnp_id.pid == id.pid)))
continue;
if (peer_ni->ksnp_closing &&
!list_empty(&peer_ni->ksnp_tx_queue)) {
LASSERT(list_empty(&peer_ni->ksnp_conns));
- LASSERT(list_empty(&peer_ni->ksnp_routes));
+ LASSERT(peer_ni->ksnp_conn_cb == NULL);
list_splice_init(&peer_ni->ksnp_tx_queue,
&zombies);
{
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
- struct list_head *ctmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
if (peer_ni->ksnp_ni != ni)
continue;
- list_for_each(ctmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns,
+ ksnc_list) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, struct ksock_conn,
- ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
return conn;
struct sockaddr_storage peer;
rc = lnet_sock_getaddr(sock, true, &peer);
- LASSERT(rc == 0); /* we succeeded before */
+ if (rc != 0) {
+ CERROR("Can't determine new connection's address\n");
+ return rc;
+ }
LIBCFS_ALLOC(cr, sizeof(*cr));
if (cr == NULL) {
}
+/* Return non-zero if @conn_cb is currently in the process of
+ * connecting to address @sa; 0 otherwise, including when @conn_cb
+ * is NULL or bound to a different address.
+ */
static int
-ksocknal_connecting(struct ksock_peer_ni *peer_ni, struct sockaddr *sa)
+ksocknal_connecting(struct ksock_conn_cb *conn_cb, struct sockaddr *sa)
{
- struct ksock_route *route;
-
- list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
- if (rpc_cmp_addr((struct sockaddr *)&route->ksnr_addr, sa))
- return route->ksnr_connecting;
- }
+ if (conn_cb &&
+ rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr, sa))
+ return conn_cb->ksnr_connecting;
return 0;
}
int
-ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
+ksocknal_create_conn(struct lnet_ni *ni, struct ksock_conn_cb *conn_cb,
struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
- struct lnet_process_id peerid;
- struct list_head *tmp;
+ struct lnet_process_id peerid4;
u64 incarnation;
struct ksock_conn *conn;
struct ksock_conn *conn2;
int rc;
int rc2;
int active;
+ int num_dup = 0;
char *warn = NULL;
- active = (route != NULL);
+ active = (conn_cb != NULL);
- LASSERT (active == (type != SOCKLND_CONN_NONE));
+ LASSERT(active == (type != SOCKLND_CONN_NONE));
- LIBCFS_ALLOC(conn, sizeof(*conn));
- if (conn == NULL) {
- rc = -ENOMEM;
- goto failed_0;
- }
+ LIBCFS_ALLOC(conn, sizeof(*conn));
+ if (conn == NULL) {
+ rc = -ENOMEM;
+ goto failed_0;
+ }
- conn->ksnc_peer = NULL;
- conn->ksnc_route = NULL;
- conn->ksnc_sock = sock;
+ conn->ksnc_peer = NULL;
+ conn->ksnc_conn_cb = NULL;
+ conn->ksnc_sock = sock;
/* 2 ref, 1 for conn, another extra ref prevents socket
* being closed before establishment of connection */
refcount_set(&conn->ksnc_sock_refcount, 2);
LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
kshm_ips[LNET_INTERFACES_NUM]));
- if (hello == NULL) {
- rc = -ENOMEM;
- goto failed_1;
- }
+ if (hello == NULL) {
+ rc = -ENOMEM;
+ goto failed_1;
+ }
- /* stash conn's local and remote addrs */
- rc = ksocknal_lib_get_conn_addrs (conn);
- if (rc != 0)
- goto failed_1;
+ /* stash conn's local and remote addrs */
+ rc = ksocknal_lib_get_conn_addrs(conn);
+ if (rc != 0)
+ goto failed_1;
- /* Find out/confirm peer_ni's NID and connection type and get the
- * vector of interfaces she's willing to let me connect to.
- * Passive connections use the listener timeout since the peer_ni sends
- * eagerly */
+ /* Find out/confirm peer_ni's NID and connection type and get the
+ * vector of interfaces she's willing to let me connect to.
+ * Passive connections use the listener timeout since the peer_ni sends
+ * eagerly
+ */
if (active) {
- peer_ni = route->ksnr_peer;
+ peer_ni = conn_cb->ksnr_peer;
LASSERT(ni == peer_ni->ksnp_ni);
/* Active connection sends HELLO eagerly */
hello->kshm_nips = 0;
- peerid = peer_ni->ksnp_id;
+ peerid4 = lnet_pid_to_pid4(&peer_ni->ksnp_id);
write_lock_bh(global_lock);
- conn->ksnc_proto = peer_ni->ksnp_proto;
+ conn->ksnc_proto = peer_ni->ksnp_proto;
write_unlock_bh(global_lock);
- if (conn->ksnc_proto == NULL) {
- conn->ksnc_proto = &ksocknal_protocol_v3x;
+ if (conn->ksnc_proto == NULL) {
+ conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 2)
- conn->ksnc_proto = &ksocknal_protocol_v2x;
- else if (*ksocknal_tunables.ksnd_protocol == 1)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
+ if (*ksocknal_tunables.ksnd_protocol == 2)
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+ else if (*ksocknal_tunables.ksnd_protocol == 1)
+ conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
- }
+ }
- rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
- if (rc != 0)
- goto failed_1;
- } else {
- peerid.nid = LNET_NID_ANY;
- peerid.pid = LNET_PID_ANY;
+ rc = ksocknal_send_hello(ni, conn, peerid4.nid, hello);
+ if (rc != 0)
+ goto failed_1;
+ } else {
+ peerid4.nid = LNET_NID_ANY;
+ peerid4.pid = LNET_PID_ANY;
- /* Passive, get protocol from peer_ni */
- conn->ksnc_proto = NULL;
+ /* Passive, get protocol from peer_ni */
+ conn->ksnc_proto = NULL;
}
- rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
+ rc = ksocknal_recv_hello(ni, conn, hello, &peerid4, &incarnation);
if (rc < 0)
goto failed_1;
LASSERT(rc == 0 || active);
LASSERT(conn->ksnc_proto != NULL);
- LASSERT(peerid.nid != LNET_NID_ANY);
+ LASSERT(peerid4.nid != LNET_NID_ANY);
- cpt = lnet_cpt_of_nid(peerid.nid, ni);
+ cpt = lnet_cpt_of_nid(peerid4.nid, ni);
if (active) {
ksocknal_peer_addref(peer_ni);
write_lock_bh(global_lock);
} else {
- peer_ni = ksocknal_create_peer(ni, peerid);
+ struct lnet_processid peerid;
+
+ lnet_pid4_to_pid(peerid4, &peerid);
+ peer_ni = ksocknal_create_peer(ni, &peerid);
if (IS_ERR(peer_ni)) {
rc = PTR_ERR(peer_ni);
goto failed_1;
/* called with a ref on ni, so shutdown can't have started */
LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);
- peer2 = ksocknal_find_peer_locked(ni, peerid);
+ peer2 = ksocknal_find_peer_locked(ni, &peerid);
if (peer2 == NULL) {
/* NB this puts an "empty" peer_ni in the peer_ni
* table (which takes my ref) */
hash_add(ksocknal_data.ksnd_peers,
- &peer_ni->ksnp_list, peerid.nid);
+ &peer_ni->ksnp_list, nidhash(&peerid.nid));
} else {
ksocknal_peer_decref(peer_ni);
peer_ni = peer2;
}
- /* +1 ref for me */
- ksocknal_peer_addref(peer_ni);
- peer_ni->ksnp_accepting++;
+ /* +1 ref for me */
+ ksocknal_peer_addref(peer_ni);
+ peer_ni->ksnp_accepting++;
/* Am I already connecting to this guy? Resolve in
* favour of higher NID...
*/
- if (peerid.nid < ni->ni_nid &&
- ksocknal_connecting(peer_ni, ((struct sockaddr *)
- &conn->ksnc_peeraddr))) {
+ if (peerid4.nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ ksocknal_connecting(peer_ni->ksnp_conn_cb,
+ ((struct sockaddr *) &conn->ksnc_peeraddr))) {
rc = EALREADY;
warn = "connection race resolution";
goto failed_2;
}
- }
+ }
- if (peer_ni->ksnp_closing ||
- (active && route->ksnr_deleted)) {
- /* peer_ni/route got closed under me */
- rc = -ESTALE;
- warn = "peer_ni/route removed";
- goto failed_2;
+ if (peer_ni->ksnp_closing ||
+ (active && conn_cb->ksnr_deleted)) {
+ /* peer_ni/conn_cb got closed under me */
+ rc = -ESTALE;
+ warn = "peer_ni/conn_cb removed";
+ goto failed_2;
}
if (peer_ni->ksnp_proto == NULL) {
goto failed_2;
}
- switch (rc) {
- default:
- LBUG();
- case 0:
- break;
- case EALREADY:
- warn = "lost conn race";
- goto failed_2;
- case EPROTO:
- warn = "retry with different protocol version";
- goto failed_2;
- }
+ switch (rc) {
+ default:
+ LBUG();
+ case 0:
+ break;
+ case EALREADY:
+ warn = "lost conn race";
+ goto failed_2;
+ case EPROTO:
+ warn = "retry with different protocol version";
+ goto failed_2;
+ }
/* Refuse to duplicate an existing connection, unless this is a
* loopback connection */
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_myaddr)) {
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
+ list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
if (!rpc_cmp_addr(
(struct sockaddr *)&conn2->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_peeraddr) ||
conn2->ksnc_type != conn->ksnc_type)
continue;
- /* Reply on a passive connection attempt so the peer_ni
- * realises we're connected. */
- LASSERT (rc == 0);
- if (!active)
- rc = EALREADY;
+ num_dup++;
+ /* If max conns per type is not registered in conn_cb
+ * as ksnr_max_conns, use ni's conns_per_peer
+ */
+ if ((peer_ni->ksnp_conn_cb &&
+ num_dup < peer_ni->ksnp_conn_cb->ksnr_max_conns) ||
+ (!peer_ni->ksnp_conn_cb &&
+ num_dup < ksocknal_get_conns_per_peer(peer_ni)))
+ continue;
- warn = "duplicate";
- goto failed_2;
- }
- }
+ /* Reply on a passive connection attempt so the peer_ni
+ * realises we're connected.
+ */
+ LASSERT(rc == 0);
+ if (!active)
+ rc = EALREADY;
- /* If the connection created by this route didn't bind to the IP
- * address the route connected to, the connection/route matching
- * code below probably isn't going to work. */
- if (active &&
- !rpc_cmp_addr((struct sockaddr *)&route->ksnr_addr,
+ warn = "duplicate";
+ goto failed_2;
+ }
+ }
+ /* If the connection created by this route didn't bind to the IP
+ * address the route connected to, the connection/route matching
+ * code below probably isn't going to work.
+ */
+ if (active &&
+ !rpc_cmp_addr((struct sockaddr *)&conn_cb->ksnr_addr,
(struct sockaddr *)&conn->ksnc_peeraddr)) {
CERROR("Route %s %pIS connected to %pIS\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &route->ksnr_addr,
+ libcfs_idstr(&peer_ni->ksnp_id),
+ &conn_cb->ksnr_addr,
&conn->ksnc_peeraddr);
- }
+ }
- /* Search for a route corresponding to the new connection and
+ /* Search for a conn_cb corresponding to the new connection and
* create an association. This allows incoming connections created
- * by routes in my peer_ni to match my own route entries so I don't
- * continually create duplicate routes. */
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- if (!rpc_cmp_addr((struct sockaddr *)&route->ksnr_addr,
- (struct sockaddr *)&conn->ksnc_peeraddr))
- continue;
+ * by conn_cbs in my peer_ni to match my own conn_cb entries so I don't
+ * continually create duplicate conn_cbs.
+ */
+ conn_cb = peer_ni->ksnp_conn_cb;
- ksocknal_associate_route_conn_locked(route, conn);
- break;
- }
+ if (conn_cb && rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
+ (struct sockaddr *)&conn_cb->ksnr_addr))
+ ksocknal_associate_cb_conn_locked(conn_cb, conn);
conn->ksnc_peer = peer_ni; /* conn takes my ref on peer_ni */
peer_ni->ksnp_last_alive = ktime_get_seconds();
* native scheduler. So use the scheduler's cpt instead.
*/
cpt = sched->kss_cpt;
- sched->kss_nconns++;
- conn->ksnc_scheduler = sched;
+ sched->kss_nconns++;
+ conn->ksnc_scheduler = sched;
conn->ksnc_tx_last_post = ktime_get_seconds();
/* Set the deadline for the outgoing HELLO to drain */
ksocknal_new_packet(conn, 0);
- conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
+ conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
/* Take packets blocking for this connection. */
list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
write_unlock_bh(global_lock);
- /* We've now got a new connection. Any errors from here on are just
- * like "normal" comms errors and we close the connection normally.
- * NB (a) we still have to send the reply HELLO for passive
+ /* We've now got a new connection. Any errors from here on are just
+ * like "normal" comms errors and we close the connection normally.
+ * NB (a) we still have to send the reply HELLO for passive
* connections,
- * (b) normal I/O on the conn is blocked until I setup and call the
- * socket callbacks.
- */
+ * (b) normal I/O on the conn is blocked until I setup and call the
+ * socket callbacks.
+ */
CDEBUG(D_NET, "New conn %s p %d.x %pIS -> %pISp"
" incarnation:%lld sched[%d]\n",
- libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
+ libcfs_id2str(peerid4), conn->ksnc_proto->pro_version,
&conn->ksnc_myaddr, &conn->ksnc_peeraddr,
incarnation, cpt);
if (!active) {
hello->kshm_nips = 0;
- rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- }
+ rc = ksocknal_send_hello(ni, conn, peerid4.nid, hello);
+ }
LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
kshm_ips[LNET_INTERFACES_NUM]));
- /* setup the socket AFTER I've received hello (it disables
- * SO_LINGER). I might call back to the acceptor who may want
- * to send a protocol version response and then close the
- * socket; this ensures the socket only tears down after the
- * response has been sent. */
- if (rc == 0)
- rc = ksocknal_lib_setup_sock(sock);
+ /* setup the socket AFTER I've received hello (it disables
+ * SO_LINGER). I might call back to the acceptor who may want
+ * to send a protocol version response and then close the
+ * socket; this ensures the socket only tears down after the
+ * response has been sent.
+ */
+ if (rc == 0)
+ rc = ksocknal_lib_setup_sock(sock);
write_lock_bh(global_lock);
- /* NB my callbacks block while I hold ksnd_global_lock */
- ksocknal_lib_set_callback(sock, conn);
+ /* NB my callbacks block while I hold ksnd_global_lock */
+ ksocknal_lib_set_callback(sock, conn);
- if (!active)
- peer_ni->ksnp_accepting--;
+ if (!active)
+ peer_ni->ksnp_accepting--;
write_unlock_bh(global_lock);
- if (rc != 0) {
+ if (rc != 0) {
write_lock_bh(global_lock);
- if (!conn->ksnc_closing) {
- /* could be closed by another thread */
- ksocknal_close_conn_locked(conn, rc);
- }
+ if (!conn->ksnc_closing) {
+ /* could be closed by another thread */
+ ksocknal_close_conn_locked(conn, rc);
+ }
write_unlock_bh(global_lock);
- } else if (ksocknal_connsock_addref(conn) == 0) {
- /* Allow I/O to proceed. */
- ksocknal_read_callback(conn);
- ksocknal_write_callback(conn);
- ksocknal_connsock_decref(conn);
- }
+ } else if (ksocknal_connsock_addref(conn) == 0) {
+ /* Allow I/O to proceed. */
+ ksocknal_read_callback(conn);
+ ksocknal_write_callback(conn);
+ ksocknal_connsock_decref(conn);
+ }
- ksocknal_connsock_decref(conn);
- ksocknal_conn_decref(conn);
- return rc;
+ ksocknal_connsock_decref(conn);
+ ksocknal_conn_decref(conn);
+ return rc;
failed_2:
+
if (!peer_ni->ksnp_closing &&
list_empty(&peer_ni->ksnp_conns) &&
- list_empty(&peer_ni->ksnp_routes)) {
+ peer_ni->ksnp_conn_cb == NULL) {
list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
ksocknal_unlink_peer_locked(peer_ni);
}
write_unlock_bh(global_lock);
- if (warn != NULL) {
- if (rc < 0)
- CERROR("Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
- else
- CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
- }
+ if (warn != NULL) {
+ if (rc < 0)
+ CERROR("Not creating conn %s type %d: %s\n",
+ libcfs_id2str(peerid4), conn->ksnc_type, warn);
+ else
+ CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
+ libcfs_id2str(peerid4), conn->ksnc_type, warn);
+ }
- if (!active) {
- if (rc > 0) {
+ if (!active) {
+ if (rc > 0) {
/* Request retry by replying with CONN_NONE
- * ksnc_proto has been set already */
- conn->ksnc_type = SOCKLND_CONN_NONE;
- hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, peerid.nid, hello);
- }
+ * ksnc_proto has been set already
+ */
+ conn->ksnc_type = SOCKLND_CONN_NONE;
+ hello->kshm_nips = 0;
+ ksocknal_send_hello(ni, conn, peerid4.nid, hello);
+ }
write_lock_bh(global_lock);
- peer_ni->ksnp_accepting--;
+ peer_ni->ksnp_accepting--;
write_unlock_bh(global_lock);
- }
+ }
/*
* If we get here without an error code, just use -EALREADY.
*/
rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
ksocknal_txlist_done(ni, &zombies, rc2);
- ksocknal_peer_decref(peer_ni);
+ ksocknal_peer_decref(peer_ni);
failed_1:
if (hello != NULL)
failed_0:
sock_release(sock);
+
return rc;
}
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context */
struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
- struct ksock_route *route;
+ struct ksock_conn_cb *conn_cb;
struct ksock_conn *conn2;
- struct list_head *tmp;
+ int conn_count;
+ int duplicate_count = 0;
LASSERT(peer_ni->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
/* ksnd_deathrow_conns takes over peer_ni's ref */
list_del(&conn->ksnc_list);
- route = conn->ksnc_route;
- if (route != NULL) {
- /* dissociate conn from route... */
- LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & BIT(conn->ksnc_type)) != 0);
-
- conn2 = NULL;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
-
- if (conn2->ksnc_route == route &&
- conn2->ksnc_type == conn->ksnc_type)
- break;
+ conn_cb = conn->ksnc_conn_cb;
+ if (conn_cb != NULL) {
+ /* dissociate conn from cb... */
+ LASSERT(!conn_cb->ksnr_deleted);
- conn2 = NULL;
+ conn_count = ksocknal_get_conn_count_by_type(conn_cb,
+ conn->ksnc_type);
+ /* connected bit is set only if all connections
+ * of the given type got created
+ */
+ if (conn_count == conn_cb->ksnr_max_conns)
+ LASSERT((conn_cb->ksnr_connected &
+ BIT(conn->ksnc_type)) != 0);
+
+ if (conn_count == 1) {
+ list_for_each_entry(conn2, &peer_ni->ksnp_conns,
+ ksnc_list) {
+ if (conn2->ksnc_conn_cb == conn_cb &&
+ conn2->ksnc_type == conn->ksnc_type)
+ duplicate_count += 1;
+ }
+ if (duplicate_count > 0)
+ CERROR("Found %d duplicate conns type %d\n",
+ duplicate_count,
+ conn->ksnc_type);
}
- if (conn2 == NULL)
- route->ksnr_connected &= ~BIT(conn->ksnc_type);
+ ksocknal_decr_conn_count(conn_cb, conn->ksnc_type);
- conn->ksnc_route = NULL;
+ conn->ksnc_conn_cb = NULL;
- ksocknal_route_decref(route); /* drop conn's ref on route */
+ /* drop conn's ref on conn_cb */
+ ksocknal_conn_cb_decref(conn_cb);
}
if (list_empty(&peer_ni->ksnp_conns)) {
/* stash last conn close reason */
peer_ni->ksnp_error = error;
- if (list_empty(&peer_ni->ksnp_routes)) {
+ if (peer_ni->ksnp_conn_cb == NULL) {
/* I've just closed last conn belonging to a
- * peer_ni with no routes to it */
+ * peer_ni with no connections to it
+ */
ksocknal_unlink_peer_locked(peer_ni);
}
}
if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
list_empty(&peer_ni->ksnp_conns) &&
peer_ni->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
+ !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
notify = true;
last_alive = peer_ni->ksnp_last_alive;
}
read_unlock(&ksocknal_data.ksnd_global_lock);
if (notify)
- lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
+ lnet_notify(peer_ni->ksnp_ni,
+ lnet_nid_to_nid4(&peer_ni->ksnp_id.nid),
false, false, last_alive);
}
spin_lock(&peer_ni->ksnp_lock);
- list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
+ list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list,
+ tx_zc_list) {
if (tx->tx_conn != conn)
continue;
spin_unlock(&peer_ni->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
-
+ while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
+ tx_zc_list)) != NULL) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
time64_t last_rcv;
/* Final coup-de-grace of the reaper */
- CDEBUG (D_NET, "connection %p\n", conn);
+ CDEBUG(D_NET, "connection %p\n", conn);
LASSERT(refcount_read(&conn->ksnc_conn_refcount) == 0);
LASSERT(refcount_read(&conn->ksnc_sock_refcount) == 0);
- LASSERT (conn->ksnc_sock == NULL);
- LASSERT (conn->ksnc_route == NULL);
- LASSERT (!conn->ksnc_tx_scheduled);
- LASSERT (!conn->ksnc_rx_scheduled);
+ LASSERT(conn->ksnc_sock == NULL);
+ LASSERT(conn->ksnc_conn_cb == NULL);
+ LASSERT(!conn->ksnc_tx_scheduled);
+ LASSERT(!conn->ksnc_rx_scheduled);
LASSERT(list_empty(&conn->ksnc_tx_queue));
/* complete current receive if any */
last_rcv = conn->ksnc_rx_deadline -
ksocknal_timeout();
CERROR("Completing partial receive from %s[%d], ip %pISp, with error, wanted: %d, left: %d, last alive is %lld secs ago\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
+ libcfs_idstr(&conn->ksnc_peer->ksnp_id),
+ conn->ksnc_type,
&conn->ksnc_peeraddr,
- conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
+ conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
ktime_get_seconds() - last_rcv);
if (conn->ksnc_lnet_msg)
conn->ksnc_lnet_msg->msg_health_status =
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
CERROR("Incomplete receive of lnet header from %s, ip %pISp, with error, protocol: %d.x.\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ libcfs_idstr(&conn->ksnc_peer->ksnp_id),
&conn->ksnc_peeraddr,
conn->ksnc_proto->pro_version);
break;
- case SOCKNAL_RX_KSM_HEADER:
- if (conn->ksnc_rx_started)
+ case SOCKNAL_RX_KSM_HEADER:
+ if (conn->ksnc_rx_started)
CERROR("Incomplete receive of ksock message from %s, ip %pISp, with error, protocol: %d.x.\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ libcfs_idstr(&conn->ksnc_peer->ksnp_id),
&conn->ksnc_peeraddr,
conn->ksnc_proto->pro_version);
- break;
- case SOCKNAL_RX_SLOP:
- if (conn->ksnc_rx_started)
+ break;
+ case SOCKNAL_RX_SLOP:
+ if (conn->ksnc_rx_started)
CERROR("Incomplete receive of slops from %s, ip %pISp, with error\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ libcfs_idstr(&conn->ksnc_peer->ksnp_id),
&conn->ksnc_peeraddr);
- break;
- default:
- LBUG ();
- break;
- }
+ break;
+ default:
+ LBUG();
+ break;
+ }
- ksocknal_peer_decref(conn->ksnc_peer);
+ ksocknal_peer_decref(conn->ksnc_peer);
- LIBCFS_FREE (conn, sizeof (*conn));
+ LIBCFS_FREE(conn, sizeof(*conn));
}
int
}
int
-ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
+ksocknal_close_matching_conns(struct lnet_processid *id, __u32 ipaddr)
{
struct ksock_peer_ni *peer_ni;
struct hlist_node *pnxt;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY) {
- lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ if (!LNET_NID_IS_ANY(&id->nid)) {
+ lo = hash_min(nidhash(&id->nid),
+ HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
lo = 0;
&ksocknal_data.ksnd_peers[i],
ksnp_list) {
- if (!((id.nid == LNET_NID_ANY ||
- id.nid == peer_ni->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY ||
- id.pid == peer_ni->ksnp_id.pid)))
+ if (!((LNET_NID_IS_ANY(&id->nid) ||
+ nid_same(&id->nid, &peer_ni->ksnp_id.nid)) &&
+ (id->pid == LNET_PID_ANY ||
+ id->pid == peer_ni->ksnp_id.pid)))
continue;
count += ksocknal_close_peer_conns_locked(
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
- if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+ if (LNET_NID_IS_ANY(&id->nid) || id->pid == LNET_PID_ANY ||
+ ipaddr == 0)
return 0;
return (count == 0 ? -ENOENT : 0);
/* The router is telling me she's been notified of a change in
* gateway state....
*/
- struct lnet_process_id id = {
- .nid = gw_nid,
+ struct lnet_processid id = {
.pid = LNET_PID_ANY,
};
CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));
+ lnet_nid4_to_nid(gw_nid, &id.nid);
/* If the gateway crashed, close all open connections... */
- ksocknal_close_matching_conns(id, 0);
+ ksocknal_close_matching_conns(&id, 0);
return;
/* We can only establish new connections
{
int index;
int i;
- struct list_head *tmp;
struct ksock_conn *conn;
for (index = 0; ; index++) {
i = 0;
conn = NULL;
- list_for_each(tmp, &peer_ni->ksnp_conns) {
+ list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
if (i++ == index) {
- conn = list_entry(tmp, struct ksock_conn,
- ksnc_list);
ksocknal_conn_addref(conn);
break;
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (conn == NULL)
+ if (i <= index)
break;
ksocknal_lib_push_conn (conn);
}
static int
-ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
+ksocknal_push(struct lnet_ni *ni, struct lnet_processid *id)
{
int lo;
int hi;
int bkt;
int rc = -ENOENT;
- if (id.nid != LNET_NID_ANY) {
- lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ if (!LNET_NID_IS_ANY(&id->nid)) {
+ lo = hash_min(nidhash(&id->nid),
+ HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
lo = 0;
hlist_for_each_entry(peer_ni,
&ksocknal_data.ksnd_peers[bkt],
ksnp_list) {
- if (!((id.nid == LNET_NID_ANY ||
- id.nid == peer_ni->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY ||
- id.pid == peer_ni->ksnp_id.pid)))
+ if (!((LNET_NID_IS_ANY(&id->nid) ||
+ nid_same(&id->nid,
+ &peer_ni->ksnp_id.nid)) &&
+ (id->pid == LNET_PID_ANY ||
+ id->pid == peer_ni->ksnp_id.pid)))
continue;
if (i++ == peer_off) {
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
- struct lnet_process_id id = {0};
+ struct lnet_process_id id4 = {};
+ struct lnet_processid id = {};
struct libcfs_ioctl_data *data = arg;
int rc;
read_lock(&ksocknal_data.ksnd_global_lock);
if (data->ioc_count >= 1) {
- rc = -ENOENT;
- } else {
- rc = 0;
+ rc = -ENOENT;
+ } else {
+ rc = 0;
iface = &net->ksnn_interface;
sa = (void *)&iface->ksni_addr;
return rc;
}
- case IOC_LIBCFS_GET_PEER: {
- __u32 myip = 0;
- __u32 ip = 0;
- int port = 0;
- int conn_count = 0;
- int share_count = 0;
+ case IOC_LIBCFS_GET_PEER: {
+ __u32 myip = 0;
+ __u32 ip = 0;
+ int port = 0;
+ int conn_count = 0;
+ int share_count = 0;
- rc = ksocknal_get_peer_info(ni, data->ioc_count,
- &id, &myip, &ip, &port,
- &conn_count, &share_count);
- if (rc != 0)
- return rc;
+ rc = ksocknal_get_peer_info(ni, data->ioc_count,
+ &id4, &myip, &ip, &port,
+ &conn_count, &share_count);
+ if (rc != 0)
+ return rc;
- data->ioc_nid = id.nid;
- data->ioc_count = share_count;
- data->ioc_u32[0] = ip;
- data->ioc_u32[1] = port;
- data->ioc_u32[2] = myip;
- data->ioc_u32[3] = conn_count;
- data->ioc_u32[4] = id.pid;
- return 0;
- }
+ data->ioc_nid = id4.nid;
+ data->ioc_count = share_count;
+ data->ioc_u32[0] = ip;
+ data->ioc_u32[1] = port;
+ data->ioc_u32[2] = myip;
+ data->ioc_u32[3] = conn_count;
+ data->ioc_u32[4] = id4.pid;
+ return 0;
+ }
case IOC_LIBCFS_ADD_PEER: {
struct sockaddr_in sa = {.sin_family = AF_INET};
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_LUSTRE;
+ id4.nid = data->ioc_nid;
+ id4.pid = LNET_PID_LUSTRE;
sa.sin_addr.s_addr = htonl(data->ioc_u32[0]);
sa.sin_port = htons(data->ioc_u32[1]);
- return ksocknal_add_peer(ni, id, (struct sockaddr *)&sa);
+ return ksocknal_add_peer(ni, id4, (struct sockaddr *)&sa);
}
- case IOC_LIBCFS_DEL_PEER:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_del_peer (ni, id,
- data->ioc_u32[0]); /* IP */
+ case IOC_LIBCFS_DEL_PEER:
+ id4.nid = data->ioc_nid;
+ id4.pid = LNET_PID_ANY;
+ return ksocknal_del_peer(ni, id4,
+ data->ioc_u32[0]); /* IP */
case IOC_LIBCFS_GET_CONN: {
int txmem;
ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
- data->ioc_count = txmem;
- data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
- data->ioc_flags = nagle;
+ data->ioc_count = txmem;
+ data->ioc_nid = lnet_nid_to_nid4(&conn->ksnc_peer->ksnp_id.nid);
+ data->ioc_flags = nagle;
if (psa->sin_family == AF_INET)
data->ioc_u32[0] = ntohl(psa->sin_addr.s_addr);
else
return 0;
}
- case IOC_LIBCFS_CLOSE_CONNECTION:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_close_matching_conns (id,
- data->ioc_u32[0]);
-
- case IOC_LIBCFS_REGISTER_MYNID:
- /* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid)
- return 0;
-
- CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
- libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
-
- case IOC_LIBCFS_PUSH_CONNECTION:
- id.nid = data->ioc_nid;
- id.pid = LNET_PID_ANY;
- return ksocknal_push(ni, id);
-
- default:
- return -EINVAL;
- }
- /* not reached */
+ case IOC_LIBCFS_CLOSE_CONNECTION:
+ lnet_nid4_to_nid(data->ioc_nid, &id.nid);
+ id.pid = LNET_PID_ANY;
+ return ksocknal_close_matching_conns(&id,
+ data->ioc_u32[0]);
+
+ case IOC_LIBCFS_REGISTER_MYNID:
+ /* Ignore if this is a noop */
+ if (nid_is_nid4(&ni->ni_nid) &&
+ data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid))
+ return 0;
+
+ CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+ libcfs_nid2str(data->ioc_nid),
+ libcfs_nidstr(&ni->ni_nid));
+ return -EINVAL;
+
+ case IOC_LIBCFS_PUSH_CONNECTION:
+ lnet_nid4_to_nid(data->ioc_nid, &id.nid);
+ id.pid = LNET_PID_ANY;
+ return ksocknal_push(ni, &id);
+
+ default:
+ return -EINVAL;
+ }
+ /* not reached */
}
static void
list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, struct ksock_tx, tx_list);
+ while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
}
}
+/* Query the physical link state of @dev.
+ *
+ * Returns 1 if the link is up, 0 if the device is not running or
+ * reports no carrier, and -1 if the state cannot be determined
+ * (no ethtool get_link support).
+ */
+static int ksocknal_get_link_status(struct net_device *dev)
+{
+	int ret = -1;
+
+	LASSERT(dev);
+
+	if (!netif_running(dev)) {
+		ret = 0;
+		CDEBUG(D_NET, "device not running\n");
+	} else if (dev->ethtool_ops && dev->ethtool_ops->get_link) {
+		/* Some devices may not provide ethtool ops or link
+		 * settings at all - leave ret at -1 in that case.
+		 */
+		ret = dev->ethtool_ops->get_link(dev);
+		CDEBUG(D_NET, "get_link returns %u\n", ret);
+	}
+
+	return ret;
+}
+
+/* Propagate an interface operstate change to every socklnd NI bound
+ * to @dev: a NI is marked fatal (ni_fatal_error_on) when the interface
+ * is down, no longer carries the NI's configured IPv4 address, or
+ * reports no carrier.  Always returns 0.
+ */
+static int
+ksocknal_handle_link_state_change(struct net_device *dev,
+				  unsigned char operstate)
+{
+	struct lnet_ni *ni = NULL;
+	struct ksock_net *net;
+	struct ksock_net *cnxt;
+	int ifindex;
+	unsigned char link_down = !(operstate == IF_OPER_UP);
+	struct in_device *in_dev;
+	bool found_ip = false;
+	struct ksock_interface *ksi = NULL;
+	struct sockaddr_in *sa;
+	int link_up;
+	DECLARE_CONST_IN_IFADDR(ifa);
+
+	ifindex = dev->ifindex;
+
+	if (!ksocknal_data.ksnd_nnets)
+		goto out;
+
+	list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
+				 ksnn_list) {
+
+		ksi = &net->ksnn_interface;
+		sa = (void *)&ksi->ksni_addr;
+		found_ip = false;
+
+		/* only nets configured on this exact device */
+		if (ksi->ksni_index != ifindex ||
+		    strcmp(ksi->ksni_name, dev->name))
+			continue;
+
+		ni = net->ksnn_ni;
+
+		in_dev = __in_dev_get_rtnl(dev);
+		if (!in_dev) {
+			CDEBUG(D_NET, "Interface %s has no IPv4 status.\n",
+			       dev->name);
+			CDEBUG(D_NET, "set link fatal state to 1\n");
+			atomic_set(&ni->ni_fatal_error_on, 1);
+			continue;
+		}
+		/* the NI's configured address must still be present */
+		in_dev_for_each_ifa_rtnl(ifa, in_dev) {
+			if (sa->sin_addr.s_addr == ifa->ifa_local)
+				found_ip = true;
+		}
+		endfor_ifa(in_dev);
+
+		if (!found_ip) {
+			CDEBUG(D_NET, "Interface %s has no matching ip\n",
+			       dev->name);
+			CDEBUG(D_NET, "set link fatal state to 1\n");
+			atomic_set(&ni->ni_fatal_error_on, 1);
+			continue;
+		}
+
+		if (link_down) {
+			CDEBUG(D_NET, "set link fatal state to 1\n");
+			atomic_set(&ni->ni_fatal_error_on, link_down);
+		} else {
+			/* sample the carrier state once so the value we
+			 * log is the same one we store
+			 */
+			link_up = ksocknal_get_link_status(dev);
+			CDEBUG(D_NET, "set link fatal state to %u\n",
+			       (link_up == 0));
+			atomic_set(&ni->ni_fatal_error_on,
+				   (link_up == 0));
+		}
+	}
+out:
+	return 0;
+}
+
+
+/* React to an IPv4 address being added to or removed from an
+ * interface: if the address is exactly the one a socklnd NI is
+ * configured on, mark that NI fatal on NETDEV_DOWN and healthy
+ * otherwise.  Always returns 0.
+ */
+static int
+ksocknal_handle_inetaddr_change(struct in_ifaddr *ifa, unsigned long event)
+{
+	struct lnet_ni *ni;
+	struct ksock_net *net;
+	struct ksock_net *cnxt;
+	struct net_device *event_netdev = ifa->ifa_dev->dev;
+	int ifindex;
+	struct ksock_interface *ksi = NULL;
+	struct sockaddr_in *sa;
+
+	/* nothing to update before any network is configured */
+	if (!ksocknal_data.ksnd_nnets)
+		goto out;
+
+	ifindex = event_netdev->ifindex;
+
+	list_for_each_entry_safe(net, cnxt, &ksocknal_data.ksnd_nets,
+				 ksnn_list) {
+
+		ksi = &net->ksnn_interface;
+		sa = (void *)&ksi->ksni_addr;
+
+		/* only nets bound to the device owning this address */
+		if (ksi->ksni_index != ifindex ||
+		    strcmp(ksi->ksni_name, event_netdev->name))
+			continue;
+
+		/* exact address match: DOWN => fatal, UP/CHANGE => clear */
+		if (sa->sin_addr.s_addr == ifa->ifa_local) {
+			CDEBUG(D_NET, "set link fatal state to %u\n",
+			       (event == NETDEV_DOWN));
+			ni = net->ksnn_ni;
+			atomic_set(&ni->ni_fatal_error_on,
+				   (event == NETDEV_DOWN));
+		}
+	}
+out:
+	return 0;
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+/* netdev notifier callback: forward interface up/down/change events to
+ * ksocknal_handle_link_state_change() so affected NIs can be marked
+ * fatal or healthy.  All other netdev events are deliberately ignored.
+ */
+static int ksocknal_device_event(struct notifier_block *unused,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	unsigned char operstate;
+
+	operstate = dev->operstate;
+
+	CDEBUG(D_NET, "devevent: status=%ld, iface=%s ifindex %d state %u\n",
+	       event, dev->name, dev->ifindex, operstate);
+
+	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_DOWN:
+	case NETDEV_CHANGE:
+		ksocknal_handle_link_state_change(dev, operstate);
+		break;
+	}
+
+	/* notifier chains expect NOTIFY_OK even for ignored events */
+	return NOTIFY_OK;
+}
+
+/************************************
+ * Inetaddr notifier event handler
+ ************************************/
+/* inetaddr notifier callback: forward IPv4 address add/remove/change
+ * events to ksocknal_handle_inetaddr_change().  Other events are
+ * deliberately ignored.
+ */
+static int ksocknal_inetaddr_event(struct notifier_block *unused,
+				   unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = ptr;
+
+	CDEBUG(D_NET, "addrevent: status %ld ip addr %pI4, netmask %pI4.\n",
+	       event, &ifa->ifa_address, &ifa->ifa_mask);
+
+	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_DOWN:
+	case NETDEV_CHANGE:
+		ksocknal_handle_inetaddr_change(ifa, event);
+		break;
+	}
+
+	/* notifier chains expect NOTIFY_OK even for ignored events */
+	return NOTIFY_OK;
+}
+
+/* Notifier blocks wiring the callbacks above into the kernel's netdev
+ * and inetaddr notification chains; registered/unregistered with the
+ * module's base startup/shutdown (see register_netdevice_notifier /
+ * unregister_netdevice_notifier calls elsewhere in this file).
+ */
+static struct notifier_block ksocknal_dev_notifier_block = {
+	.notifier_call = ksocknal_device_event,
+};
+
+static struct notifier_block ksocknal_inetaddr_notifier_block = {
+	.notifier_call = ksocknal_inetaddr_event,
+};
+
static void
ksocknal_base_shutdown(void)
{
libcfs_kmem_read());
LASSERT (ksocknal_data.ksnd_nnets == 0);
+ if (ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL) {
+ unregister_netdevice_notifier(&ksocknal_dev_notifier_block);
+ unregister_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
+ }
+
switch (ksocknal_data.ksnd_init) {
default:
LASSERT(0);
/* flag threads to terminate; wake and wait for them to die */
ksocknal_data.ksnd_shuttingdown = 1;
wake_up_all(&ksocknal_data.ksnd_connd_waitq);
- wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
if (ksocknal_data.ksnd_schedulers != NULL) {
cfs_percpt_for_each(sched, i,
}
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- char name[16];
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
- (void *)((uintptr_t)i), name);
+ (void *)((uintptr_t)i),
+ "socknal_cd%02d", i);
if (rc != 0) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
goto failed;
}
+ register_netdevice_notifier(&ksocknal_dev_notifier_block);
+ register_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
+
/* flag everything initialised */
ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
read_lock(&ksocknal_data.ksnd_global_lock);
hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
- struct ksock_route *route;
+ struct ksock_conn_cb *conn_cb;
struct ksock_conn *conn;
if (peer_ni->ksnp_ni != ni)
continue;
- CWARN("Active peer_ni on shutdown: %s, ref %d, "
- "closing %d, accepting %d, err %d, zcookie %llu, "
- "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
+ CWARN("Active peer_ni on shutdown: %s, ref %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
+ libcfs_idstr(&peer_ni->ksnp_id),
refcount_read(&peer_ni->ksnp_refcount),
peer_ni->ksnp_closing,
peer_ni->ksnp_accepting, peer_ni->ksnp_error,
!list_empty(&peer_ni->ksnp_tx_queue),
!list_empty(&peer_ni->ksnp_zc_req_list));
- list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
- CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
- refcount_read(&route->ksnr_refcount),
- route->ksnr_scheduled, route->ksnr_connecting,
- route->ksnr_connected, route->ksnr_deleted);
+ conn_cb = peer_ni->ksnp_conn_cb;
+ if (conn_cb) {
+ CWARN("ConnCB: ref %d, schd %d, conn %d, cnted %d, del %d\n",
+ refcount_read(&conn_cb->ksnr_refcount),
+ conn_cb->ksnr_scheduled, conn_cb->ksnr_connecting,
+ conn_cb->ksnr_connected, conn_cb->ksnr_deleted);
}
list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
for (i = 0; i < nthrs; i++) {
long id;
- char name[20];
id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
- snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
-
- rc = ksocknal_thread_start(ksocknal_scheduler,
- (void *)id, name);
+ rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id,
+ "socknal_sd%02d_%02d",
+ sched->kss_cpt,
+ (int)KSOCK_THREAD_SID(id));
if (rc == 0)
continue;
ksocknal_startup(struct lnet_ni *ni)
{
struct ksock_net *net;
- struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
struct ksock_interface *ksi = NULL;
struct lnet_inetdev *ifaces = NULL;
struct sockaddr_in *sa;
goto fail_0;
net->ksnn_incarnation = ktime_get_real_ns();
ni->ni_data = net;
- net_tunables = &ni->ni_net->net_tunables;
- if (net_tunables->lct_peer_timeout == -1)
- net_tunables->lct_peer_timeout =
- *ksocknal_tunables.ksnd_peertimeout;
- if (net_tunables->lct_max_tx_credits == -1)
- net_tunables->lct_max_tx_credits =
- *ksocknal_tunables.ksnd_credits;
-
- if (net_tunables->lct_peer_tx_credits == -1)
- net_tunables->lct_peer_tx_credits =
- *ksocknal_tunables.ksnd_peertxcredits;
-
- if (net_tunables->lct_peer_tx_credits >
- net_tunables->lct_max_tx_credits)
- net_tunables->lct_peer_tx_credits =
- net_tunables->lct_max_tx_credits;
-
- if (net_tunables->lct_peer_rtr_credits == -1)
- net_tunables->lct_peer_rtr_credits =
- *ksocknal_tunables.ksnd_peerrtrcredits;
+ ksocknal_tunables_setup(ni);
rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
if (rc < 0)
LASSERT(ksi);
LASSERT(ksi->ksni_addr.ss_family == AF_INET);
- ni->ni_nid = LNET_MKNID(
- LNET_NIDNET(ni->ni_nid),
- ntohl(((struct sockaddr_in *)
- &ksi->ksni_addr)->sin_addr.s_addr));
+ ni->ni_nid.nid_addr[0] =
+ ((struct sockaddr_in *)&ksi->ksni_addr)->sin_addr.s_addr;
list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
+ net->ksnn_ni = ni;
ksocknal_data.ksnd_nnets++;
return 0;
return -ENETDOWN;
}
-
static void __exit ksocklnd_exit(void)
{
lnet_unregister_lnd(&the_ksocklnd);