#include "socklnd.h"
static struct lnet_lnd the_ksocklnd;
-ksock_nal_data_t ksocknal_data;
+struct ksock_nal_data ksocknal_data;
-static ksock_interface_t *
+static struct ksock_interface *
ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT(i < LNET_NUM_INTERFACES);
return NULL;
}
-static ksock_route_t *
-ksocknal_create_route (__u32 ipaddr, int port)
+static struct ksock_route *
+ksocknal_create_route(__u32 ipaddr, int port)
{
- ksock_route_t *route;
+ struct ksock_route *route;
LIBCFS_ALLOC (route, sizeof (*route));
if (route == NULL)
}
void
-ksocknal_destroy_route (ksock_route_t *route)
+ksocknal_destroy_route(struct ksock_route *route)
{
LASSERT (atomic_read(&route->ksnr_refcount) == 0);
}
static int
-ksocknal_create_peer(ksock_peer_ni_t **peerp, struct lnet_ni *ni,
+ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
struct lnet_process_id id)
{
- int cpt = lnet_cpt_of_nid(id.nid, ni);
- ksock_net_t *net = ni->ni_data;
- ksock_peer_ni_t *peer_ni;
+ int cpt = lnet_cpt_of_nid(id.nid, ni);
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_peer_ni *peer_ni;
LASSERT(id.nid != LNET_NID_ANY);
LASSERT(id.pid != LNET_PID_ANY);
}
void
-ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
+ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
{
- ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
+ struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
CDEBUG (D_NET, "peer_ni %s %p deleted\n",
libcfs_id2str(peer_ni->ksnp_id), peer_ni);
spin_unlock_bh(&net->ksnn_lock);
}
-ksock_peer_ni_t *
+struct ksock_peer_ni *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
list_for_each(tmp, peer_list) {
-
- peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
LASSERT(!peer_ni->ksnp_closing);
return NULL;
}
-ksock_peer_ni_t *
+struct ksock_peer_ni *
ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
read_lock(&ksocknal_data.ksnd_global_lock);
peer_ni = ksocknal_find_peer_locked(ni, id);
}
static void
-ksocknal_unlink_peer_locked(ksock_peer_ni_t *peer_ni)
+ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
{
int i;
__u32 ip;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
LASSERT(i < LNET_NUM_INTERFACES);
struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
- ksock_peer_ni_t *peer_ni;
- struct list_head *ptmp;
- ksock_route_t *route;
- struct list_head *rtmp;
- int i;
- int j;
- int rc = -ENOENT;
+ struct ksock_peer_ni *peer_ni;
+ struct list_head *ptmp;
+ struct ksock_route *route;
+ struct list_head *rtmp;
+ int i;
+ int j;
+ int rc = -ENOENT;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
if (peer_ni->ksnp_ni != ni)
continue;
if (index-- > 0)
continue;
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
*id = peer_ni->ksnp_id;
}
static void
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
- ksock_peer_ni_t *peer_ni = route->ksnr_peer;
- int type = conn->ksnc_type;
- ksock_interface_t *iface;
+ struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ int type = conn->ksnc_type;
+ struct ksock_interface *iface;
conn->ksnc_route = route;
ksocknal_route_addref(route);
}
static void
-ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
+ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
{
struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_route_t *route2;
+ struct ksock_conn *conn;
+ struct ksock_route *route2;
LASSERT(!peer_ni->ksnp_closing);
LASSERT(route->ksnr_peer == NULL);
/* LASSERT(unique) */
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
continue;
}
static void
-ksocknal_del_route_locked (ksock_route_t *route)
+ksocknal_del_route_locked(struct ksock_route *route)
{
- ksock_peer_ni_t *peer_ni = route->ksnr_peer;
- ksock_interface_t *iface;
- ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
+ struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ struct ksock_interface *iface;
+ struct ksock_conn *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
LASSERT(!route->ksnr_deleted);
/* Close associated conns */
list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_route != route)
continue;
int port)
{
struct list_head *tmp;
- ksock_peer_ni_t *peer_ni;
- ksock_peer_ni_t *peer2;
- ksock_route_t *route;
- ksock_route_t *route2;
- int rc;
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_peer_ni *peer2;
+ struct ksock_route *route;
+ struct ksock_route *route2;
+ int rc;
if (id.nid == LNET_NID_ANY ||
id.pid == LNET_PID_ANY)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2 != NULL) {
route2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == ipaddr)
break;
}
static void
-ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
{
- ksock_conn_t *conn;
- ksock_route_t *route;
+ struct ksock_conn *conn;
+ struct ksock_route *route;
struct list_head *tmp;
struct list_head *nxt;
- int nshared;
+ int nshared;
LASSERT(!peer_ni->ksnp_closing);
ksocknal_peer_addref(peer_ni);
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* no match */
if (!(ip == 0 || route->ksnr_ipaddr == ip))
nshared = 0;
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
nshared += route->ksnr_share_count;
}
* left */
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* we should only be removing auto-entries */
LASSERT(route->ksnr_share_count == 0);
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
ksocknal_close_conn_locked(conn, 0);
}
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- ksock_peer_ni_t *peer_ni;
- int lo;
- int hi;
- int i;
- int rc = -ENOENT;
+ struct ksock_peer_ni *peer_ni;
+ int lo;
+ int hi;
+ int i;
+ int rc = -ENOENT;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
&ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
if (peer_ni->ksnp_ni != ni)
continue;
return rc;
}
-static ksock_conn_t *
+static struct ksock_conn *
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
struct list_head *ptmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
- int i;
+ int i;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
LASSERT(!peer_ni->ksnp_closing);
if (index-- > 0)
continue;
- conn = list_entry(ctmp, ksock_conn_t,
+ conn = list_entry(ctmp, struct ksock_conn,
ksnc_list);
ksocknal_conn_addref(conn);
 read_unlock(&ksocknal_data.ksnd_global_lock);
return NULL;
}
-static ksock_sched_t *
+static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- ksock_sched_t *sched;
- int i;
+ struct ksock_sched *sched;
+ int i;
if (info->ksi_nthreads == 0) {
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
static int
ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
int nip;
}
static int
-ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
+ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
{
- int best_netmatch = 0;
- int best_xor = 0;
- int best = -1;
- int this_xor;
- int this_netmatch;
- int i;
+ int best_netmatch = 0;
+ int best_xor = 0;
+ int best = -1;
+ int this_xor;
+ int this_netmatch;
+ int i;
for (i = 0; i < nips; i++) {
if (ips[i] == 0)
}
static int
-ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
+ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
- int n_ips;
- int i;
- int j;
- int k;
- __u32 ip;
- __u32 xor;
- int this_netmatch;
- int best_netmatch;
- int best_npeers;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
+ int n_ips;
+ int i;
+ int j;
+ int k;
+ u32 ip;
+ u32 xor;
+ int this_netmatch;
+ int best_netmatch;
+ int best_npeers;
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
}
static void
-ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
+ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
- ksock_route_t *newroute = NULL;
+ struct ksock_route *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
struct lnet_ni *ni = peer_ni->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
struct list_head *rtmp;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
+ struct ksock_route *route;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
int best_netmatch;
int this_netmatch;
int best_nroutes;
/* Already got a route? */
route = NULL;
list_for_each(rtmp, &peer_ni->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ route = list_entry(rtmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
break;
/* Using this interface already? */
list_for_each(rtmp, &peer_ni->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
int
ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
- ksock_connreq_t *cr;
- int rc;
- __u32 peer_ip;
- int peer_port;
+ struct ksock_connreq *cr;
+ int rc;
+ u32 peer_ip;
+ int peer_port;
rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
LASSERT(rc == 0); /* we succeeded before */
}
static int
-ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
+ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
{
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
}
int
-ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
+ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
struct socket *sock, int type)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
struct lnet_process_id peerid;
- struct list_head *tmp;
- __u64 incarnation;
- ksock_conn_t *conn;
- ksock_conn_t *conn2;
- ksock_peer_ni_t *peer_ni = NULL;
- ksock_peer_ni_t *peer2;
- ksock_sched_t *sched;
+ struct list_head *tmp;
+ u64 incarnation;
+ struct ksock_conn *conn;
+ struct ksock_conn *conn2;
+ struct ksock_peer_ni *peer_ni = NULL;
+ struct ksock_peer_ni *peer2;
+ struct ksock_sched *sched;
struct ksock_hello_msg *hello;
- int cpt;
- ksock_tx_t *tx;
- ksock_tx_t *txtmp;
- int rc;
- int rc2;
- int active;
- char *warn = NULL;
+ int cpt;
+ struct ksock_tx *tx;
+ struct ksock_tx *txtmp;
+ int rc;
+ int rc2;
+ int active;
+ char *warn = NULL;
active = (route != NULL);
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
peer2 = ksocknal_find_peer_locked(ni, peerid);
if (peer2 == NULL) {
* loopback connection */
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
* by routes in my peer_ni to match my own route entries so I don't
* continually create duplicate routes. */
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
continue;
}
void
-ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
+ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
/* This just does the immmediate housekeeping, and queues the
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context */
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_route_t *route;
- ksock_conn_t *conn2;
- struct list_head *tmp;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_route *route;
+ struct ksock_conn *conn2;
+ struct list_head *tmp;
LASSERT(peer_ni->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
conn2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_route == route &&
conn2->ksnc_type == conn->ksnc_type)
/* No more connections to this peer_ni */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
}
void
-ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
+ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
{
- int notify = 0;
+ int notify = 0;
time64_t last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
}
void
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
+ksocknal_finalize_zcreq(struct ksock_conn *conn)
{
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
- struct list_head zlist = LIST_HEAD_INIT(zlist);
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *tmp;
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
/* NB safe to finalize TXs because closing of socket will
* abort all buffered data */
spin_unlock(&peer_ni->ksnp_lock);
while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
void
-ksocknal_terminate_conn(ksock_conn_t *conn)
+ksocknal_terminate_conn(struct ksock_conn *conn)
{
/* This gets called by the reaper (guaranteed thread context) to
* disengage the socket from its callbacks and close it.
* ksnc_refcount will eventually hit zero, and then the reaper will
* destroy it. */
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_sched_t *sched = conn->ksnc_scheduler;
- int failed = 0;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
+ int failed = 0;
LASSERT(conn->ksnc_closing);
}
void
-ksocknal_queue_zombie_conn (ksock_conn_t *conn)
+ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
/* Queue the conn for the reaper to destroy */
-
LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
-ksocknal_destroy_conn(ksock_conn_t *conn)
+ksocknal_destroy_conn(struct ksock_conn *conn)
{
time64_t last_rcv;
}
int
-ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
+ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
{
- ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
+ struct ksock_conn *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (ipaddr == 0 ||
conn->ksnc_ipaddr == ipaddr) {
}
int
-ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
+ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
{
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- __u32 ipaddr = conn->ksnc_ipaddr;
- int count;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ u32 ipaddr = conn->ksnc_ipaddr;
+ int count;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
int
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
- ksock_peer_ni_t *peer_ni;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- int count = 0;
+ struct ksock_peer_ni *peer_ni;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ int lo;
+ int hi;
+ int i;
+ int count = 0;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
int connect = 1;
time64_t last_alive = 0;
time64_t now = ktime_get_seconds();
- ksock_peer_ni_t *peer_ni = NULL;
+ struct ksock_peer_ni *peer_ni = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = {
.nid = nid,
read_lock(glock);
- peer_ni = ksocknal_find_peer_locked(ni, id);
- if (peer_ni != NULL) {
- struct list_head *tmp;
- ksock_conn_t *conn;
- int bufnob;
+ peer_ni = ksocknal_find_peer_locked(ni, id);
+ if (peer_ni != NULL) {
+ struct list_head *tmp;
+ struct ksock_conn *conn;
+ int bufnob;
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
}
static void
-ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
+ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
{
- int index;
- int i;
- struct list_head *tmp;
- ksock_conn_t *conn;
+ int index;
+ int i;
+ struct list_head *tmp;
+ struct ksock_conn *conn;
for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each(tmp, &peer_ni->ksnp_conns) {
if (i++ == index) {
- conn = list_entry(tmp, ksock_conn_t,
- ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn,
+ ksnc_list);
ksocknal_conn_addref(conn);
break;
}
int peer_off; /* searching offset in peer_ni hash table */
for (peer_off = 0; ; peer_off++) {
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
int i = 0;
read_lock(&ksocknal_data.ksnd_global_lock);
static int
ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
int rc;
int i;
int j;
struct list_head *ptmp;
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
struct list_head *rtmp;
- ksock_route_t *route;
+ struct ksock_route *route;
if (ipaddress == 0 ||
netmask == 0)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t,
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni,
ksnp_list);
for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
list_for_each(rtmp, &peer_ni->ksnp_routes) {
route = list_entry(rtmp,
- ksock_route_t,
+ struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
}
static void
-ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
{
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_route_t *route;
- ksock_conn_t *conn;
- int i;
- int j;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
+ int i;
+ int j;
for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_myipaddr != ipaddr)
continue;
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked (conn, 0);
static int
ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
- ksock_net_t *net = ni->ni_data;
- int rc = -ENOENT;
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_peer_ni_t *peer_ni;
- __u32 this_ip;
- int i;
- int j;
+ struct ksock_net *net = ni->ni_data;
+ int rc = -ENOENT;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ struct ksock_peer_ni *peer_ni;
+ u32 this_ip;
+ int i;
+ int j;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer_ni = list_entry(tmp, ksock_peer_ni_t,
- ksnp_list);
+ &ksocknal_data.ksnd_peers[j]) {
+ peer_ni = list_entry(tmp, struct ksock_peer_ni,
+ ksnp_list);
if (peer_ni->ksnp_ni != ni)
continue;
switch(cmd) {
case IOC_LIBCFS_GET_INTERFACE: {
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
read_lock(&ksocknal_data.ksnd_global_lock);
int txmem;
int rxmem;
int nagle;
- ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
+ struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (conn == NULL)
return -ENOENT;
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- struct list_head zlist;
- ksock_tx_t *tx;
+ struct list_head zlist;
+ struct ksock_tx *tx;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+ tx = list_entry(zlist.next, struct ksock_tx, tx_list);
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
ksocknal_base_shutdown(void)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
- int i;
- int j;
+ struct ksock_sched *sched;
+ int i;
+ int j;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
atomic_read (&libcfs_kmemory));
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- ksock_sched_t *sched;
- int nthrs;
+ struct ksock_sched *sched;
+ int nthrs;
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*ksocknal_tunables.ksnd_nscheds > 0) {
static void
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
- ksock_peer_ni_t *peer_ni = NULL;
- struct list_head *tmp;
- int i;
+ struct ksock_peer_ni *peer_ni = NULL;
+ struct list_head *tmp;
+ int i;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
if (peer_ni->ksnp_ni == ni) break;
}
if (peer_ni != NULL) {
- ksock_route_t *route;
- ksock_conn_t *conn;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
"closing %d, accepting %d, err %d, zcookie %llu, "
!list_empty(&peer_ni->ksnp_zc_req_list));
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
"del %d\n", atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
}
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
atomic_read(&conn->ksnc_conn_refcount),
atomic_read(&conn->ksnc_sock_refcount),
void
ksocknal_shutdown(struct lnet_ni *ni)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
struct lnet_process_id anyid = {
.nid = LNET_NID_ANY,
.pid = LNET_PID_ANY,
}
static int
-ksocknal_enumerate_interfaces(ksock_net_t *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net)
{
- char **names;
- int i;
- int j;
- int rc;
- int n;
+ char **names;
+ int i;
+ int j;
+ int rc;
+ int n;
n = lnet_ipif_enumerate(&names);
if (n <= 0) {
}
static int
-ksocknal_search_new_ipif(ksock_net_t *net)
+ksocknal_search_new_ipif(struct ksock_net *net)
{
- int new_ipif = 0;
- int i;
+ int new_ipif = 0;
+ int i;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
- char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
- char *colon = strchr(ifnam, ':');
- int found = 0;
- ksock_net_t *tmp;
- int j;
+ char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
+ char *colon = strchr(ifnam, ':');
+ int found = 0;
+ struct ksock_net *tmp;
+ int j;
if (colon != NULL) /* ignore alias device */
*colon = 0;
}
for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
- ksock_sched_t *sched;
+ long id;
+ char name[20];
+ struct ksock_sched *sched;
+
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
}
static int
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
{
- int newif = ksocknal_search_new_ipif(net);
- int rc;
- int i;
+ int newif = ksocknal_search_new_ipif(net);
+ int rc;
+ int i;
if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
return -EINVAL;
int
ksocknal_startup(struct lnet_ni *ni)
{
- ksock_net_t *net;
- int rc;
- int i;
+ struct ksock_net *net;
+ int rc;
+ int i;
struct net_device *net_dev;
int node_id;