Change the typedefs in the ksocklnd LND to proper structures.
Several other style changes to fix checkpatch issues with
code impacted by the typedef change.
Test-Parameters: trivial
Change-Id: I78d69aea46721f4e97a1775c64ae0d59879aa4fd
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/28275
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: Olaf Weber <olaf.weber@hpe.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
#include "socklnd.h"
static struct lnet_lnd the_ksocklnd;
#include "socklnd.h"
static struct lnet_lnd the_ksocklnd;
-ksock_nal_data_t ksocknal_data;
+struct ksock_nal_data ksocknal_data;
-static ksock_interface_t *
+static struct ksock_interface *
ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT(i < LNET_NUM_INTERFACES);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT(i < LNET_NUM_INTERFACES);
-static ksock_route_t *
-ksocknal_create_route (__u32 ipaddr, int port)
+static struct ksock_route *
+ksocknal_create_route(__u32 ipaddr, int port)
+ struct ksock_route *route;
LIBCFS_ALLOC (route, sizeof (*route));
if (route == NULL)
LIBCFS_ALLOC (route, sizeof (*route));
if (route == NULL)
-ksocknal_destroy_route (ksock_route_t *route)
+ksocknal_destroy_route(struct ksock_route *route)
{
LASSERT (atomic_read(&route->ksnr_refcount) == 0);
{
LASSERT (atomic_read(&route->ksnr_refcount) == 0);
-ksocknal_create_peer(ksock_peer_ni_t **peerp, struct lnet_ni *ni,
+ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
struct lnet_process_id id)
{
struct lnet_process_id id)
{
- int cpt = lnet_cpt_of_nid(id.nid, ni);
- ksock_net_t *net = ni->ni_data;
- ksock_peer_ni_t *peer_ni;
+ int cpt = lnet_cpt_of_nid(id.nid, ni);
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_peer_ni *peer_ni;
LASSERT(id.nid != LNET_NID_ANY);
LASSERT(id.pid != LNET_PID_ANY);
LASSERT(id.nid != LNET_NID_ANY);
LASSERT(id.pid != LNET_PID_ANY);
-ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni)
+ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
- ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
+ struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
CDEBUG (D_NET, "peer_ni %s %p deleted\n",
libcfs_id2str(peer_ni->ksnp_id), peer_ni);
CDEBUG (D_NET, "peer_ni %s %p deleted\n",
libcfs_id2str(peer_ni->ksnp_id), peer_ni);
spin_unlock_bh(&net->ksnn_lock);
}
spin_unlock_bh(&net->ksnn_lock);
}
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
list_for_each(tmp, peer_list) {
list_for_each(tmp, peer_list) {
-
- peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
LASSERT(!peer_ni->ksnp_closing);
LASSERT(!peer_ni->ksnp_closing);
ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
read_lock(&ksocknal_data.ksnd_global_lock);
peer_ni = ksocknal_find_peer_locked(ni, id);
read_lock(&ksocknal_data.ksnd_global_lock);
peer_ni = ksocknal_find_peer_locked(ni, id);
-ksocknal_unlink_peer_locked(ksock_peer_ni_t *peer_ni)
+ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
LASSERT(i < LNET_NUM_INTERFACES);
for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
LASSERT(i < LNET_NUM_INTERFACES);
struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
- ksock_peer_ni_t *peer_ni;
- struct list_head *ptmp;
- ksock_route_t *route;
- struct list_head *rtmp;
- int i;
- int j;
- int rc = -ENOENT;
+ struct ksock_peer_ni *peer_ni;
+ struct list_head *ptmp;
+ struct ksock_route *route;
+ struct list_head *rtmp;
+ int i;
+ int j;
+ int rc = -ENOENT;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
if (peer_ni->ksnp_ni != ni)
continue;
if (peer_ni->ksnp_ni != ni)
continue;
if (index-- > 0)
continue;
if (index-- > 0)
continue;
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
*id = peer_ni->ksnp_id;
ksnr_list);
*id = peer_ni->ksnp_id;
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
- ksock_peer_ni_t *peer_ni = route->ksnr_peer;
- int type = conn->ksnc_type;
- ksock_interface_t *iface;
+ struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ int type = conn->ksnc_type;
+ struct ksock_interface *iface;
conn->ksnc_route = route;
ksocknal_route_addref(route);
conn->ksnc_route = route;
ksocknal_route_addref(route);
-ksocknal_add_route_locked (ksock_peer_ni_t *peer_ni, ksock_route_t *route)
+ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
- ksock_conn_t *conn;
- ksock_route_t *route2;
+ struct ksock_conn *conn;
+ struct ksock_route *route2;
LASSERT(!peer_ni->ksnp_closing);
LASSERT(route->ksnr_peer == NULL);
LASSERT(!peer_ni->ksnp_closing);
LASSERT(route->ksnr_peer == NULL);
/* LASSERT(unique) */
list_for_each(tmp, &peer_ni->ksnp_routes) {
/* LASSERT(unique) */
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
list_for_each(tmp, &peer_ni->ksnp_conns) {
list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
continue;
if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
continue;
-ksocknal_del_route_locked (ksock_route_t *route)
+ksocknal_del_route_locked(struct ksock_route *route)
- ksock_peer_ni_t *peer_ni = route->ksnr_peer;
- ksock_interface_t *iface;
- ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
+ struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+ struct ksock_interface *iface;
+ struct ksock_conn *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
LASSERT(!route->ksnr_deleted);
/* Close associated conns */
list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
LASSERT(!route->ksnr_deleted);
/* Close associated conns */
list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_route != route)
continue;
if (conn->ksnc_route != route)
continue;
int port)
{
struct list_head *tmp;
int port)
{
struct list_head *tmp;
- ksock_peer_ni_t *peer_ni;
- ksock_peer_ni_t *peer2;
- ksock_route_t *route;
- ksock_route_t *route2;
- int rc;
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_peer_ni *peer2;
+ struct ksock_route *route;
+ struct ksock_route *route2;
+ int rc;
if (id.nid == LNET_NID_ANY ||
id.pid == LNET_PID_ANY)
if (id.nid == LNET_NID_ANY ||
id.pid == LNET_PID_ANY)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2 != NULL) {
peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2 != NULL) {
route2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_routes) {
route2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == ipaddr)
break;
if (route2->ksnr_ipaddr == ipaddr)
break;
-ksocknal_del_peer_locked (ksock_peer_ni_t *peer_ni, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
- ksock_conn_t *conn;
- ksock_route_t *route;
+ struct ksock_conn *conn;
+ struct ksock_route *route;
struct list_head *tmp;
struct list_head *nxt;
struct list_head *tmp;
struct list_head *nxt;
LASSERT(!peer_ni->ksnp_closing);
LASSERT(!peer_ni->ksnp_closing);
ksocknal_peer_addref(peer_ni);
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
ksocknal_peer_addref(peer_ni);
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* no match */
if (!(ip == 0 || route->ksnr_ipaddr == ip))
/* no match */
if (!(ip == 0 || route->ksnr_ipaddr == ip))
nshared = 0;
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
nshared = 0;
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
nshared += route->ksnr_share_count;
}
nshared += route->ksnr_share_count;
}
* left */
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
* left */
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* we should only be removing auto-entries */
LASSERT(route->ksnr_share_count == 0);
/* we should only be removing auto-entries */
LASSERT(route->ksnr_share_count == 0);
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
ksocknal_close_conn_locked(conn, 0);
}
ksocknal_close_conn_locked(conn, 0);
}
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
static int
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
struct list_head *ptmp;
struct list_head *pnxt;
- ksock_peer_ni_t *peer_ni;
- int lo;
- int hi;
- int i;
- int rc = -ENOENT;
+ struct ksock_peer_ni *peer_ni;
+ int lo;
+ int hi;
+ int i;
+ int rc = -ENOENT;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
&ksocknal_data.ksnd_peers[i]) {
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
&ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
if (peer_ni->ksnp_ni != ni)
continue;
if (peer_ni->ksnp_ni != ni)
continue;
+static struct ksock_conn *
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_conn *conn;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
LASSERT(!peer_ni->ksnp_closing);
LASSERT(!peer_ni->ksnp_closing);
if (index-- > 0)
continue;
if (index-- > 0)
continue;
- conn = list_entry(ctmp, ksock_conn_t,
+ conn = list_entry(ctmp, struct ksock_conn,
ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data. \
ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data. \
+static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- ksock_sched_t *sched;
- int i;
+ struct ksock_sched *sched;
+ int i;
if (info->ksi_nthreads == 0) {
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
if (info->ksi_nthreads == 0) {
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
static int
ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
static int
ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
-ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
+ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
- int best_netmatch = 0;
- int best_xor = 0;
- int best = -1;
- int this_xor;
- int this_netmatch;
- int i;
+ int best_netmatch = 0;
+ int best_xor = 0;
+ int best = -1;
+ int this_xor;
+ int this_netmatch;
+ int i;
for (i = 0; i < nips; i++) {
if (ips[i] == 0)
for (i = 0; i < nips; i++) {
if (ips[i] == 0)
-ksocknal_select_ips(ksock_peer_ni_t *peer_ni, __u32 *peerips, int n_peerips)
+ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- ksock_net_t *net = peer_ni->ksnp_ni->ni_data;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
- int n_ips;
- int i;
- int j;
- int k;
- __u32 ip;
- __u32 xor;
- int this_netmatch;
- int best_netmatch;
- int best_npeers;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
+ int n_ips;
+ int i;
+ int j;
+ int k;
+ u32 ip;
+ u32 xor;
+ int this_netmatch;
+ int best_netmatch;
+ int best_npeers;
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
-ksocknal_create_routes(ksock_peer_ni_t *peer_ni, int port,
+ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
- ksock_route_t *newroute = NULL;
+ struct ksock_route *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
struct lnet_ni *ni = peer_ni->ksnp_ni;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
struct lnet_ni *ni = peer_ni->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
+ struct ksock_route *route;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
int best_netmatch;
int this_netmatch;
int best_nroutes;
int best_netmatch;
int this_netmatch;
int best_nroutes;
/* Already got a route? */
route = NULL;
list_for_each(rtmp, &peer_ni->ksnp_routes) {
/* Already got a route? */
route = NULL;
list_for_each(rtmp, &peer_ni->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ route = list_entry(rtmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
break;
if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
break;
/* Using this interface already? */
list_for_each(rtmp, &peer_ni->ksnp_routes) {
/* Using this interface already? */
list_for_each(rtmp, &peer_ni->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
int
ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
int
ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
- ksock_connreq_t *cr;
- int rc;
- __u32 peer_ip;
- int peer_port;
+ struct ksock_connreq *cr;
+ int rc;
+ u32 peer_ip;
+ int peer_port;
rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
LASSERT(rc == 0); /* we succeeded before */
rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
LASSERT(rc == 0); /* we succeeded before */
-ksocknal_connecting (ksock_peer_ni_t *peer_ni, __u32 ipaddr)
+ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
+ struct ksock_route *route;
list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
-ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
+ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
struct socket *sock, int type)
{
struct socket *sock, int type)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
struct lnet_process_id peerid;
struct lnet_process_id peerid;
- struct list_head *tmp;
- __u64 incarnation;
- ksock_conn_t *conn;
- ksock_conn_t *conn2;
- ksock_peer_ni_t *peer_ni = NULL;
- ksock_peer_ni_t *peer2;
- ksock_sched_t *sched;
+ struct list_head *tmp;
+ u64 incarnation;
+ struct ksock_conn *conn;
+ struct ksock_conn *conn2;
+ struct ksock_peer_ni *peer_ni = NULL;
+ struct ksock_peer_ni *peer2;
+ struct ksock_sched *sched;
struct ksock_hello_msg *hello;
struct ksock_hello_msg *hello;
- int cpt;
- ksock_tx_t *tx;
- ksock_tx_t *txtmp;
- int rc;
- int rc2;
- int active;
- char *warn = NULL;
+ int cpt;
+ struct ksock_tx *tx;
+ struct ksock_tx *txtmp;
+ int rc;
+ int rc2;
+ int active;
+ char *warn = NULL;
active = (route != NULL);
active = (route != NULL);
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
peer2 = ksocknal_find_peer_locked(ni, peerid);
if (peer2 == NULL) {
peer2 = ksocknal_find_peer_locked(ni, peerid);
if (peer2 == NULL) {
* loopback connection */
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer_ni->ksnp_conns) {
* loopback connection */
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
* by routes in my peer_ni to match my own route entries so I don't
* continually create duplicate routes. */
list_for_each(tmp, &peer_ni->ksnp_routes) {
* by routes in my peer_ni to match my own route entries so I don't
* continually create duplicate routes. */
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
continue;
if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
continue;
-ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
+ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
/* This just does the immmediate housekeeping, and queues the
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context */
{
/* This just does the immmediate housekeeping, and queues the
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context */
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_route_t *route;
- ksock_conn_t *conn2;
- struct list_head *tmp;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_route *route;
+ struct ksock_conn *conn2;
+ struct list_head *tmp;
LASSERT(peer_ni->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
LASSERT(peer_ni->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
conn2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_conns) {
conn2 = NULL;
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_route == route &&
conn2->ksnc_type == conn->ksnc_type)
if (conn2->ksnc_route == route &&
conn2->ksnc_type == conn->ksnc_type)
/* No more connections to this peer_ni */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
/* No more connections to this peer_ni */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
-ksocknal_peer_failed (ksock_peer_ni_t *peer_ni)
+ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
time64_t last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
time64_t last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
+ksocknal_finalize_zcreq(struct ksock_conn *conn)
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
- struct list_head zlist = LIST_HEAD_INIT(zlist);
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *tmp;
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
/* NB safe to finalize TXs because closing of socket will
* abort all buffered data */
/* NB safe to finalize TXs because closing of socket will
* abort all buffered data */
spin_unlock(&peer_ni->ksnp_lock);
while (!list_empty(&zlist)) {
spin_unlock(&peer_ni->ksnp_lock);
while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
-ksocknal_terminate_conn(ksock_conn_t *conn)
+ksocknal_terminate_conn(struct ksock_conn *conn)
{
/* This gets called by the reaper (guaranteed thread context) to
* disengage the socket from its callbacks and close it.
* ksnc_refcount will eventually hit zero, and then the reaper will
* destroy it. */
{
/* This gets called by the reaper (guaranteed thread context) to
* disengage the socket from its callbacks and close it.
* ksnc_refcount will eventually hit zero, and then the reaper will
* destroy it. */
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_sched_t *sched = conn->ksnc_scheduler;
- int failed = 0;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
+ int failed = 0;
LASSERT(conn->ksnc_closing);
LASSERT(conn->ksnc_closing);
-ksocknal_queue_zombie_conn (ksock_conn_t *conn)
+ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
/* Queue the conn for the reaper to destroy */
{
/* Queue the conn for the reaper to destroy */
LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-ksocknal_destroy_conn(ksock_conn_t *conn)
+ksocknal_destroy_conn(struct ksock_conn *conn)
-ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni, __u32 ipaddr, int why)
+ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
- ksock_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
+ struct ksock_conn *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (ipaddr == 0 ||
conn->ksnc_ipaddr == ipaddr) {
if (ipaddr == 0 ||
conn->ksnc_ipaddr == ipaddr) {
-ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
+ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- __u32 ipaddr = conn->ksnc_ipaddr;
- int count;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ u32 ipaddr = conn->ksnc_ipaddr;
+ int count;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
int
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
int
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
- ksock_peer_ni_t *peer_ni;
- struct list_head *ptmp;
- struct list_head *pnxt;
- int lo;
- int hi;
- int i;
- int count = 0;
+ struct ksock_peer_ni *peer_ni;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ int lo;
+ int hi;
+ int i;
+ int count = 0;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
int connect = 1;
time64_t last_alive = 0;
time64_t now = ktime_get_seconds();
int connect = 1;
time64_t last_alive = 0;
time64_t now = ktime_get_seconds();
- ksock_peer_ni_t *peer_ni = NULL;
+ struct ksock_peer_ni *peer_ni = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = {
.nid = nid,
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = {
.nid = nid,
- peer_ni = ksocknal_find_peer_locked(ni, id);
- if (peer_ni != NULL) {
- struct list_head *tmp;
- ksock_conn_t *conn;
- int bufnob;
+ peer_ni = ksocknal_find_peer_locked(ni, id);
+ if (peer_ni != NULL) {
+ struct list_head *tmp;
+ struct ksock_conn *conn;
+ int bufnob;
list_for_each(tmp, &peer_ni->ksnp_conns) {
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
-ksocknal_push_peer (ksock_peer_ni_t *peer_ni)
+ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
- int index;
- int i;
- struct list_head *tmp;
- ksock_conn_t *conn;
+ int index;
+ int i;
+ struct list_head *tmp;
+ struct ksock_conn *conn;
for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each(tmp, &peer_ni->ksnp_conns) {
if (i++ == index) {
list_for_each(tmp, &peer_ni->ksnp_conns) {
if (i++ == index) {
- conn = list_entry(tmp, ksock_conn_t,
- ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn,
+ ksnc_list);
ksocknal_conn_addref(conn);
break;
}
ksocknal_conn_addref(conn);
break;
}
int peer_off; /* searching offset in peer_ni hash table */
for (peer_off = 0; ; peer_off++) {
int peer_off; /* searching offset in peer_ni hash table */
for (peer_off = 0; ; peer_off++) {
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
int i = 0;
read_lock(&ksocknal_data.ksnd_global_lock);
int i = 0;
read_lock(&ksocknal_data.ksnd_global_lock);
static int
ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
static int
ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
int rc;
int i;
int j;
struct list_head *ptmp;
int rc;
int i;
int j;
struct list_head *ptmp;
- ksock_peer_ni_t *peer_ni;
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_route *route;
if (ipaddress == 0 ||
netmask == 0)
if (ipaddress == 0 ||
netmask == 0)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, ksock_peer_ni_t,
+ peer_ni = list_entry(ptmp, struct ksock_peer_ni,
ksnp_list);
for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
ksnp_list);
for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
list_for_each(rtmp, &peer_ni->ksnp_routes) {
route = list_entry(rtmp,
list_for_each(rtmp, &peer_ni->ksnp_routes) {
route = list_entry(rtmp,
ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
-ksocknal_peer_del_interface_locked(ksock_peer_ni_t *peer_ni, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_route_t *route;
- ksock_conn_t *conn;
- int i;
- int j;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
+ int i;
+ int j;
for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_myipaddr != ipaddr)
continue;
if (route->ksnr_myipaddr != ipaddr)
continue;
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
}
list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked (conn, 0);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked (conn, 0);
static int
ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
static int
ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
- ksock_net_t *net = ni->ni_data;
- int rc = -ENOENT;
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_peer_ni_t *peer_ni;
- __u32 this_ip;
- int i;
- int j;
+ struct ksock_net *net = ni->ni_data;
+ int rc = -ENOENT;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ struct ksock_peer_ni *peer_ni;
+ u32 this_ip;
+ int i;
+ int j;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer_ni = list_entry(tmp, ksock_peer_ni_t,
- ksnp_list);
+ &ksocknal_data.ksnd_peers[j]) {
+ peer_ni = list_entry(tmp, struct ksock_peer_ni,
+ ksnp_list);
if (peer_ni->ksnp_ni != ni)
continue;
if (peer_ni->ksnp_ni != ni)
continue;
switch(cmd) {
case IOC_LIBCFS_GET_INTERFACE: {
switch(cmd) {
case IOC_LIBCFS_GET_INTERFACE: {
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
read_lock(&ksocknal_data.ksnd_global_lock);
read_lock(&ksocknal_data.ksnd_global_lock);
int txmem;
int rxmem;
int nagle;
int txmem;
int rxmem;
int nagle;
- ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
+ struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (conn == NULL)
return -ENOENT;
if (conn == NULL)
return -ENOENT;
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- struct list_head zlist;
- ksock_tx_t *tx;
+ struct list_head zlist;
+ struct ksock_tx *tx;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
while (!list_empty(&zlist)) {
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+ tx = list_entry(zlist.next, struct ksock_tx, tx_list);
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
ksocknal_base_shutdown(void)
{
struct ksock_sched_info *info;
ksocknal_base_shutdown(void)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
- int i;
- int j;
+ struct ksock_sched *sched;
+ int i;
+ int j;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
atomic_read (&libcfs_kmemory));
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
atomic_read (&libcfs_kmemory));
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- ksock_sched_t *sched;
- int nthrs;
+ struct ksock_sched *sched;
+ int nthrs;
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*ksocknal_tunables.ksnd_nscheds > 0) {
static void
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
static void
ksocknal_debug_peerhash(struct lnet_ni *ni)
{
- ksock_peer_ni_t *peer_ni = NULL;
- struct list_head *tmp;
- int i;
+ struct ksock_peer_ni *peer_ni = NULL;
+ struct list_head *tmp;
+ int i;
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(tmp, ksock_peer_ni_t, ksnp_list);
+ peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
if (peer_ni->ksnp_ni == ni) break;
if (peer_ni->ksnp_ni == ni) break;
- ksock_route_t *route;
- ksock_conn_t *conn;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
"closing %d, accepting %d, err %d, zcookie %llu, "
CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
"closing %d, accepting %d, err %d, zcookie %llu, "
!list_empty(&peer_ni->ksnp_zc_req_list));
list_for_each(tmp, &peer_ni->ksnp_routes) {
!list_empty(&peer_ni->ksnp_zc_req_list));
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
"del %d\n", atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
"del %d\n", atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
}
list_for_each(tmp, &peer_ni->ksnp_conns) {
}
list_for_each(tmp, &peer_ni->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
atomic_read(&conn->ksnc_conn_refcount),
atomic_read(&conn->ksnc_sock_refcount),
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
atomic_read(&conn->ksnc_conn_refcount),
atomic_read(&conn->ksnc_sock_refcount),
void
ksocknal_shutdown(struct lnet_ni *ni)
{
void
ksocknal_shutdown(struct lnet_ni *ni)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
struct lnet_process_id anyid = {
.nid = LNET_NID_ANY,
.pid = LNET_PID_ANY,
struct lnet_process_id anyid = {
.nid = LNET_NID_ANY,
.pid = LNET_PID_ANY,
-ksocknal_enumerate_interfaces(ksock_net_t *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net)
- char **names;
- int i;
- int j;
- int rc;
- int n;
+ char **names;
+ int i;
+ int j;
+ int rc;
+ int n;
n = lnet_ipif_enumerate(&names);
if (n <= 0) {
n = lnet_ipif_enumerate(&names);
if (n <= 0) {
-ksocknal_search_new_ipif(ksock_net_t *net)
+ksocknal_search_new_ipif(struct ksock_net *net)
- int new_ipif = 0;
- int i;
+ int new_ipif = 0;
+ int i;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
for (i = 0; i < net->ksnn_ninterfaces; i++) {
- char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
- char *colon = strchr(ifnam, ':');
- int found = 0;
- ksock_net_t *tmp;
- int j;
+ char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
+ char *colon = strchr(ifnam, ':');
+ int found = 0;
+ struct ksock_net *tmp;
+ int j;
if (colon != NULL) /* ignore alias device */
*colon = 0;
if (colon != NULL) /* ignore alias device */
*colon = 0;
}
for (i = 0; i < nthrs; i++) {
}
for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
- ksock_sched_t *sched;
+ long id;
+ char name[20];
+ struct ksock_sched *sched;
+
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
- int newif = ksocknal_search_new_ipif(net);
- int rc;
- int i;
+ int newif = ksocknal_search_new_ipif(net);
+ int rc;
+ int i;
if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
return -EINVAL;
if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
return -EINVAL;
int
ksocknal_startup(struct lnet_ni *ni)
{
int
ksocknal_startup(struct lnet_ni *ni)
{
- ksock_net_t *net;
- int rc;
- int i;
+ struct ksock_net *net;
+ int rc;
+ int i;
struct net_device *net_dev;
int node_id;
struct net_device *net_dev;
int node_id;
-typedef struct /* per scheduler state */
-{
+struct ksock_sched { /* per scheduler state */
spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
/* conn waiting to be written */
spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
/* conn waiting to be written */
#if !SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_SINGLE_FRAG_RX
struct kvec kss_scratch_iov[LNET_MAX_IOV];
#endif
#if !SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_SINGLE_FRAG_RX
struct kvec kss_scratch_iov[LNET_MAX_IOV];
#endif
struct ksock_sched_info {
int ksi_nthreads_max; /* max allowed threads */
int ksi_nthreads; /* number of threads */
int ksi_cpt; /* CPT id */
struct ksock_sched_info {
int ksi_nthreads_max; /* max allowed threads */
int ksi_nthreads; /* number of threads */
int ksi_cpt; /* CPT id */
- ksock_sched_t *ksi_scheds; /* array of schedulers */
+ struct ksock_sched *ksi_scheds; /* array of schedulers */
};
#define KSOCK_CPT_SHIFT 16
};
#define KSOCK_CPT_SHIFT 16
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
-typedef struct /* in-use interface */
-{
+struct ksock_interface { /* in-use interface */
__u32 ksni_ipaddr; /* interface's IP address */
__u32 ksni_netmask; /* interface's network mask */
int ksni_nroutes; /* # routes using (active) */
int ksni_npeers; /* # peers using (passive) */
char ksni_name[IFNAMSIZ]; /* interface name */
__u32 ksni_ipaddr; /* interface's IP address */
__u32 ksni_netmask; /* interface's network mask */
int ksni_nroutes; /* # routes using (active) */
int ksni_npeers; /* # peers using (passive) */
char ksni_name[IFNAMSIZ]; /* interface name */
/* "stuck" socket timeout (seconds) */
int *ksnd_timeout;
/* # scheduler threads in each pool while starting */
/* "stuck" socket timeout (seconds) */
int *ksnd_timeout;
/* # scheduler threads in each pool while starting */
#if SOCKNAL_VERSION_DEBUG
int *ksnd_protocol; /* protocol version */
#endif
#if SOCKNAL_VERSION_DEBUG
int *ksnd_protocol; /* protocol version */
#endif
__u64 ksnn_incarnation; /* my epoch */
spinlock_t ksnn_lock; /* serialise */
struct list_head ksnn_list; /* chain on global list */
int ksnn_npeers; /* # peers */
int ksnn_shutdown; /* shutting down? */
int ksnn_ninterfaces; /* IP interfaces */
__u64 ksnn_incarnation; /* my epoch */
spinlock_t ksnn_lock; /* serialise */
struct list_head ksnn_list; /* chain on global list */
int ksnn_npeers; /* # peers */
int ksnn_shutdown; /* shutting down? */
int ksnn_ninterfaces; /* IP interfaces */
- ksock_interface_t ksnn_interfaces[LNET_NUM_INTERFACES];
-} ksock_net_t;
+ struct ksock_interface ksnn_interfaces[LNET_NUM_INTERFACES];
+};
/** connd timeout */
#define SOCKNAL_CONND_TIMEOUT 120
/** reserved thread for accepting & creating new connd */
#define SOCKNAL_CONND_RESV 1
/** connd timeout */
#define SOCKNAL_CONND_TIMEOUT 120
/** reserved thread for accepting & creating new connd */
#define SOCKNAL_CONND_RESV 1
int ksnd_init; /* initialisation state */
int ksnd_nnets; /* # networks set up */
struct list_head ksnd_nets; /* list of nets */
int ksnd_init; /* initialisation state */
int ksnd_nnets; /* # networks set up */
struct list_head ksnd_nets; /* list of nets */
struct list_head ksnd_idle_noop_txs;
/* serialise, g_lock unsafe */
spinlock_t ksnd_tx_lock;
struct list_head ksnd_idle_noop_txs;
/* serialise, g_lock unsafe */
spinlock_t ksnd_tx_lock;
#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */
-typedef struct /* transmit packet */
-{
+struct ksock_tx { /* transmit packet */
struct list_head tx_list; /* queue on conn for transmission etc */
struct list_head tx_zc_list; /* queue on peer_ni for ZC request */
atomic_t tx_refcount; /* tx reference count */
struct list_head tx_list; /* queue on conn for transmission etc */
struct list_head tx_zc_list; /* queue on peer_ni for ZC request */
atomic_t tx_refcount; /* tx reference count */
struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
-#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+#define KSOCK_NOOP_TX_SIZE ((int)offsetof(struct ksock_tx, tx_frags.paged.kiov[0]))
-/* network zero copy callback descriptor embedded in ksock_tx_t */
+/* network zero copy callback descriptor embedded in struct ksock_tx */
/* space for the rx frag descriptors; we either read a single contiguous
* header, or up to LNET_MAX_IOV frags of payload of either type. */
/* space for the rx frag descriptors; we either read a single contiguous
* header, or up to LNET_MAX_IOV frags of payload of either type. */
-typedef union {
- struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
-} ksock_rxiovspace_t;
+union ksock_rxiovspace {
+ struct kvec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
+};
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */
-typedef struct ksock_conn
-{
- struct ksock_peer *ksnc_peer; /* owning peer_ni */
- struct ksock_route *ksnc_route; /* owning route */
+struct ksock_conn {
+ struct ksock_peer_ni *ksnc_peer; /* owning peer_ni */
+ struct ksock_route *ksnc_route; /* owning route */
struct list_head ksnc_list; /* stash on peer_ni's conn list */
struct socket *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
void *ksnc_saved_write_space; /* socket's original write_space() callback */
atomic_t ksnc_conn_refcount; /* conn refcount */
atomic_t ksnc_sock_refcount; /* sock refcount */
struct list_head ksnc_list; /* stash on peer_ni's conn list */
struct socket *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
void *ksnc_saved_write_space; /* socket's original write_space() callback */
atomic_t ksnc_conn_refcount; /* conn refcount */
atomic_t ksnc_sock_refcount; /* sock refcount */
- ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
+ struct ksock_sched *ksnc_scheduler; /* who schedules this connection */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer_ni's IP */
int ksnc_port; /* peer_ni's port */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer_ni's IP */
int ksnc_port; /* peer_ni's port */
struct kvec *ksnc_rx_iov; /* the kvec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
struct kvec *ksnc_rx_iov; /* the kvec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
- ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */
+ union ksock_rxiovspace ksnc_rx_iov_space;/* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming data */
void *ksnc_cookie; /* rx lnet_finalize passthru arg */
struct ksock_msg ksnc_msg; /* incoming message buffer:
__u32 ksnc_rx_csum; /* partial checksum for incoming data */
void *ksnc_cookie; /* rx lnet_finalize passthru arg */
struct ksock_msg ksnc_msg; /* incoming message buffer:
/* packets waiting to be sent */
struct list_head ksnc_tx_queue;
/* next TX that can carry a LNet message or ZC-ACK */
/* packets waiting to be sent */
struct list_head ksnc_tx_queue;
/* next TX that can carry a LNet message or ZC-ACK */
- ksock_tx_t *ksnc_tx_carrier;
+ struct ksock_tx *ksnc_tx_carrier;
/* when (in seconds) tx times out */
time64_t ksnc_tx_deadline;
/* send buffer marker */
/* when (in seconds) tx times out */
time64_t ksnc_tx_deadline;
/* send buffer marker */
int ksnc_tx_scheduled;
/* time stamp of the last posted TX */
time64_t ksnc_tx_last_post;
int ksnc_tx_scheduled;
/* time stamp of the last posted TX */
time64_t ksnc_tx_last_post;
-typedef struct ksock_route
-{
struct list_head ksnr_list; /* chain on peer_ni route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct list_head ksnr_list; /* chain on peer_ni route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
- struct ksock_peer *ksnr_peer; /* owning peer_ni */
+ struct ksock_peer_ni *ksnr_peer; /* owning peer_ni */
atomic_t ksnr_refcount; /* # users */
time64_t ksnr_timeout; /* when (in secs) reconnection can happen next */
time64_t ksnr_retry_interval; /* how long between retries */
atomic_t ksnr_refcount; /* # users */
time64_t ksnr_timeout; /* when (in secs) reconnection can happen next */
time64_t ksnr_retry_interval; /* how long between retries */
unsigned int ksnr_deleted:1; /* been removed from peer_ni? */
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this route */
unsigned int ksnr_deleted:1; /* been removed from peer_ni? */
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this route */
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
-typedef struct ksock_peer
-{
struct list_head ksnp_list; /* stash on global peer_ni list */
time64_t ksnp_last_alive;/* when (in seconds) I was last alive */
struct lnet_process_id ksnp_id; /* who's on the other end(s) */
struct list_head ksnp_list; /* stash on global peer_ni list */
time64_t ksnp_last_alive;/* when (in seconds) I was last alive */
struct lnet_process_id ksnp_id; /* who's on the other end(s) */
struct lnet_ni *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_NUM_INTERFACES]; /* preferred local interfaces */
struct lnet_ni *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_NUM_INTERFACES]; /* preferred local interfaces */
-typedef struct ksock_connreq
-{
/* stash on ksnd_connd_connreqs */
struct list_head ksncr_list;
/* chosen NI */
struct lnet_ni *ksncr_ni;
/* accepted socket */
struct socket *ksncr_sock;
/* stash on ksnd_connd_connreqs */
struct list_head ksncr_list;
/* chosen NI */
struct lnet_ni *ksncr_ni;
/* accepted socket */
struct socket *ksncr_sock;
-extern ksock_nal_data_t ksocknal_data;
-extern ksock_tunables_t ksocknal_tunables;
+extern struct ksock_nal_data ksocknal_data;
+extern struct ksock_tunables ksocknal_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
-typedef struct ksock_proto
-{
int pro_version; /* version number of protocol */
int pro_version; /* version number of protocol */
- int (*pro_send_hello)(ksock_conn_t *, struct ksock_hello_msg *); /* handshake function */
- int (*pro_recv_hello)(ksock_conn_t *, struct ksock_hello_msg *, int);/* handshake function */
- void (*pro_pack)(ksock_tx_t *); /* message pack */
+ int (*pro_send_hello)(struct ksock_conn *, struct ksock_hello_msg *); /* handshake function */
+ int (*pro_recv_hello)(struct ksock_conn *, struct ksock_hello_msg *, int);/* handshake function */
+ void (*pro_pack)(struct ksock_tx *); /* message pack */
void (*pro_unpack)(struct ksock_msg *); /* message unpack */
void (*pro_unpack)(struct ksock_msg *); /* message unpack */
- ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */
- int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); /* queue ZC ack on the connection */
- int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); /* handle ZC request */
- int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); /* handle ZC ACK */
- int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); /* msg type matches the connection type:
+ struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *); /* queue tx on the connection */
+ int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64); /* queue ZC ack on the connection */
+ int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int); /* handle ZC request */
+ int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64); /* handle ZC ACK */
+ int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int); /* msg type matches the connection type:
* return value:
* return MATCH_NO : no
* return MATCH_YES : matching type
* return MATCH_MAY : can be backup */
* return value:
* return MATCH_NO : no
* return MATCH_YES : matching type
* return MATCH_MAY : can be backup */
-extern ksock_proto_t ksocknal_protocol_v1x;
-extern ksock_proto_t ksocknal_protocol_v2x;
-extern ksock_proto_t ksocknal_protocol_v3x;
+extern struct ksock_proto ksocknal_protocol_v1x;
+extern struct ksock_proto ksocknal_protocol_v2x;
+extern struct ksock_proto ksocknal_protocol_v3x;
#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
-ksocknal_conn_addref (ksock_conn_t *conn)
+ksocknal_conn_addref(struct ksock_conn *conn)
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
atomic_inc(&conn->ksnc_conn_refcount);
}
atomic_inc(&conn->ksnc_conn_refcount);
}
-extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
-extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+extern void ksocknal_queue_zombie_conn(struct ksock_conn *conn);
+extern void ksocknal_finalize_zcreq(struct ksock_conn *conn);
-ksocknal_conn_decref (ksock_conn_t *conn)
+ksocknal_conn_decref(struct ksock_conn *conn)
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
ksocknal_queue_zombie_conn(conn);
}
static inline int
if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
ksocknal_queue_zombie_conn(conn);
}
static inline int
-ksocknal_connsock_addref (ksock_conn_t *conn)
+ksocknal_connsock_addref(struct ksock_conn *conn)
read_lock(&ksocknal_data.ksnd_global_lock);
if (!conn->ksnc_closing) {
read_lock(&ksocknal_data.ksnd_global_lock);
if (!conn->ksnc_closing) {
-ksocknal_connsock_decref (ksock_conn_t *conn)
+ksocknal_connsock_decref(struct ksock_conn *conn)
- LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
+ LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
LASSERT (conn->ksnc_closing);
sock_release(conn->ksnc_sock);
if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
LASSERT (conn->ksnc_closing);
sock_release(conn->ksnc_sock);
-ksocknal_tx_addref (ksock_tx_t *tx)
+ksocknal_tx_addref(struct ksock_tx *tx)
- LASSERT (atomic_read(&tx->tx_refcount) > 0);
+ LASSERT(atomic_read(&tx->tx_refcount) > 0);
atomic_inc(&tx->tx_refcount);
}
atomic_inc(&tx->tx_refcount);
}
-extern void ksocknal_tx_prep (ksock_conn_t *, ksock_tx_t *tx);
-extern void ksocknal_tx_done(struct lnet_ni *ni, ksock_tx_t *tx, int error);
+extern void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
+extern void ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int error);
-ksocknal_tx_decref (ksock_tx_t *tx)
+ksocknal_tx_decref(struct ksock_tx *tx)
- LASSERT (atomic_read(&tx->tx_refcount) > 0);
+ LASSERT(atomic_read(&tx->tx_refcount) > 0);
if (atomic_dec_and_test(&tx->tx_refcount))
ksocknal_tx_done(NULL, tx, 0);
}
static inline void
if (atomic_dec_and_test(&tx->tx_refcount))
ksocknal_tx_done(NULL, tx, 0);
}
static inline void
-ksocknal_route_addref (ksock_route_t *route)
+ksocknal_route_addref(struct ksock_route *route)
- LASSERT (atomic_read(&route->ksnr_refcount) > 0);
+ LASSERT(atomic_read(&route->ksnr_refcount) > 0);
atomic_inc(&route->ksnr_refcount);
}
atomic_inc(&route->ksnr_refcount);
}
-extern void ksocknal_destroy_route (ksock_route_t *route);
+extern void ksocknal_destroy_route(struct ksock_route *route);
-ksocknal_route_decref (ksock_route_t *route)
+ksocknal_route_decref(struct ksock_route *route)
- LASSERT (atomic_read (&route->ksnr_refcount) > 0);
+ LASSERT(atomic_read(&route->ksnr_refcount) > 0);
if (atomic_dec_and_test(&route->ksnr_refcount))
ksocknal_destroy_route (route);
}
static inline void
if (atomic_dec_and_test(&route->ksnr_refcount))
ksocknal_destroy_route (route);
}
static inline void
-ksocknal_peer_addref (ksock_peer_ni_t *peer_ni)
+ksocknal_peer_addref(struct ksock_peer_ni *peer_ni)
- LASSERT (atomic_read (&peer_ni->ksnp_refcount) > 0);
+ LASSERT(atomic_read(&peer_ni->ksnp_refcount) > 0);
atomic_inc(&peer_ni->ksnp_refcount);
}
atomic_inc(&peer_ni->ksnp_refcount);
}
-extern void ksocknal_destroy_peer (ksock_peer_ni_t *peer_ni);
+extern void ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni);
-ksocknal_peer_decref (ksock_peer_ni_t *peer_ni)
+ksocknal_peer_decref(struct ksock_peer_ni *peer_ni)
{
LASSERT (atomic_read (&peer_ni->ksnp_refcount) > 0);
if (atomic_dec_and_test(&peer_ni->ksnp_refcount))
{
LASSERT (atomic_read (&peer_ni->ksnp_refcount) > 0);
if (atomic_dec_and_test(&peer_ni->ksnp_refcount))
- ksocknal_destroy_peer (peer_ni);
+ ksocknal_destroy_peer(peer_ni);
}
int ksocknal_startup(struct lnet_ni *ni);
}
int ksocknal_startup(struct lnet_ni *ni);
int ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip,
int port);
int ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip,
int port);
-ksock_peer_ni_t *ksocknal_find_peer_locked(struct lnet_ni *ni,
+struct ksock_peer_ni *ksocknal_find_peer_locked(struct lnet_ni *ni,
struct lnet_process_id id);
struct lnet_process_id id);
-ksock_peer_ni_t *ksocknal_find_peer(struct lnet_ni *ni,
+struct ksock_peer_ni *ksocknal_find_peer(struct lnet_ni *ni,
struct lnet_process_id id);
struct lnet_process_id id);
-extern void ksocknal_peer_failed (ksock_peer_ni_t *peer_ni);
-extern int ksocknal_create_conn(struct lnet_ni *ni, ksock_route_t *route,
+extern void ksocknal_peer_failed(struct ksock_peer_ni *peer_ni);
+extern int ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
struct socket *sock, int type);
struct socket *sock, int type);
-extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why);
-extern void ksocknal_terminate_conn (ksock_conn_t *conn);
-extern void ksocknal_destroy_conn (ksock_conn_t *conn);
-extern int ksocknal_close_peer_conns_locked (ksock_peer_ni_t *peer_ni,
- __u32 ipaddr, int why);
-extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why);
+extern void ksocknal_close_conn_locked(struct ksock_conn *conn, int why);
+extern void ksocknal_terminate_conn(struct ksock_conn *conn);
+extern void ksocknal_destroy_conn(struct ksock_conn *conn);
+extern int ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni,
+ __u32 ipaddr, int why);
+extern int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
int ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr);
int ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr);
-extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni,
- ksock_tx_t *tx, int nonblk);
+extern struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni,
+ struct ksock_tx *tx, int nonblk);
-extern int ksocknal_launch_packet(struct lnet_ni *ni, ksock_tx_t *tx,
+extern int ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
struct lnet_process_id id);
struct lnet_process_id id);
-extern ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-extern void ksocknal_free_tx (ksock_tx_t *tx);
-extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
+extern struct ksock_tx *ksocknal_alloc_tx(int type, int size);
+extern void ksocknal_free_tx(struct ksock_tx *tx);
+extern struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+extern void ksocknal_next_tx_carrier(struct ksock_conn *conn);
+extern void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
-extern void ksocknal_thread_fini (void);
-extern void ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni);
-extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni);
-extern ksock_route_t *ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni);
-extern int ksocknal_new_packet (ksock_conn_t *conn, int skip);
-extern int ksocknal_scheduler (void *arg);
-extern int ksocknal_connd (void *arg);
-extern int ksocknal_reaper (void *arg);
-int ksocknal_send_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+extern void ksocknal_thread_fini(void);
+extern void ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni);
+extern struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni);
+extern struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni);
+extern int ksocknal_new_packet(struct ksock_conn *conn, int skip);
+extern int ksocknal_scheduler(void *arg);
+extern int ksocknal_connd(void *arg);
+extern int ksocknal_reaper(void *arg);
+int ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, struct ksock_hello_msg *hello);
lnet_nid_t peer_nid, struct ksock_hello_msg *hello);
-int ksocknal_recv_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+int ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
struct ksock_hello_msg *hello,
struct lnet_process_id *id,
__u64 *incarnation);
struct ksock_hello_msg *hello,
struct lnet_process_id *id,
__u64 *incarnation);
-extern void ksocknal_read_callback(ksock_conn_t *conn);
-extern void ksocknal_write_callback(ksock_conn_t *conn);
+extern void ksocknal_read_callback(struct ksock_conn *conn);
+extern void ksocknal_write_callback(struct ksock_conn *conn);
-extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-extern void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
+extern int ksocknal_lib_zc_capable(struct ksock_conn *conn);
+extern void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn);
+extern void ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn);
extern void ksocknal_lib_reset_callback(struct socket *sock,
extern void ksocknal_lib_reset_callback(struct socket *sock,
- ksock_conn_t *conn);
-extern void ksocknal_lib_push_conn(ksock_conn_t *conn);
-extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
+ struct ksock_conn *conn);
+extern void ksocknal_lib_push_conn(struct ksock_conn *conn);
+extern int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn);
extern int ksocknal_lib_setup_sock(struct socket *so);
extern int ksocknal_lib_setup_sock(struct socket *so);
-extern int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-extern int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-extern void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-extern int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-extern int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-extern int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
+extern int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
+extern int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
+extern void ksocknal_lib_eager_ack(struct ksock_conn *conn);
+extern int ksocknal_lib_recv_iov(struct ksock_conn *conn);
+extern int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+extern int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
int *rxmem, int *nagle);
extern int ksocknal_tunables_init(void);
int *rxmem, int *nagle);
extern int ksocknal_tunables_init(void);
-extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+extern void ksocknal_lib_csum_tx(struct ksock_tx *tx);
-extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+extern int ksocknal_lib_memory_pressure(struct ksock_conn *conn);
extern int ksocknal_lib_bind_thread_to_cpu(int id);
#endif /* _SOCKLND_SOCKLND_H_ */
extern int ksocknal_lib_bind_thread_to_cpu(int id);
#endif /* _SOCKLND_SOCKLND_H_ */
ksocknal_alloc_tx(int type, int size)
{
ksocknal_alloc_tx(int type, int size)
{
+ struct ksock_tx *tx = NULL;
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, ksock_tx_t, tx_list);
+ tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+ struct ksock_tx, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
if (tx == NULL) {
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
if (tx == NULL) {
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(struct ksock_tx *tx)
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
{
struct kvec *iov = tx->tx_iov;
int nob;
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
- lnet_kiov_t *kiov = tx->tx_kiov;
- int nob;
- int rc;
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int nob;
+ int rc;
LASSERT (tx->tx_niov == 0);
LASSERT (tx->tx_nkiov > 0);
LASSERT (tx->tx_niov == 0);
LASSERT (tx->tx_nkiov > 0);
-ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(struct ksock_conn *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn)
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
- int nob;
- int rc;
+ lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ int nob;
+ int rc;
LASSERT (conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
LASSERT (conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(struct ksock_conn *conn)
{
/* Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
{
/* Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
-ksocknal_tx_done(struct lnet_ni *ni, ksock_tx_t *tx, int rc)
+ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
ENTRY;
{
struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
ENTRY;
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
while (!list_empty(txlist)) {
while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, ksock_tx_t, tx_list);
+ tx = list_entry(txlist->next, struct ksock_tx, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
-ksocknal_check_zc_req(ksock_tx_t *tx)
+ksocknal_check_zc_req(struct ksock_tx *tx)
- ksock_conn_t *conn = tx->tx_conn;
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
+ struct ksock_conn *conn = tx->tx_conn;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
* to ksnp_zc_req_list if some fragment of this message should be sent
/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
* to ksnp_zc_req_list if some fragment of this message should be sent
-ksocknal_uncheck_zc_req(ksock_tx_t *tx)
+ksocknal_uncheck_zc_req(struct ksock_tx *tx)
- ksock_peer_ni_t *peer_ni = tx->tx_conn->ksnc_peer;
+ struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(struct ksock_route *route)
{
/* called holding write lock on ksnd_global_lock */
{
/* called holding write lock on ksnd_global_lock */
-ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni)
+ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
+ struct ksock_route *route;
/* called holding write lock on ksnd_global_lock */
for (;;) {
/* called holding write lock on ksnd_global_lock */
for (;;) {
-ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
+struct ksock_conn *
+ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
- ksock_conn_t *conn;
- ksock_conn_t *typed = NULL;
- ksock_conn_t *fallback = NULL;
- int tnob = 0;
- int fnob = 0;
+ struct ksock_conn *conn;
+ struct ksock_conn *typed = NULL;
+ struct ksock_conn *fallback = NULL;
+ int tnob = 0;
+ int fnob = 0;
list_for_each(tmp, &peer_ni->ksnp_conns) {
list_for_each(tmp, &peer_ni->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
+ struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
+ ksnc_list);
+ int nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
+ int rc;
LASSERT (!c->ksnc_closing);
LASSERT (c->ksnc_proto != NULL &&
LASSERT (!c->ksnc_closing);
LASSERT (c->ksnc_proto != NULL &&
-ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
conn->ksnc_proto->pro_pack(tx);
{
conn->ksnc_proto->pro_pack(tx);
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
- ksock_sched_t *sched = conn->ksnc_scheduler;
- struct ksock_msg *msg = &tx->tx_msg;
- ksock_tx_t *ztx = NULL;
- int bufnob = 0;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
+ struct ksock_msg *msg = &tx->tx_msg;
+ struct ksock_tx *ztx = NULL;
+ int bufnob = 0;
/* called holding global lock (read or irq-write) and caller may
* not have dropped this lock between finding conn and calling me,
/* called holding global lock (read or irq-write) and caller may
* not have dropped this lock between finding conn and calling me,
-ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
+struct ksock_route *
+ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
{
time64_t now = ktime_get_seconds();
struct list_head *tmp;
{
time64_t now = ktime_get_seconds();
struct list_head *tmp;
+ struct ksock_route *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
-ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni)
+struct ksock_route *
+ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
- struct list_head *tmp;
- ksock_route_t *route;
+ struct list_head *tmp;
+ struct ksock_route *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
-ksocknal_launch_packet(struct lnet_ni *ni, ksock_tx_t *tx,
+ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
struct lnet_process_id id)
{
struct lnet_process_id id)
{
- ksock_peer_ni_t *peer_ni;
- ksock_conn_t *conn;
- rwlock_t *g_lock;
- int retry;
- int rc;
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_conn *conn;
+ rwlock_t *g_lock;
+ int retry;
+ int rc;
LASSERT (tx->tx_conn == NULL);
LASSERT (tx->tx_conn == NULL);
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
LASSERT (!in_interrupt ());
if (payload_iov != NULL)
LASSERT (!in_interrupt ());
if (payload_iov != NULL)
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.virt.iov[1 + payload_niov]);
else
tx_frags.virt.iov[1 + payload_niov]);
else
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
{
static char ksocknal_slop_buffer[4096];
-
- int nob;
- unsigned int niov;
- int skipped;
+ int nob;
+ unsigned int niov;
+ int skipped;
LASSERT(conn->ksnc_proto != NULL);
LASSERT(conn->ksnc_proto != NULL);
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(struct ksock_conn *conn)
{
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
{
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
- ksock_conn_t *conn = (ksock_conn_t *)private;
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_conn *conn = private;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
LASSERT (niov <= LNET_MAX_IOV);
LASSERT (mlen <= rlen);
LASSERT (niov <= LNET_MAX_IOV);
-ksocknal_sched_cansleep(ksock_sched_t *sched)
+ksocknal_sched_cansleep(struct ksock_sched *sched)
int ksocknal_scheduler(void *arg)
{
struct ksock_sched_info *info;
int ksocknal_scheduler(void *arg)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
- int nloops = 0;
- long id = (long)arg;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
+ int rc;
+ int nloops = 0;
+ long id = (long)arg;
info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
+ struct ksock_conn, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
}
conn = list_entry(sched->kss_tx_conns.next,
}
conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
+ struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
+ struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(struct ksock_conn *conn)
+ struct ksock_sched *sched;
ENTRY;
sched = conn->ksnc_scheduler;
ENTRY;
sched = conn->ksnc_scheduler;
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback(ksock_conn_t *conn)
+void ksocknal_write_callback(struct ksock_conn *conn)
+ struct ksock_sched *sched;
ENTRY;
sched = conn->ksnc_scheduler;
ENTRY;
sched = conn->ksnc_scheduler;
+static struct ksock_proto *
ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
{
__u32 version = 0;
ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
{
__u32 version = 0;
-ksocknal_send_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+ struct ksock_net *net = (struct ksock_net *)ni->ni_data;
LASSERT(hello->kshm_nips <= LNET_NUM_INTERFACES);
LASSERT(hello->kshm_nips <= LNET_NUM_INTERFACES);
-ksocknal_recv_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
struct ksock_hello_msg *hello,
struct lnet_process_id *peerid,
__u64 *incarnation)
struct ksock_hello_msg *hello,
struct lnet_process_id *peerid,
__u64 *incarnation)
int timeout;
int proto_match;
int rc;
int timeout;
int proto_match;
int rc;
- ksock_proto_t *proto;
- struct lnet_process_id recv_id;
+ struct ksock_proto *proto;
+ struct lnet_process_id recv_id;
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(struct ksock_route *route)
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- ksock_peer_ni_t *peer_ni = route->ksnr_peer;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct ksock_peer_ni *peer_ni = route->ksnr_peer;
int type;
int wanted;
struct socket *sock;
int type;
int wanted;
struct socket *sock;
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
+ struct ksock_conn *conn;
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x */
if (!list_empty(&peer_ni->ksnp_conns)) {
conn = list_entry(peer_ni->ksnp_conns.next,
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x */
if (!list_empty(&peer_ni->ksnp_conns)) {
conn = list_entry(peer_ni->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
}
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
}
/* Go through connd_routes queue looking for a route that we can process
* right now, @timeout_p can be updated if we need to come back later */
/* Go through connd_routes queue looking for a route that we can process
* right now, @timeout_p can be updated if we need to come back later */
+static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
time64_t now = ktime_get_seconds();
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
time64_t now = ktime_get_seconds();
+ struct ksock_route *route;
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
- spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
- wait_queue_t wait;
- int nloops = 0;
- int cons_retry = 0;
+ spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
+ struct ksock_connreq *cr;
+ wait_queue_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
+ struct ksock_route *route = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
- next, ksock_connreq_t, ksncr_list);
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+ struct ksock_connreq, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
-static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni)
+static struct ksock_conn *
+ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
{
/* We're called with a shared lock on ksnd_global_lock */
{
/* We're called with a shared lock on ksnd_global_lock */
- ksock_conn_t *conn;
- struct list_head *ctmp;
+ struct ksock_conn *conn;
+ struct list_head *ctmp;
list_for_each(ctmp, &peer_ni->ksnp_conns) {
list_for_each(ctmp, &peer_ni->ksnp_conns) {
- int error;
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ int error;
+
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
-ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
+ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
- ksock_tx_t *tx;
- struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
+ struct ksock_tx *tx;
+ struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
while (!list_empty(&peer_ni->ksnp_tx_queue)) {
tx = list_entry(peer_ni->ksnp_tx_queue.next,
write_lock_bh(&ksocknal_data.ksnd_global_lock);
while (!list_empty(&peer_ni->ksnp_tx_queue)) {
tx = list_entry(peer_ni->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (ktime_get_seconds() < tx->tx_deadline)
break;
if (ktime_get_seconds() < tx->tx_deadline)
break;
-ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni)
+ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
__must_hold(&ksocknal_data.ksnd_global_lock)
{
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
/* last_alive will be updated by create_conn */
if (list_empty(&peer_ni->ksnp_conns))
/* last_alive will be updated by create_conn */
if (list_empty(&peer_ni->ksnp_conns))
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- ksock_peer_ni_t *peer_ni;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
again:
/* NB. We expect to have a look at all the peers and not find any
again:
/* NB. We expect to have a look at all the peers and not find any
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer_ni, peers, ksnp_list) {
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer_ni, peers, ksnp_list) {
+ struct ksock_tx *tx_stale;
time64_t deadline = 0;
int resid = 0;
int n = 0;
time64_t deadline = 0;
int resid = 0;
int n = 0;
/* we can't process stale txs right here because we're
* holding only shared lock */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
/* we can't process stale txs right here because we're
* holding only shared lock */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
- ksock_tx_t *tx =
- list_entry(peer_ni->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ tx = list_entry(peer_ni->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (ktime_get_seconds() >= tx->tx_deadline) {
ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
if (ktime_get_seconds() >= tx->tx_deadline) {
ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
int ksocknal_reaper(void *arg)
{
int ksocknal_reaper(void *arg)
{
- wait_queue_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- struct list_head enomem_conns;
- int nenomem_conns;
+ wait_queue_t wait;
+ struct ksock_conn *conn;
+ struct ksock_sched *sched;
+ struct list_head enomem_conns;
+ int nenomem_conns;
- int i;
- int peer_index = 0;
+ int i;
+ int peer_index = 0;
time64_t deadline = ktime_get_seconds();
cfs_block_allsigs ();
time64_t deadline = ktime_get_seconds();
cfs_block_allsigs ();
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry(ksocknal_data. \
- ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
+ conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
}
if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry(ksocknal_data.ksnd_zombie_conns.\
- next, ksock_conn_t, ksnc_list);
+ conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
nenomem_conns = 0;
while (!list_empty(&enomem_conns)) {
conn = list_entry(enomem_conns.next,
nenomem_conns = 0;
while (!list_empty(&enomem_conns)) {
conn = list_entry(enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
+ struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
-ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
+ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
{
int rc = lnet_sock_getaddr(conn->ksnc_sock, true,
{
int rc = lnet_sock_getaddr(conn->ksnc_sock, true,
- &conn->ksnc_ipaddr,
- &conn->ksnc_port);
+ &conn->ksnc_ipaddr,
+ &conn->ksnc_port);
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT (!conn->ksnc_closing);
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT (!conn->ksnc_closing);
-ksocknal_lib_zc_capable(ksock_conn_t *conn)
+ksocknal_lib_zc_capable(struct ksock_conn *conn)
{
int caps = conn->ksnc_sock->sk->sk_route_caps;
{
int caps = conn->ksnc_sock->sk->sk_route_caps;
-ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
int nob;
{
struct socket *sock = conn->ksnc_sock;
int nob;
-ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
lnet_kiov_t *kiov = tx->tx_kiov;
{
struct socket *sock = conn->ksnc_sock;
lnet_kiov_t *kiov = tx->tx_kiov;
-ksocknal_lib_eager_ack (ksock_conn_t *conn)
+ksocknal_lib_eager_ack(struct ksock_conn *conn)
- int opt = 1;
- struct socket *sock = conn->ksnc_sock;
+ int opt = 1;
+ struct socket *sock = conn->ksnc_sock;
/* Remind the socket to ACK eagerly. If I don't, the socket might
* think I'm about to send something it could piggy-back the ACK
/* Remind the socket to ACK eagerly. If I don't, the socket might
* think I'm about to send something it could piggy-back the ACK
-ksocknal_lib_recv_iov (ksock_conn_t *conn)
+ksocknal_lib_recv_iov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
struct kvec scratch;
{
#if SOCKNAL_SINGLE_FRAG_RX
struct kvec scratch;
-ksocknal_lib_recv_kiov (ksock_conn_t *conn)
+ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct kvec scratch;
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct kvec scratch;
-ksocknal_lib_csum_tx(ksock_tx_t *tx)
+ksocknal_lib_csum_tx(struct ksock_tx *tx)
-ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
{
struct socket *sock = conn->ksnc_sock;
int len;
-ksocknal_lib_push_conn (ksock_conn_t *conn)
+ksocknal_lib_push_conn(struct ksock_conn *conn)
- struct sock *sk;
- struct tcp_sock *tp;
- int nonagle;
- int val = 1;
- int rc;
+ struct sock *sk;
+ struct tcp_sock *tp;
+ int nonagle;
+ int val = 1;
+ int rc;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) /* being shut down */
rc = ksocknal_connsock_addref(conn);
if (rc != 0) /* being shut down */
ksocknal_connsock_decref(conn);
}
ksocknal_connsock_decref(conn);
}
-extern void ksocknal_read_callback (ksock_conn_t *conn);
-extern void ksocknal_write_callback (ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
/*
* socket call back in Linux
*/
/*
* socket call back in Linux
*/
ksocknal_data_ready(struct sock *sk, int n)
#endif
{
ksocknal_data_ready(struct sock *sk, int n)
#endif
{
+ struct ksock_conn *conn;
ENTRY;
/* interleave correctly with closing sockets... */
ENTRY;
/* interleave correctly with closing sockets... */
static void
ksocknal_write_space (struct sock *sk)
{
static void
ksocknal_write_space (struct sock *sk)
{
+ struct ksock_conn *conn;
int wspace;
int min_wpace;
int wspace;
int min_wpace;
-ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
{
conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
{
conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
-ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
{
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
{
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
-ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
{
/* Remove conn's network callbacks.
* NB I _have_ to restore the callback, rather than storing a noop,
{
/* Remove conn's network callbacks.
* NB I _have_ to restore the callback, rather than storing a noop,
-ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+ksocknal_lib_memory_pressure(struct ksock_conn *conn)
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
MODULE_PARM_DESC(protocol, "protocol version");
#endif
MODULE_PARM_DESC(protocol, "protocol version");
#endif
-ksock_tunables_t ksocknal_tunables;
+struct ksock_tunables ksocknal_tunables;
int ksocknal_tunables_init(void)
{
int ksocknal_tunables_init(void)
{
* pro_match_tx() : Called holding glock
*/
* pro_match_tx() : Called holding glock
*/
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
/* V1.x, just enqueue it */
list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
{
/* V1.x, just enqueue it */
list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
-ksocknal_next_tx_carrier(ksock_conn_t *conn)
+ksocknal_next_tx_carrier(struct ksock_conn *conn)
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
conn->ksnc_tx_carrier = NULL;
} else {
conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
conn->ksnc_tx_carrier = NULL;
} else {
conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
+ struct ksock_tx, tx_list);
LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
tx->tx_msg.ksm_type);
}
}
static int
LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
tx->tx_msg.ksm_type);
}
}
static int
-ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
+ struct ksock_tx *tx_ack, __u64 cookie)
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
LASSERT (tx_ack == NULL ||
tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
LASSERT (tx_ack == NULL ||
tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
-ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
+ struct ksock_tx *tx_ack, __u64 cookie)
if (conn->ksnc_type != SOCKLND_CONN_ACK)
return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
if (conn->ksnc_type != SOCKLND_CONN_ACK)
return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
-ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
-ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
/* (Sink) handle incoming ZC request from sender */
static int
/* (Sink) handle incoming ZC request from sender */
static int
-ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
+ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
- ksock_peer_ni_t *peer_ni = c->ksnc_peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
+ struct ksock_peer_ni *peer_ni = c->ksnc_peer;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
+ int rc;
read_lock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked(peer_ni, NULL, !!remote);
if (conn != NULL) {
read_lock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked(peer_ni, NULL, !!remote);
if (conn != NULL) {
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
/* (Sender) handle ZC_ACK from sink */
static int
/* (Sender) handle ZC_ACK from sink */
static int
-ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
+ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
- struct list_head zlist = LIST_HEAD_INIT(zlist);
- int count;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *tmp;
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
+ int count;
if (cookie1 == 0)
cookie1 = cookie2;
if (cookie1 == 0)
cookie1 = cookie2;
spin_unlock(&peer_ni->ksnp_lock);
while (!list_empty(&zlist)) {
spin_unlock(&peer_ni->ksnp_lock);
while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
-ksocknal_send_hello_v1 (ksock_conn_t *conn, struct ksock_hello_msg *hello)
+ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
{
struct socket *sock = conn->ksnc_sock;
struct lnet_hdr *hdr;
{
struct socket *sock = conn->ksnc_sock;
struct lnet_hdr *hdr;
-ksocknal_send_hello_v2 (ksock_conn_t *conn, struct ksock_hello_msg *hello)
+ksocknal_send_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello)
- struct socket *sock = conn->ksnc_sock;
- int rc;
+ struct socket *sock = conn->ksnc_sock;
+ int rc;
hello->kshm_magic = LNET_PROTO_MAGIC;
hello->kshm_version = conn->ksnc_proto->pro_version;
hello->kshm_magic = LNET_PROTO_MAGIC;
hello->kshm_version = conn->ksnc_proto->pro_version;
-ksocknal_recv_hello_v1(ksock_conn_t *conn, struct ksock_hello_msg *hello,int timeout)
+ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
+ int timeout)
{
struct socket *sock = conn->ksnc_sock;
struct lnet_hdr *hdr;
{
struct socket *sock = conn->ksnc_sock;
struct lnet_hdr *hdr;
-ksocknal_recv_hello_v2(ksock_conn_t *conn, struct ksock_hello_msg *hello,
+ksocknal_recv_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello,
int timeout)
{
struct socket *sock = conn->ksnc_sock;
int timeout)
{
struct socket *sock = conn->ksnc_sock;
-ksocknal_pack_msg_v1(ksock_tx_t *tx)
+ksocknal_pack_msg_v1(struct ksock_tx *tx)
{
/* V1.x has no KSOCK_MSG_NOOP */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
{
/* V1.x has no KSOCK_MSG_NOOP */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
-ksocknal_pack_msg_v2(ksock_tx_t *tx)
+ksocknal_pack_msg_v2(struct ksock_tx *tx)
{
tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;
{
tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;
return; /* Do nothing */
}
return; /* Do nothing */
}
-ksock_proto_t ksocknal_protocol_v1x =
+struct ksock_proto ksocknal_protocol_v1x =
{
.pro_version = KSOCK_PROTO_V1,
.pro_send_hello = ksocknal_send_hello_v1,
{
.pro_version = KSOCK_PROTO_V1,
.pro_send_hello = ksocknal_send_hello_v1,
.pro_match_tx = ksocknal_match_tx
};
.pro_match_tx = ksocknal_match_tx
};
-ksock_proto_t ksocknal_protocol_v2x =
+struct ksock_proto ksocknal_protocol_v2x =
{
.pro_version = KSOCK_PROTO_V2,
.pro_send_hello = ksocknal_send_hello_v2,
{
.pro_version = KSOCK_PROTO_V2,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_match_tx = ksocknal_match_tx
};
.pro_match_tx = ksocknal_match_tx
};
-ksock_proto_t ksocknal_protocol_v3x =
+struct ksock_proto ksocknal_protocol_v3x =
{
.pro_version = KSOCK_PROTO_V3,
.pro_send_hello = ksocknal_send_hello_v2,
{
.pro_version = KSOCK_PROTO_V3,
.pro_send_hello = ksocknal_send_hello_v2,