struct ksock_peer_ni *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
- struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
- struct list_head *tmp;
struct ksock_peer_ni *peer_ni;
- list_for_each(tmp, peer_list) {
- peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
-
+ hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
+ ksnp_list, id.nid) {
LASSERT(!peer_ni->ksnp_closing);
		if (peer_ni->ksnp_ni != ni)
			continue;
LASSERT(list_empty(&peer_ni->ksnp_routes));
LASSERT(!peer_ni->ksnp_closing);
peer_ni->ksnp_closing = 1;
- list_del(&peer_ni->ksnp_list);
+ hlist_del(&peer_ni->ksnp_list);
/* lose peerlist's ref */
ksocknal_peer_decref(peer_ni);
}
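
For reference, hash_for_each_possible() only narrows the walk to the one bucket that id.nid hashes to; peers with other NIDs can land in the same bucket, so the loop body still has to compare the full ksnp_id (the rest of that body is elided in this hunk). A minimal sketch of the idiom, with illustrative demo_* names that are not part of this patch:

#include <linux/hashtable.h>
#include <linux/slab.h>

struct demo_peer {
	u64			nid;	/* full key kept in the entry */
	struct hlist_node	link;	/* bucket linkage */
};

static DEFINE_HASHTABLE(demo_peers, 7);	/* 2^7 = 128 buckets */

static struct demo_peer *demo_find(u64 nid)
{
	struct demo_peer *p;

	/* walks only the bucket that nid hashes to */
	hash_for_each_possible(demo_peers, p, link, nid)
		if (p->nid == nid)	/* re-check: buckets can collide */
			return p;
	return NULL;
}
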
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
		       struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
		       int *port, int *conn_count, int *share_count)
{
struct ksock_peer_ni *peer_ni;
- struct list_head *ptmp;
struct ksock_route *route;
struct list_head *rtmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
+ hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
- if (peer_ni->ksnp_ni != ni)
+ if (peer_ni->ksnp_ni != ni)
+ continue;
+
+ if (peer_ni->ksnp_n_passive_ips == 0 &&
+ list_empty(&peer_ni->ksnp_routes)) {
+ if (index-- > 0)
continue;
- if (peer_ni->ksnp_n_passive_ips == 0 &&
- list_empty(&peer_ni->ksnp_routes)) {
- if (index-- > 0)
- continue;
+ *id = peer_ni->ksnp_id;
+ *myip = 0;
+ *peer_ip = 0;
+ *port = 0;
+ *conn_count = 0;
+ *share_count = 0;
+ rc = 0;
+ goto out;
+ }
- *id = peer_ni->ksnp_id;
- *myip = 0;
- *peer_ip = 0;
- *port = 0;
- *conn_count = 0;
- *share_count = 0;
- rc = 0;
- goto out;
- }
+ for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
+ if (index-- > 0)
+ continue;
- for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
- if (index-- > 0)
- continue;
+ *id = peer_ni->ksnp_id;
+ *myip = peer_ni->ksnp_passive_ips[j];
+ *peer_ip = 0;
+ *port = 0;
+ *conn_count = 0;
+ *share_count = 0;
+ rc = 0;
+ goto out;
+ }
- *id = peer_ni->ksnp_id;
- *myip = peer_ni->ksnp_passive_ips[j];
- *peer_ip = 0;
- *port = 0;
- *conn_count = 0;
- *share_count = 0;
- rc = 0;
- goto out;
- }
+ list_for_each(rtmp, &peer_ni->ksnp_routes) {
+ if (index-- > 0)
+ continue;
- list_for_each(rtmp, &peer_ni->ksnp_routes) {
- if (index-- > 0)
- continue;
+ route = list_entry(rtmp, struct ksock_route,
+ ksnr_list);
- route = list_entry(rtmp, struct ksock_route,
- ksnr_list);
-
- *id = peer_ni->ksnp_id;
- *myip = route->ksnr_myipaddr;
- *peer_ip = route->ksnr_ipaddr;
- *port = route->ksnr_port;
- *conn_count = route->ksnr_conn_count;
- *share_count = route->ksnr_share_count;
- rc = 0;
- goto out;
- }
+ *id = peer_ni->ksnp_id;
+ *myip = route->ksnr_myipaddr;
+ *peer_ip = route->ksnr_ipaddr;
+ *port = route->ksnr_port;
+ *conn_count = route->ksnr_conn_count;
+ *share_count = route->ksnr_share_count;
+ rc = 0;
+ goto out;
}
}
out:
peer_ni = peer2;
} else {
/* peer_ni table takes my ref on peer_ni */
- list_add_tail(&peer_ni->ksnp_list,
- ksocknal_nid2peerlist(id.nid));
+ hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid);
}
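
hash_add() takes the table, the entry's hlist_node and the key; the key is hashed to pick a bucket but is not stored anywhere, which is why ksnp_id must stay in struct ksock_peer_ni for the lookup above to re-check. Continuing the demo_peer sketch (illustrative only):

static void demo_insert(struct demo_peer *p, u64 nid)
{
	p->nid = nid;			/* the table won't remember the key */
	hash_add(demo_peers, &p->link, nid);
}
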
route2 = NULL;
ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
LIST_HEAD(zombies);
- struct list_head *ptmp;
- struct list_head *pnxt;
+ struct hlist_node *pnxt;
struct ksock_peer_ni *peer_ni;
int lo;
int hi;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY) {
- hi = (int)(ksocknal_nid2peerlist(id.nid) -
- ksocknal_data.ksnd_peers);
- lo = hi;
+ lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ hi = lo;
} else {
lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
+ hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
-
+ hlist_for_each_entry_safe(peer_ni, pnxt,
+ &ksocknal_data.ksnd_peers[i],
+ ksnp_list) {
if (peer_ni->ksnp_ni != ni)
continue;
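
The lo/hi computation above is the hashtable.h idiom for "one bucket or all of them": a concrete NID maps to exactly the bucket hash_min() selects, while LNET_NID_ANY scans every bucket, and the _safe iterator tolerates unlinking the current entry mid-walk. A sketch under the same illustrative demo_peer assumptions:

static void demo_del(u64 nid, bool wildcard)
{
	struct hlist_node *next;
	struct demo_peer *p;
	int lo, hi, i;

	if (!wildcard) {
		lo = hash_min(nid, HASH_BITS(demo_peers));
		hi = lo;
	} else {
		lo = 0;
		hi = HASH_SIZE(demo_peers) - 1;
	}

	for (i = lo; i <= hi; i++) {
		hlist_for_each_entry_safe(p, next, &demo_peers[i], link) {
			if (wildcard || p->nid == nid) {
				hash_del(&p->link);	/* safe mid-walk */
				kfree(p);
			}
		}
	}
}
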
ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
struct ksock_peer_ni *peer_ni;
- struct list_head *ptmp;
struct ksock_conn *conn;
struct list_head *ctmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
+ hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
+ LASSERT(!peer_ni->ksnp_closing);
- LASSERT(!peer_ni->ksnp_closing);
+ if (peer_ni->ksnp_ni != ni)
+ continue;
- if (peer_ni->ksnp_ni != ni)
+ list_for_each(ctmp, &peer_ni->ksnp_conns) {
+ if (index-- > 0)
continue;
- list_for_each(ctmp, &peer_ni->ksnp_conns) {
- if (index-- > 0)
- continue;
-
- conn = list_entry(ctmp, struct ksock_conn,
- ksnc_list);
- ksocknal_conn_addref(conn);
- read_unlock(&ksocknal_data. \
- ksnd_global_lock);
- return conn;
- }
+ conn = list_entry(ctmp, struct ksock_conn,
+ ksnc_list);
+ ksocknal_conn_addref(conn);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return conn;
}
}
if (peer2 == NULL) {
/* NB this puts an "empty" peer_ni in the peer_ni
* table (which takes my ref) */
- list_add_tail(&peer_ni->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
+ hash_add(ksocknal_data.ksnd_peers,
+ &peer_ni->ksnp_list, peerid.nid);
} else {
ksocknal_peer_decref(peer_ni);
peer_ni = peer2;
ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
struct ksock_peer_ni *peer_ni;
- struct list_head *ptmp;
- struct list_head *pnxt;
+ struct hlist_node *pnxt;
int lo;
int hi;
int i;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
- lo = 0;
- hi = ksocknal_data.ksnd_peer_hash_size - 1;
- }
-
- for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
+ if (id.nid != LNET_NID_ANY) {
+ lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ hi = lo;
+ } else {
+ lo = 0;
+ hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
+ }
- peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
+ for (i = lo; i <= hi; i++) {
+ hlist_for_each_entry_safe(peer_ni, pnxt,
+ &ksocknal_data.ksnd_peers[i],
+ ksnp_list) {
- if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
- (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
- continue;
+ if (!((id.nid == LNET_NID_ANY ||
+ id.nid == peer_ni->ksnp_id.nid) &&
+ (id.pid == LNET_PID_ANY ||
+ id.pid == peer_ni->ksnp_id.pid)))
+ continue;
- count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
- }
- }
+ count += ksocknal_close_peer_conns_locked(peer_ni,
+ ipaddr, 0);
+ }
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- /* wildcards always succeed */
- if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
- return (0);
+ /* wildcards always succeed */
+ if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+ return 0;
- return (count == 0 ? -ENOENT : 0);
+ return (count == 0 ? -ENOENT : 0);
}
void
static int
ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
{
- struct list_head *start;
- struct list_head *end;
- struct list_head *tmp;
- int rc = -ENOENT;
- unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
+ int lo;
+ int hi;
+ int bkt;
+ int rc = -ENOENT;
- if (id.nid == LNET_NID_ANY) {
- start = &ksocknal_data.ksnd_peers[0];
- end = &ksocknal_data.ksnd_peers[hsize - 1];
+ if (id.nid != LNET_NID_ANY) {
+ lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+ hi = lo;
} else {
- start = end = ksocknal_nid2peerlist(id.nid);
+ lo = 0;
+ hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
}
- for (tmp = start; tmp <= end; tmp++) {
- int peer_off; /* searching offset in peer_ni hash table */
+ for (bkt = lo; bkt <= hi; bkt++) {
+ int peer_off; /* searching offset in peer_ni hash table */
for (peer_off = 0; ; peer_off++) {
struct ksock_peer_ni *peer_ni;
int i = 0;
read_lock(&ksocknal_data.ksnd_global_lock);
- list_for_each_entry(peer_ni, tmp, ksnp_list) {
+ hlist_for_each_entry(peer_ni,
+ &ksocknal_data.ksnd_peers[bkt],
+ ksnp_list) {
if (!((id.nid == LNET_NID_ANY ||
id.nid == peer_ni->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY ||
int rc;
int i;
int j;
- struct list_head *ptmp;
struct ksock_peer_ni *peer_ni;
struct list_head *rtmp;
struct ksock_route *route;
iface->ksni_nroutes = 0;
iface->ksni_npeers = 0;
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer_ni = list_entry(ptmp, struct ksock_peer_ni,
- ksnp_list);
-
- for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
- if (peer_ni->ksnp_passive_ips[j] == ipaddress)
- iface->ksni_npeers++;
+ hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
+ for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
+ if (peer_ni->ksnp_passive_ips[j] == ipaddress)
+ iface->ksni_npeers++;
- list_for_each(rtmp, &peer_ni->ksnp_routes) {
- route = list_entry(rtmp,
- struct ksock_route,
- ksnr_list);
+ list_for_each(rtmp, &peer_ni->ksnp_routes) {
+ route = list_entry(rtmp,
+ struct ksock_route,
+ ksnr_list);
- if (route->ksnr_myipaddr == ipaddress)
- iface->ksni_nroutes++;
- }
+ if (route->ksnr_myipaddr == ipaddress)
+ iface->ksni_nroutes++;
}
}
rc = 0;
- /* NB only new connections will pay attention to the new interface! */
+ /* NB only new connections will pay attention to the new
+ * interface!
+ */
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
{
struct ksock_net *net = ni->ni_data;
int rc = -ENOENT;
- struct list_head *tmp;
- struct list_head *nxt;
+ struct hlist_node *nxt;
struct ksock_peer_ni *peer_ni;
u32 this_ip;
int i;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
-
- if (!(ipaddress == 0 ||
- ipaddress == this_ip))
- continue;
+ for (i = 0; i < net->ksnn_ninterfaces; i++) {
+ this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
- rc = 0;
+ if (!(ipaddress == 0 ||
+ ipaddress == this_ip))
+ continue;
- for (j = i+1; j < net->ksnn_ninterfaces; j++)
- net->ksnn_interfaces[j-1] =
- net->ksnn_interfaces[j];
+ rc = 0;
- net->ksnn_ninterfaces--;
+ for (j = i+1; j < net->ksnn_ninterfaces; j++)
+ net->ksnn_interfaces[j-1] =
+ net->ksnn_interfaces[j];
- for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
- list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer_ni = list_entry(tmp, struct ksock_peer_ni,
- ksnp_list);
+ net->ksnn_ninterfaces--;
- if (peer_ni->ksnp_ni != ni)
- continue;
+ hash_for_each_safe(ksocknal_data.ksnd_peers, j,
+ nxt, peer_ni, ksnp_list) {
+ if (peer_ni->ksnp_ni != ni)
+ continue;
- ksocknal_peer_del_interface_locked(peer_ni, this_ip);
- }
- }
- }
+ ksocknal_peer_del_interface_locked(peer_ni, this_ip);
+ }
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return (rc);
+ return rc;
}
int
if (ksocknal_data.ksnd_schedulers != NULL)
cfs_percpt_free(ksocknal_data.ksnd_schedulers);
- LIBCFS_FREE (ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
-
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
ksocknal_base_shutdown(void)
{
struct ksock_sched *sched;
+ struct ksock_peer_ni *peer_ni;
int i;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
case SOCKNAL_INIT_ALL:
case SOCKNAL_INIT_DATA:
- LASSERT(ksocknal_data.ksnd_peers != NULL);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
- LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
+ hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
+ LASSERT(0);
LASSERT(list_empty(&ksocknal_data.ksnd_nets));
LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
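
As an aside, hashtable.h also provides hash_empty(); if the iterate-and-LASSERT(0) form above is meant purely as an emptiness assertion, an equivalent spelling (not what this patch uses) would be:

	LASSERT(hash_empty(ksocknal_data.ksnd_peers));
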
int rc;
int i;
- LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
- LASSERT (ksocknal_data.ksnd_nnets == 0);
-
- memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
+ LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
+ LASSERT(ksocknal_data.ksnd_nnets == 0);
- ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
- LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
- if (ksocknal_data.ksnd_peers == NULL)
- return -ENOMEM;
+ memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
- INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
+ hash_init(ksocknal_data.ksnd_peers);
rwlock_init(&ksocknal_data.ksnd_global_lock);
INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
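
DECLARE_HASHTABLE() only declares the bucket array. Here the memset() of ksocknal_data just above already zeroes the buckets, but hash_init() is the documented way to (re)initialize such a table and keeps the code independent of that detail; a file-scope table could use DEFINE_HASHTABLE() and skip the call entirely. Minimal sketch (illustrative names):

struct demo_data {
	DECLARE_HASHTABLE(peers, 7);	/* struct hlist_head peers[128] */
};

static struct demo_data demo;

static void demo_setup(void)
{
	hash_init(demo.peers);		/* point every bucket at NULL */
}
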
read_lock(&ksocknal_data.ksnd_global_lock);
- for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
- list_for_each_entry(peer_ni, &ksocknal_data.ksnd_peers[i],
- ksnp_list) {
+ hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
struct ksock_route *route;
struct ksock_conn *conn;
atomic_read(&conn->ksnc_sock_refcount),
conn->ksnc_type, conn->ksnc_closing);
}
- goto done;
- }
+ break;
}
-done:
+
read_unlock(&ksocknal_data.ksnd_global_lock);
}
#include <linux/sysctl.h>
#include <linux/uio.h>
#include <linux/unistd.h>
+#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/tcp.h>
#define SOCKNAL_NSCHEDS 3
#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)
-#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer_ni lists */
-#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
-#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
-#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */
+#define SOCKNAL_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */
+#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
+#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
+#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */
-#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
-#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
+#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
+#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
-#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
+#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
* no risk if we're not running on a CONFIG_HIGHMEM platform. */
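
Worth noting: hashtable.h tables are power-of-two sized, so the old 101-bucket array grows slightly to 128 buckets here, and both constants are recovered from the declaration itself rather than stored in a ksnd_peer_hash_size field. A quick illustration (demo_tbl is not part of the patch):

DECLARE_HASHTABLE(demo_tbl, SOCKNAL_PEER_HASH_BITS);
/* HASH_SIZE(demo_tbl) == 1 << 7 == 128; HASH_BITS(demo_tbl) == 7 */
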
/* stabilize peer_ni/conn ops */
rwlock_t ksnd_global_lock;
/* hash table of all my known peers */
- struct list_head *ksnd_peers;
- int ksnd_peer_hash_size; /* size of ksnd_peers */
+ DECLARE_HASHTABLE(ksnd_peers, SOCKNAL_PEER_HASH_BITS);
int ksnd_nthreads; /* # live threads */
int ksnd_shuttingdown; /* tell threads to exit */
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
struct ksock_peer_ni {
- struct list_head ksnp_list; /* stash on global peer_ni list */
+ struct hlist_node ksnp_list; /* stash on global peer_ni list */
time64_t ksnp_last_alive;/* when (in seconds) I was last alive */
struct lnet_process_id ksnp_id; /* who's on the other end(s) */
- atomic_t ksnp_refcount; /* # users */
- int ksnp_closing; /* being closed */
- int ksnp_accepting;/* # passive connections pending */
- int ksnp_error; /* errno on closing last conn */
- __u64 ksnp_zc_next_cookie;/* ZC completion cookie */
- __u64 ksnp_incarnation; /* latest known peer_ni incarnation */
- struct ksock_proto *ksnp_proto; /* latest known peer_ni protocol */
+ atomic_t ksnp_refcount; /* # users */
+ int ksnp_closing; /* being closed */
+ int ksnp_accepting; /* # passive connections pending */
+ int ksnp_error; /* errno on closing last conn */
+ __u64 ksnp_zc_next_cookie;/* ZC completion cookie */
+ __u64 ksnp_incarnation; /* latest known peer_ni incarnation */
+ struct ksock_proto *ksnp_proto; /* latest known peer_ni protocol */
struct list_head ksnp_conns; /* all active connections */
struct list_head ksnp_routes; /* routes */
struct list_head ksnp_tx_queue; /* waiting packets */
- spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
+ spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
/* zero copy requests wait for ACK */
struct list_head ksnp_zc_req_list;
time64_t ksnp_send_keepalive; /* time to send keepalive */
- struct lnet_ni *ksnp_ni; /* which network */
- int ksnp_n_passive_ips; /* # of... */
- __u32 ksnp_passive_ips[LNET_INTERFACES_NUM]; /* preferred local interfaces */
+ struct lnet_ni *ksnp_ni; /* which network */
+ int ksnp_n_passive_ips; /* # of... */
+ __u32 ksnp_passive_ips[LNET_INTERFACES_NUM]; /* preferred local interfaces */
};
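
The ksnp_list change from list_head to hlist_node is what the hashtable.h conversion requires, and it is also a small memory win: each bucket head shrinks from two pointers to one. For reference, the underlying types (as defined in linux/types.h) are:

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };
/* ->pprev points at the previous node's ->next (or the bucket's ->first),
 * which is why the hlist_del(&peer_ni->ksnp_list) earlier in this patch
 * needs no reference to the bucket head. */
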
struct ksock_connreq {
(1 << SOCKLND_CONN_BULK_OUT));
}
-static inline struct list_head *
-ksocknal_nid2peerlist (lnet_nid_t nid)
-{
- unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
-
- return (&ksocknal_data.ksnd_peers [hash]);
-}
-
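
With ksocknal_nid2peerlist() removed, bucket selection moves into hash_min() at each call site. A rough before/after sketch (the demo_* helpers are illustrative, not in the patch):

static unsigned int demo_old_bucket(u64 nid)
{
	return (unsigned int)nid % 101;	/* truncate to 32 bits, then mod */
}

static unsigned int demo_new_bucket(u64 nid)
{
	/* hash_min() resolves to hash_64() for a 64-bit key, mixing all
	 * 64 bits before masking down to 7, rather than truncating first */
	return hash_min(nid, 7);
}
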
static inline void
ksocknal_conn_addref(struct ksock_conn *conn)
{
static void
ksocknal_check_peer_timeouts(int idx)
{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
struct ksock_tx *tx;
again:
- /* NB. We expect to have a look at all the peers and not find any
- * connections to time out, so we just use a shared lock while we
- * take a look... */
+ /* NB. We expect to have a look at all the peers and not find any
+ * connections to time out, so we just use a shared lock while we
+ * take a look...
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
- list_for_each_entry(peer_ni, peers, ksnp_list) {
+ hlist_for_each_entry(peer_ni, peers, ksnp_list) {
struct ksock_tx *tx_stale;
time64_t deadline = 0;
int resid = 0;
int n = 0;
- if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
+ if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- goto again;
- }
+ goto again;
+ }
- conn = ksocknal_find_timed_out_conn (peer_ni);
+ conn = ksocknal_find_timed_out_conn(peer_ni);
- if (conn != NULL) {
+ if (conn != NULL) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
- /* NB we won't find this one again, but we can't
- * just proceed with the next peer_ni, since we dropped
- * ksnd_global_lock and it might be dead already! */
- ksocknal_conn_decref(conn);
- goto again;
- }
+ /* NB we won't find this one again, but we can't
+ * just proceed with the next peer_ni, since we dropped
+ * ksnd_global_lock and it might be dead already!
+ */
+ ksocknal_conn_decref(conn);
+ goto again;
+ }
- /* we can't process stale txs right here because we're
- * holding only shared lock */
+ /* we can't process stale txs right here because we're
+ * holding only shared lock
+ */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
struct ksock_tx *tx;
tx = list_entry(peer_ni->ksnp_tx_queue.next,
struct ksock_tx, tx_list);
if (ktime_get_seconds() >= tx->tx_deadline) {
- ksocknal_peer_addref(peer_ni);
+ ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_flush_stale_txs(peer_ni);
+ ksocknal_flush_stale_txs(peer_ni);
- ksocknal_peer_decref(peer_ni);
- goto again;
- }
- }
+ ksocknal_peer_decref(peer_ni);
+ goto again;
+ }
+ }
if (list_empty(&peer_ni->ksnp_zc_req_list))
- continue;
+ continue;
tx_stale = NULL;
spin_lock(&peer_ni->ksnp_lock);
nenomem_conns++;
}
- /* careful with the jiffy wrap... */
+ /* careful with the jiffy wrap... */
while ((timeout = deadline - ktime_get_seconds()) <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = ksocknal_data.ksnd_peer_hash_size;
+ const int n = 4;
+ const int p = 1;
+ int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
unsigned int lnd_timeout;
- /* Time to check for timeouts on a few more peers: I do
- * checks every 'p' seconds on a proportion of the peer_ni
- * table and I need to check every connection 'n' times
- * within a timeout interval, to ensure I detect a
- * timeout on any connection within (n+1)/n times the
- * timeout interval. */
+ /* Time to check for timeouts on a few more peers: I
+ * do checks every 'p' seconds on a proportion of the
+ * peer_ni table and I need to check every connection
+ * 'n' times within a timeout interval, to ensure I
+ * detect a timeout on any connection within (n+1)/n
+ * times the timeout interval.
+ */
lnd_timeout = lnet_get_lnd_timeout();
if (lnd_timeout > n * p)
if (chunk == 0)
chunk = 1;
- for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts (peer_index);
- peer_index = (peer_index + 1) %
- ksocknal_data.ksnd_peer_hash_size;
- }
+ for (i = 0; i < chunk; i++) {
+ ksocknal_check_peer_timeouts(peer_index);
+ peer_index = (peer_index + 1) %
+ HASH_SIZE(ksocknal_data.ksnd_peers);
+ }
deadline += p;
- }
+ }
if (nenomem_conns != 0) {
/* Reduce my timeout if I rescheduled ENOMEM conns.