From: Mr NeilBrown Date: Wed, 15 Jan 2020 15:36:42 +0000 (-0500) Subject: LU-12678 socklnd: convert peers hash table to hashtable.h X-Git-Tag: 2.13.52~39 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=dbbcf61d2bdcf62740c1d5c8debaeb6afdfc2261 LU-12678 socklnd: convert peers hash table to hashtable.h Using a hashtable.h hashtable, rather than bespoke code, has several advantages: - the table is comprised of hlist_head, rather than list_head, so it consumes less memory (though we need to make it a little bigger as it must be a power-of-2) - there are existing macros for easily walking the whole table - it uses a "real" hash function rather than "mod a prime number". In some ways, rhashtable might be even better, but it can change the ordering of objects in the table at arbitrary moments, and that could hurt the user-space API. It also does not support the partitioned walking that ksocknal_check_peer_timeouts() depends on. Note that new peers are inserted at the top of a hash chain, rather than appended at the end. I don't think that should be a problem. Test-Parameters: trivial testlist=sanity-lnet Signed-off-by: Mr NeilBrown Change-Id: I70fe64df0dd0db73666ff6fb2d2888b1d64f4be5 Reviewed-on: https://review.whamcloud.com/36837 Tested-by: jenkins Tested-by: Maloo Reviewed-by: James Simmons Reviewed-by: Serguei Smirnov Reviewed-by: Oleg Drokin --- diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c index a9907c5..00ed04c 100644 --- a/lnet/klnds/socklnd/socklnd.c +++ b/lnet/klnds/socklnd/socklnd.c @@ -164,13 +164,10 @@ ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni) struct ksock_peer_ni * ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id) { - struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); - struct list_head *tmp; struct ksock_peer_ni *peer_ni; - list_for_each(tmp, peer_list) { - peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list); - + hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni, + ksnp_list, id.nid) { LASSERT(!peer_ni->ksnp_closing); if (peer_ni->ksnp_ni != ni) @@ -229,7 +226,7 @@ ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni) LASSERT(list_empty(&peer_ni->ksnp_routes)); LASSERT(!peer_ni->ksnp_closing); peer_ni->ksnp_closing = 1; - list_del(&peer_ni->ksnp_list); + hlist_del(&peer_ni->ksnp_list); /* lose peerlist's ref */ ksocknal_peer_decref(peer_ni); } @@ -240,7 +237,6 @@ ksocknal_get_peer_info(struct lnet_ni *ni, int index, int *port, int *conn_count, int *share_count) { struct ksock_peer_ni *peer_ni; - struct list_head *ptmp; struct ksock_route *route; struct list_head *rtmp; int i; @@ -249,58 +245,55 @@ ksocknal_get_peer_info(struct lnet_ni *ni, int index, read_lock(&ksocknal_data.ksnd_global_lock); - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list); + hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) { - if (peer_ni->ksnp_ni != ni) + if (peer_ni->ksnp_ni != ni) + continue; + + if (peer_ni->ksnp_n_passive_ips == 0 && + list_empty(&peer_ni->ksnp_routes)) { + if (index-- > 0) continue; - if (peer_ni->ksnp_n_passive_ips == 0 && - list_empty(&peer_ni->ksnp_routes)) { - if (index-- > 0) - continue; + *id = peer_ni->ksnp_id; + *myip = 0; + *peer_ip = 0; + *port = 0; + *conn_count = 0; + *share_count = 0; + rc = 0; + goto out; + } - *id = peer_ni->ksnp_id; - *myip = 0; - *peer_ip = 0; - *port = 0; - *conn_count = 0; - *share_count = 0; 
- rc = 0; - goto out; - } + for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) { + if (index-- > 0) + continue; - for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) { - if (index-- > 0) - continue; + *id = peer_ni->ksnp_id; + *myip = peer_ni->ksnp_passive_ips[j]; + *peer_ip = 0; + *port = 0; + *conn_count = 0; + *share_count = 0; + rc = 0; + goto out; + } - *id = peer_ni->ksnp_id; - *myip = peer_ni->ksnp_passive_ips[j]; - *peer_ip = 0; - *port = 0; - *conn_count = 0; - *share_count = 0; - rc = 0; - goto out; - } + list_for_each(rtmp, &peer_ni->ksnp_routes) { + if (index-- > 0) + continue; - list_for_each(rtmp, &peer_ni->ksnp_routes) { - if (index-- > 0) - continue; + route = list_entry(rtmp, struct ksock_route, + ksnr_list); - route = list_entry(rtmp, struct ksock_route, - ksnr_list); - - *id = peer_ni->ksnp_id; - *myip = route->ksnr_myipaddr; - *peer_ip = route->ksnr_ipaddr; - *port = route->ksnr_port; - *conn_count = route->ksnr_conn_count; - *share_count = route->ksnr_share_count; - rc = 0; - goto out; - } + *id = peer_ni->ksnp_id; + *myip = route->ksnr_myipaddr; + *peer_ip = route->ksnr_ipaddr; + *port = route->ksnr_port; + *conn_count = route->ksnr_conn_count; + *share_count = route->ksnr_share_count; + rc = 0; + goto out; } } out: @@ -470,8 +463,7 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr, peer_ni = peer2; } else { /* peer_ni table takes my ref on peer_ni */ - list_add_tail(&peer_ni->ksnp_list, - ksocknal_nid2peerlist(id.nid)); + hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid); } route2 = NULL; @@ -555,8 +547,7 @@ static int ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip) { LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; + struct hlist_node *pnxt; struct ksock_peer_ni *peer_ni; int lo; int hi; @@ -566,19 +557,17 @@ ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip) write_lock_bh(&ksocknal_data.ksnd_global_lock); if (id.nid != LNET_NID_ANY) { - hi = (int)(ksocknal_nid2peerlist(id.nid) - - ksocknal_data.ksnd_peers); - lo = hi; + lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers)); + hi = lo; } else { lo = 0; - hi = ksocknal_data.ksnd_peer_hash_size - 1; + hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1; } for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, - &ksocknal_data.ksnd_peers[i]) { - peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list); - + hlist_for_each_entry_safe(peer_ni, pnxt, + &ksocknal_data.ksnd_peers[i], + ksnp_list) { if (peer_ni->ksnp_ni != ni) continue; @@ -618,33 +607,27 @@ static struct ksock_conn * ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index) { struct ksock_peer_ni *peer_ni; - struct list_head *ptmp; struct ksock_conn *conn; struct list_head *ctmp; int i; read_lock(&ksocknal_data.ksnd_global_lock); - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list); + hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) { + LASSERT(!peer_ni->ksnp_closing); - LASSERT(!peer_ni->ksnp_closing); + if (peer_ni->ksnp_ni != ni) + continue; - if (peer_ni->ksnp_ni != ni) + list_for_each(ctmp, &peer_ni->ksnp_conns) { + if (index-- > 0) continue; - list_for_each(ctmp, &peer_ni->ksnp_conns) { - if (index-- > 0) - continue; - - conn = list_entry(ctmp, struct ksock_conn, - ksnc_list); - ksocknal_conn_addref(conn); - read_unlock(&ksocknal_data. 
\ - ksnd_global_lock); - return conn; - } + conn = list_entry(ctmp, struct ksock_conn, + ksnc_list); + ksocknal_conn_addref(conn); + read_unlock(&ksocknal_data.ksnd_global_lock); + return conn; } } @@ -1132,8 +1115,8 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route, if (peer2 == NULL) { /* NB this puts an "empty" peer_ni in the peer_ni * table (which takes my ref) */ - list_add_tail(&peer_ni->ksnp_list, - ksocknal_nid2peerlist(peerid.nid)); + hash_add(ksocknal_data.ksnd_peers, + &peer_ni->ksnp_list, peerid.nid); } else { ksocknal_peer_decref(peer_ni); peer_ni = peer2; @@ -1732,8 +1715,7 @@ int ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr) { struct ksock_peer_ni *peer_ni; - struct list_head *ptmp; - struct list_head *pnxt; + struct hlist_node *pnxt; int lo; int hi; int i; @@ -1741,33 +1723,37 @@ ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr) write_lock_bh(&ksocknal_data.ksnd_global_lock); - if (id.nid != LNET_NID_ANY) - lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - else { - lo = 0; - hi = ksocknal_data.ksnd_peer_hash_size - 1; - } - - for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { + if (id.nid != LNET_NID_ANY) { + lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers)); + hi = lo; + } else { + lo = 0; + hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1; + } - peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list); + for (i = lo; i <= hi; i++) { + hlist_for_each_entry_safe(peer_ni, pnxt, + &ksocknal_data.ksnd_peers[i], + ksnp_list) { - if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) && - (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid))) - continue; + if (!((id.nid == LNET_NID_ANY || + id.nid == peer_ni->ksnp_id.nid) && + (id.pid == LNET_PID_ANY || + id.pid == peer_ni->ksnp_id.pid))) + continue; - count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0); - } - } + count += ksocknal_close_peer_conns_locked(peer_ni, + ipaddr, 0); + } + } write_unlock_bh(&ksocknal_data.ksnd_global_lock); - /* wildcards always succeed */ - if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0) - return (0); + /* wildcards always succeed */ + if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0) + return 0; - return (count == 0 ? -ENOENT : 0); + return (count == 0 ? 
-ENOENT : 0); } void @@ -1890,28 +1876,30 @@ ksocknal_push_peer(struct ksock_peer_ni *peer_ni) static int ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id) { - struct list_head *start; - struct list_head *end; - struct list_head *tmp; - int rc = -ENOENT; - unsigned int hsize = ksocknal_data.ksnd_peer_hash_size; + int lo; + int hi; + int bkt; + int rc = -ENOENT; - if (id.nid == LNET_NID_ANY) { - start = &ksocknal_data.ksnd_peers[0]; - end = &ksocknal_data.ksnd_peers[hsize - 1]; + if (id.nid != LNET_NID_ANY) { + lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers)); + hi = lo; } else { - start = end = ksocknal_nid2peerlist(id.nid); + lo = 0; + hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1; } - for (tmp = start; tmp <= end; tmp++) { - int peer_off; /* searching offset in peer_ni hash table */ + for (bkt = lo; bkt <= hi; bkt++) { + int peer_off; /* searching offset in peer_ni hash table */ for (peer_off = 0; ; peer_off++) { struct ksock_peer_ni *peer_ni; int i = 0; read_lock(&ksocknal_data.ksnd_global_lock); - list_for_each_entry(peer_ni, tmp, ksnp_list) { + hlist_for_each_entry(peer_ni, + &ksocknal_data.ksnd_peers[bkt], + ksnp_list) { if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) && (id.pid == LNET_PID_ANY || @@ -1944,7 +1932,6 @@ ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask) int rc; int i; int j; - struct list_head *ptmp; struct ksock_peer_ni *peer_ni; struct list_head *rtmp; struct ksock_route *route; @@ -1969,28 +1956,25 @@ ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask) iface->ksni_nroutes = 0; iface->ksni_npeers = 0; - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer_ni = list_entry(ptmp, struct ksock_peer_ni, - ksnp_list); - - for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) - if (peer_ni->ksnp_passive_ips[j] == ipaddress) - iface->ksni_npeers++; + hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) { + for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) + if (peer_ni->ksnp_passive_ips[j] == ipaddress) + iface->ksni_npeers++; - list_for_each(rtmp, &peer_ni->ksnp_routes) { - route = list_entry(rtmp, - struct ksock_route, - ksnr_list); + list_for_each(rtmp, &peer_ni->ksnp_routes) { + route = list_entry(rtmp, + struct ksock_route, + ksnr_list); - if (route->ksnr_myipaddr == ipaddress) - iface->ksni_nroutes++; - } + if (route->ksnr_myipaddr == ipaddress) + iface->ksni_nroutes++; } } rc = 0; - /* NB only new connections will pay attention to the new interface! */ + /* NB only new connections will pay attention to the new + * interface! 
+ */ } write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -2044,8 +2028,7 @@ ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress) { struct ksock_net *net = ni->ni_data; int rc = -ENOENT; - struct list_head *tmp; - struct list_head *nxt; + struct hlist_node *nxt; struct ksock_peer_ni *peer_ni; u32 this_ip; int i; @@ -2053,38 +2036,33 @@ ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress) write_lock_bh(&ksocknal_data.ksnd_global_lock); - for (i = 0; i < net->ksnn_ninterfaces; i++) { - this_ip = net->ksnn_interfaces[i].ksni_ipaddr; - - if (!(ipaddress == 0 || - ipaddress == this_ip)) - continue; + for (i = 0; i < net->ksnn_ninterfaces; i++) { + this_ip = net->ksnn_interfaces[i].ksni_ipaddr; - rc = 0; + if (!(ipaddress == 0 || + ipaddress == this_ip)) + continue; - for (j = i+1; j < net->ksnn_ninterfaces; j++) - net->ksnn_interfaces[j-1] = - net->ksnn_interfaces[j]; + rc = 0; - net->ksnn_ninterfaces--; + for (j = i+1; j < net->ksnn_ninterfaces; j++) + net->ksnn_interfaces[j-1] = + net->ksnn_interfaces[j]; - for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) { - list_for_each_safe(tmp, nxt, - &ksocknal_data.ksnd_peers[j]) { - peer_ni = list_entry(tmp, struct ksock_peer_ni, - ksnp_list); + net->ksnn_ninterfaces--; - if (peer_ni->ksnp_ni != ni) - continue; + hash_for_each_safe(ksocknal_data.ksnd_peers, j, + nxt, peer_ni, ksnp_list) { + if (peer_ni->ksnp_ni != ni) + continue; - ksocknal_peer_del_interface_locked(peer_ni, this_ip); - } - } - } + ksocknal_peer_del_interface_locked(peer_ni, this_ip); + } + } write_unlock_bh(&ksocknal_data.ksnd_global_lock); - return (rc); + return rc; } int @@ -2222,10 +2200,6 @@ ksocknal_free_buffers (void) if (ksocknal_data.ksnd_schedulers != NULL) cfs_percpt_free(ksocknal_data.ksnd_schedulers); - LIBCFS_FREE (ksocknal_data.ksnd_peers, - sizeof(struct list_head) * - ksocknal_data.ksnd_peer_hash_size); - spin_lock(&ksocknal_data.ksnd_tx_lock); if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { @@ -2250,6 +2224,7 @@ static void ksocknal_base_shutdown(void) { struct ksock_sched *sched; + struct ksock_peer_ni *peer_ni; int i; CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n", @@ -2263,9 +2238,8 @@ ksocknal_base_shutdown(void) case SOCKNAL_INIT_ALL: case SOCKNAL_INIT_DATA: - LASSERT(ksocknal_data.ksnd_peers != NULL); - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) - LASSERT(list_empty(&ksocknal_data.ksnd_peers[i])); + hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) + LASSERT(0); LASSERT(list_empty(&ksocknal_data.ksnd_nets)); LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns)); @@ -2329,20 +2303,12 @@ ksocknal_base_startup(void) int rc; int i; - LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); - LASSERT (ksocknal_data.ksnd_nnets == 0); - - memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */ + LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); + LASSERT(ksocknal_data.ksnd_nnets == 0); - ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE; - LIBCFS_ALLOC(ksocknal_data.ksnd_peers, - sizeof(struct list_head) * - ksocknal_data.ksnd_peer_hash_size); - if (ksocknal_data.ksnd_peers == NULL) - return -ENOMEM; + memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */ - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) - INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]); + hash_init(ksocknal_data.ksnd_peers); rwlock_init(&ksocknal_data.ksnd_global_lock); INIT_LIST_HEAD(&ksocknal_data.ksnd_nets); @@ -2459,9 +2425,7 @@ ksocknal_debug_peerhash(struct lnet_ni *ni) 
read_lock(&ksocknal_data.ksnd_global_lock); - for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each_entry(peer_ni, &ksocknal_data.ksnd_peers[i], - ksnp_list) { + hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) { struct ksock_route *route; struct ksock_conn *conn; @@ -2491,10 +2455,9 @@ ksocknal_debug_peerhash(struct lnet_ni *ni) atomic_read(&conn->ksnc_sock_refcount), conn->ksnc_type, conn->ksnc_closing); } - goto done; - } + break; } -done: + read_unlock(&ksocknal_data.ksnd_global_lock); } diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h index 58ed39d..83582da 100644 --- a/lnet/klnds/socklnd/socklnd.h +++ b/lnet/klnds/socklnd/socklnd.h @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -62,15 +63,15 @@ #define SOCKNAL_NSCHEDS 3 #define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) -#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer_ni lists */ -#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ -#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ -#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */ +#define SOCKNAL_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */ +#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ +#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ +#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */ -#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ -#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ +#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ +#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ -#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ +#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ /* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). * no risk if we're not running on a CONFIG_HIGHMEM platform. 
*/ @@ -178,8 +179,7 @@ struct ksock_nal_data { /* stabilize peer_ni/conn ops */ rwlock_t ksnd_global_lock; /* hash table of all my known peers */ - struct list_head *ksnd_peers; - int ksnd_peer_hash_size; /* size of ksnd_peers */ + DECLARE_HASHTABLE(ksnd_peers, SOCKNAL_PEER_HASH_BITS); int ksnd_nthreads; /* # live threads */ int ksnd_shuttingdown; /* tell threads to exit */ @@ -383,26 +383,26 @@ struct ksock_route { #define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ struct ksock_peer_ni { - struct list_head ksnp_list; /* stash on global peer_ni list */ + struct hlist_node ksnp_list; /* stash on global peer_ni list */ time64_t ksnp_last_alive;/* when (in seconds) I was last alive */ struct lnet_process_id ksnp_id; /* who's on the other end(s) */ - atomic_t ksnp_refcount; /* # users */ - int ksnp_closing; /* being closed */ - int ksnp_accepting;/* # passive connections pending */ - int ksnp_error; /* errno on closing last conn */ - __u64 ksnp_zc_next_cookie;/* ZC completion cookie */ - __u64 ksnp_incarnation; /* latest known peer_ni incarnation */ - struct ksock_proto *ksnp_proto; /* latest known peer_ni protocol */ + atomic_t ksnp_refcount; /* # users */ + int ksnp_closing; /* being closed */ + int ksnp_accepting; /* # passive connections pending */ + int ksnp_error; /* errno on closing last conn */ + __u64 ksnp_zc_next_cookie;/* ZC completion cookie */ + __u64 ksnp_incarnation; /* latest known peer_ni incarnation */ + struct ksock_proto *ksnp_proto; /* latest known peer_ni protocol */ struct list_head ksnp_conns; /* all active connections */ struct list_head ksnp_routes; /* routes */ struct list_head ksnp_tx_queue; /* waiting packets */ - spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ + spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ /* zero copy requests wait for ACK */ struct list_head ksnp_zc_req_list; time64_t ksnp_send_keepalive; /* time to send keepalive */ - struct lnet_ni *ksnp_ni; /* which network */ - int ksnp_n_passive_ips; /* # of... */ - __u32 ksnp_passive_ips[LNET_INTERFACES_NUM]; /* preferred local interfaces */ + struct lnet_ni *ksnp_ni; /* which network */ + int ksnp_n_passive_ips; /* # of... */ + __u32 ksnp_passive_ips[LNET_INTERFACES_NUM]; /* preferred local interfaces */ }; struct ksock_connreq { @@ -473,14 +473,6 @@ ksocknal_route_mask(void) (1 << SOCKLND_CONN_BULK_OUT)); } -static inline struct list_head * -ksocknal_nid2peerlist (lnet_nid_t nid) -{ - unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size; - - return (&ksocknal_data.ksnd_peers [hash]); -} - static inline void ksocknal_conn_addref(struct ksock_conn *conn) { diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index 68fdb1d..390185d 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -2502,62 +2502,65 @@ __must_hold(&ksocknal_data.ksnd_global_lock) static void ksocknal_check_peer_timeouts(int idx) { - struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; + struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx]; struct ksock_peer_ni *peer_ni; struct ksock_conn *conn; struct ksock_tx *tx; again: - /* NB. We expect to have a look at all the peers and not find any - * connections to time out, so we just use a shared lock while we - * take a look... */ + /* NB. We expect to have a look at all the peers and not find any + * connections to time out, so we just use a shared lock while we + * take a look... 
+ */ read_lock(&ksocknal_data.ksnd_global_lock); - list_for_each_entry(peer_ni, peers, ksnp_list) { + hlist_for_each_entry(peer_ni, peers, ksnp_list) { struct ksock_tx *tx_stale; time64_t deadline = 0; int resid = 0; int n = 0; - if (ksocknal_send_keepalive_locked(peer_ni) != 0) { + if (ksocknal_send_keepalive_locked(peer_ni) != 0) { read_unlock(&ksocknal_data.ksnd_global_lock); - goto again; - } + goto again; + } - conn = ksocknal_find_timed_out_conn (peer_ni); + conn = ksocknal_find_timed_out_conn(peer_ni); - if (conn != NULL) { + if (conn != NULL) { read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); - /* NB we won't find this one again, but we can't - * just proceed with the next peer_ni, since we dropped - * ksnd_global_lock and it might be dead already! */ - ksocknal_conn_decref(conn); - goto again; - } + /* NB we won't find this one again, but we can't + * just proceed with the next peer_ni, since we dropped + * ksnd_global_lock and it might be dead already! + */ + ksocknal_conn_decref(conn); + goto again; + } - /* we can't process stale txs right here because we're - * holding only shared lock */ + /* we can't process stale txs right here because we're + * holding only shared lock + */ if (!list_empty(&peer_ni->ksnp_tx_queue)) { struct ksock_tx *tx; tx = list_entry(peer_ni->ksnp_tx_queue.next, struct ksock_tx, tx_list); if (ktime_get_seconds() >= tx->tx_deadline) { - ksocknal_peer_addref(peer_ni); + ksocknal_peer_addref(peer_ni); read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_flush_stale_txs(peer_ni); + ksocknal_flush_stale_txs(peer_ni); - ksocknal_peer_decref(peer_ni); - goto again; - } - } + ksocknal_peer_decref(peer_ni); + goto again; + } + } if (list_empty(&peer_ni->ksnp_zc_req_list)) - continue; + continue; tx_stale = NULL; spin_lock(&peer_ni->ksnp_lock); @@ -2676,19 +2679,20 @@ int ksocknal_reaper(void *arg) nenomem_conns++; } - /* careful with the jiffy wrap... */ + /* careful with the jiffy wrap... */ while ((timeout = deadline - ktime_get_seconds()) <= 0) { - const int n = 4; - const int p = 1; - int chunk = ksocknal_data.ksnd_peer_hash_size; + const int n = 4; + const int p = 1; + int chunk = HASH_SIZE(ksocknal_data.ksnd_peers); unsigned int lnd_timeout; - /* Time to check for timeouts on a few more peers: I do - * checks every 'p' seconds on a proportion of the peer_ni - * table and I need to check every connection 'n' times - * within a timeout interval, to ensure I detect a - * timeout on any connection within (n+1)/n times the - * timeout interval. */ + /* Time to check for timeouts on a few more peers: I + * do checks every 'p' seconds on a proportion of the + * peer_ni table and I need to check every connection + * 'n' times within a timeout interval, to ensure I + * detect a timeout on any connection within (n+1)/n + * times the timeout interval. + */ lnd_timeout = lnet_get_lnd_timeout(); if (lnd_timeout > n * p) @@ -2696,14 +2700,14 @@ int ksocknal_reaper(void *arg) if (chunk == 0) chunk = 1; - for (i = 0; i < chunk; i++) { - ksocknal_check_peer_timeouts (peer_index); - peer_index = (peer_index + 1) % - ksocknal_data.ksnd_peer_hash_size; - } + for (i = 0; i < chunk; i++) { + ksocknal_check_peer_timeouts(peer_index); + peer_index = (peer_index + 1) % + HASH_SIZE(ksocknal_data.ksnd_peers); + } deadline += p; - } + } if (nenomem_conns != 0) { /* Reduce my timeout if I rescheduled ENOMEM conns.
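---

For reference only (not part of the patch): a minimal, self-contained sketch of the <linux/hashtable.h> pattern this conversion adopts for ksnd_peers. The demo_* struct, field and function names are hypothetical stand-ins; only the hashtable.h calls (DECLARE_HASHTABLE, hash_init, hash_add, hash_for_each_possible, hash_for_each, hash_del, HASH_SIZE) mirror what the diff above does with ksocknal_data.ksnd_peers and ksnp_list.

/* Sketch of the <linux/hashtable.h> usage adopted by this patch.
 * Not Lustre code: demo_* names are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

#define DEMO_PEER_HASH_BITS 7	/* 128 buckets, cf. SOCKNAL_PEER_HASH_BITS */

struct demo_peer {
	struct hlist_node	dp_list;	/* chains the peer into a bucket */
	u64			dp_nid;		/* hash key (a NID here) */
};

/* Table embedded in a singleton, like ksnd_peers in ksock_nal_data. */
struct demo_data {
	DECLARE_HASHTABLE(dd_peers, DEMO_PEER_HASH_BITS);
};

static struct demo_data demo_data;

static void demo_init(void)
{
	hash_init(demo_data.dd_peers);	/* every bucket becomes an empty hlist */
}

/* Insert: hash_add() hashes the key and links the node at the head of
 * the matching bucket, so new entries sit at the front of the chain
 * (the ordering change the commit message calls out). */
static struct demo_peer *demo_peer_add(u64 nid)
{
	struct demo_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);

	if (!peer)
		return NULL;
	peer->dp_nid = nid;
	hash_add(demo_data.dd_peers, &peer->dp_list, nid);
	return peer;
}

/* Lookup: hash_for_each_possible() walks only the one bucket the key
 * hashes to; the body must still compare the key itself, just as
 * ksocknal_find_peer_locked() still checks ksnp_id.nid. */
static struct demo_peer *demo_peer_find(u64 nid)
{
	struct demo_peer *peer;

	hash_for_each_possible(demo_data.dd_peers, peer, dp_list, nid)
		if (peer->dp_nid == nid)
			return peer;
	return NULL;
}

/* Remove: hash_del() unlinks the hlist_node (cf. hlist_del() on
 * ksnp_list in ksocknal_unlink_peer_locked()); the caller frees it. */
static void demo_peer_del(struct demo_peer *peer)
{
	hash_del(&peer->dp_list);
	kfree(peer);
}

/* Whole-table walk, replacing the old "for each bucket, list_for_each"
 * double loop; hash_for_each_safe() is the variant to use when the
 * body may delete entries. */
static void demo_peer_dump(void)
{
	struct demo_peer *peer;
	int bkt;

	hash_for_each(demo_data.dd_peers, bkt, peer, dp_list)
		pr_info("bucket %d: nid %llu\n", bkt, peer->dp_nid);
}

/* Partitioned walk of a single bucket, the pattern
 * ksocknal_check_peer_timeouts() relies on; the caller cycles idx
 * modulo HASH_SIZE(demo_data.dd_peers). */
static void demo_check_bucket(int idx)
{
	struct demo_peer *peer;

	hlist_for_each_entry(peer, &demo_data.dd_peers[idx], dp_list)
		pr_info("checking bucket %d: nid %llu\n", idx, peer->dp_nid);
}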