* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <libcfs/linux/linux-mem.h>
#include "socklnd.h"
struct ksock_tx *
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
{
- lnet_kiov_t *kiov = tx->tx_kiov;
+ struct bio_vec *kiov = tx->tx_kiov;
int nob;
int rc;
do {
LASSERT(tx->tx_nkiov > 0);
- if (nob < (int)kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
+ if (nob < (int)kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
return rc;
}
- nob -= (int)kiov->kiov_len;
+ nob -= (int)kiov->bv_len;
tx->tx_kiov = ++kiov;
tx->tx_nkiov--;
} while (nob != 0);
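/* For reference, a minimal sketch (not part of this patch): lnet_kiov_t
 * had the same layout as struct bio_vec, so the conversion is a 1:1
 * field rename (kiov_page/kiov_len/kiov_offset become
 * bv_page/bv_len/bv_offset).  The hypothetical helper below isolates the
 * "consume nob bytes from the front of a fragment" step used by both the
 * send and receive loops. */
static inline void ksock_frag_consume(struct bio_vec *bv, unsigned int nob)
{
	/* caller has already checked nob < bv->bv_len */
	bv->bv_offset += nob;
	bv->bv_len -= nob;
}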
int rc;
int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
- }
+ if (ksocknal_data.ksnd_stall_tx != 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
LASSERT(tx->tx_resid != 0);
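/* A sketch (not part of this patch) of why the stall-path change above
 * is behaviour-preserving: schedule_timeout_uninterruptible(), from
 * kernel/time/timer.c, is essentially the two-step pattern it replaces. */
static signed long demo_schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}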
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
struct kvec *scratch_iov)
{
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct bio_vec *kiov = conn->ksnc_rx_kiov;
int nob;
int rc;
LASSERT(conn->ksnc_rx_nkiov > 0);
do {
LASSERT(conn->ksnc_rx_nkiov > 0);
- if (nob < (int) kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
+ if (nob < (int) kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
return -EAGAIN;
}
- nob -= kiov->kiov_len;
+ nob -= kiov->bv_len;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
} while (nob != 0);
int rc;
ENTRY;
- if (ksocknal_data.ksnd_stall_rx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
- }
+ if (ksocknal_data.ksnd_stall_rx != 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
- int mpflag = 1;
+ /* '1' for consistency with code that checks !mpflag to restore */
+ unsigned int mpflag = 1;
int type = lntmsg->msg_type;
struct lnet_process_id target = lntmsg->msg_target;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
struct ksock_tx *tx;
- int desc_size;
- int rc;
+ int desc_size;
+ int rc;
/* NB 'private' is different depending on what we're sending.
* Just ignore it... */
LASSERT (payload_nob == 0 || payload_niov > 0);
LASSERT (payload_niov <= LNET_MAX_IOV);
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
LASSERT (!in_interrupt ());
- if (payload_iov != NULL)
- desc_size = offsetof(struct ksock_tx,
- tx_frags.virt.iov[1 + payload_niov]);
- else
- desc_size = offsetof(struct ksock_tx,
- tx_frags.paged.kiov[payload_niov]);
+ desc_size = offsetof(struct ksock_tx,
+ tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
- tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
- if (tx == NULL) {
- CERROR("Can't allocate tx desc type %d size %d\n",
- type, desc_size);
- if (lntmsg->msg_vmflush)
- cfs_memory_pressure_restore(mpflag);
- return (-ENOMEM);
- }
+ mpflag = memalloc_noreclaim_save();
- tx->tx_conn = NULL; /* set when assigned a conn */
- tx->tx_lnetmsg = lntmsg;
-
- if (payload_iov != NULL) {
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1 +
- lnet_extract_iov(payload_niov, &tx->tx_iov[1],
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- } else {
- tx->tx_niov = 1;
- tx->tx_iov = &tx->tx_frags.paged.iov;
- tx->tx_kiov = tx->tx_frags.paged.kiov;
- tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
-
- if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
- tx->tx_zc_capable = 1;
- }
+ tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
+ if (tx == NULL) {
+ CERROR("Can't allocate tx desc type %d size %d\n",
+ type, desc_size);
+ if (lntmsg->msg_vmflush)
+ memalloc_noreclaim_restore(mpflag);
+ return -ENOMEM;
+ }
+
+ tx->tx_conn = NULL; /* set when assigned a conn */
+ tx->tx_lnetmsg = lntmsg;
+
+ tx->tx_niov = 1;
+ tx->tx_iov = &tx->tx_frags.paged.iov;
+ tx->tx_kiov = tx->tx_frags.paged.kiov;
+ tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+
+ if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
+ tx->tx_zc_capable = 1;
tx->tx_msg.ksm_csum = 0;
tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_msg.ksm_zc_cookies[1] = 0;
- /* The first fragment will be set later in pro_pack */
- rc = ksocknal_launch_packet(ni, tx, target);
- if (!mpflag)
- cfs_memory_pressure_restore(mpflag);
+ /* The first fragment will be set later in pro_pack */
+ rc = ksocknal_launch_packet(ni, tx, target);
+ /*
+ * We can't test lntmsg->msg_vmflush again as lntmsg may
+ * have been freed.
+ */
+ if (!mpflag)
+ memalloc_noreclaim_restore(mpflag);
if (rc == 0)
return (0);
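/* A sketch (not part of this patch) of the mm helpers used above, as in
 * include/linux/sched/mm.h: save sets PF_MEMALLOC and hands back its old
 * value; restore puts that value back.  This explains the '1' sentinel:
 * a non-zero mpflag means either save was never called (msg_vmflush was
 * clear) or PF_MEMALLOC was already set, and in both cases skipping the
 * restore is correct, so only !mpflag needs the explicit clear. */
static inline unsigned int demo_memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;

	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void demo_memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}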
ksocknal_thread_fini (void)
{
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads--;
+ if (--ksocknal_data.ksnd_nthreads == 0)
+ wake_up_var(&ksocknal_data.ksnd_nthreads);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
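/* The wake_up_var() added above pairs with a wait_var_event() on the
 * same address.  A sketch (not part of this patch; the helper name is
 * hypothetical) of the shutdown side that would sleep until the last
 * thread has gone: */
static void demo_wait_for_threads(void)
{
	wait_var_event(&ksocknal_data.ksnd_nthreads,
		       ksocknal_data.ksnd_nthreads == 0);
}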
/* Set up to skip as much as possible now. If there's more left
* (ran out of iov entries) we'll get called again */
- conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
- conn->ksnc_rx_nob_left = nob_to_skip;
+ conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
+ conn->ksnc_rx_nob_left = nob_to_skip;
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- skipped = 0;
- niov = 0;
+ skipped = 0;
+ niov = 0;
- do {
- nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
+ do {
+ nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
- conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
- conn->ksnc_rx_iov[niov].iov_len = nob;
- niov++;
- skipped += nob;
- nob_to_skip -=nob;
+ conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
+ conn->ksnc_rx_iov[niov].iov_len = nob;
+ niov++;
+ skipped += nob;
+ nob_to_skip -= nob;
- } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
+ } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
conn->ksnc_rx_niov = niov;
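/* Why min_t(int, ...) rather than the old MIN() above: sizeof() yields a
 * size_t, so the old macro compared the signed nob_to_skip against an
 * unsigned value.  min_t, from include/linux/minmax.h, casts both sides
 * to one explicit type first, roughly:
 *
 *	#define min_t(type, x, y)	min((type)(x), (type)(y))
 */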
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, unsigned int niov, struct kvec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+ int delayed, unsigned int niov,
+ struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
struct ksock_conn *conn = private;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
- if (mlen == 0 || iov != NULL) {
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- conn->ksnc_rx_niov =
- lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- niov, iov, offset, mlen);
- } else {
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- conn->ksnc_rx_nkiov =
- lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- niov, kiov, offset, mlen);
- }
+ if (mlen == 0) {
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
+ conn->ksnc_rx_niov = 0;
+ } else {
+ conn->ksnc_rx_niov = 0;
+ conn->ksnc_rx_iov = NULL;
+ conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
+ conn->ksnc_rx_nkiov =
+ lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
+ niov, kiov, offset, mlen);
+ }
LASSERT (mlen ==
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
return -ENOMEM;
}
- cfs_block_allsigs();
-
rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
if (rc != 0) {
CWARN("Can't set CPU partition affinity to %d: %d\n",
}
if (!list_empty(&sched->kss_tx_conns)) {
- struct list_head zlist = LIST_HEAD_INIT(zlist);
+ LIST_HEAD(zlist);
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist,
- &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
- }
+ list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
conn = list_entry(sched->kss_tx_conns.next,
struct ksock_conn, ksnc_tx_list);
}
spin_unlock_bh(&sched->kss_lock);
- LIBCFS_FREE(rx_scratch_pgs, sizeof(*rx_scratch_pgs) *
- LNET_MAX_IOV);
- LIBCFS_FREE(scratch_iov, sizeof(*scratch_iov) *
- LNET_MAX_IOV);
+ CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
+ CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
ksocknal_thread_fini();
return 0;
}
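/* A sketch (not part of this patch) of the libcfs helper substituted
 * above; as I read libcfs_private.h, it simply wraps the LIBCFS_FREE()
 * calls it replaces:
 *
 *	#define CFS_FREE_PTR_ARRAY(ptr, count) \
 *		LIBCFS_FREE(ptr, (count) * sizeof(*(ptr)))
 */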
EXIT;
}
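/* list_splice_init(), used earlier in the scheduler loop, is the list.h
 * idiom for the open-coded steal it replaces: it transplants every entry
 * of one list onto another and reinitialises the (now empty) source.  A
 * sketch (not part of this patch; names are illustrative): */
static void demo_steal_zombies(struct list_head *src)
{
	LIST_HEAD(zlist);		/* declare + initialise in one step */

	list_splice_init(src, &zlist);	/* src empties, zlist gets it all */
	/* ... drain zlist ... */
}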
-static struct ksock_proto *
-ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
+static const struct ksock_proto *
+ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
__u32 version = 0;
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
struct lnet_magicversion *hmv;
- CLASSERT(sizeof(struct lnet_magicversion) ==
+ BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
offsetof(struct ksock_hello_msg, kshm_src_nid));
hmv = (struct lnet_magicversion *)hello;
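/* Note the condition flip in the conversion above: CLASSERT(expr)
 * asserted that expr holds, while BUILD_BUG_ON(expr) breaks the build
 * when expr holds, so a faithful conversion negates the expression:
 *
 *	CLASSERT(sizeof(a) == sizeof(b));
 *	BUILD_BUG_ON(sizeof(a) != sizeof(b));	// same compile-time check
 */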
int timeout;
int proto_match;
int rc;
- struct ksock_proto *proto;
+ const struct ksock_proto *proto;
struct lnet_process_id recv_id;
/* socket type set on active connections - not set on passive */
*incarnation = hello->kshm_src_incarnation;
- if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
- "from %pI4h\n", &conn->ksnc_ipaddr);
- return -EPROTO;
- }
+ if (hello->kshm_src_nid == LNET_NID_ANY) {
+ CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+ &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
if (!active &&
conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
static int
ksocknal_connect(struct ksock_route *route)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ LIST_HEAD(zombies);
struct ksock_peer_ni *peer_ni = route->ksnr_peer;
int type;
int wanted;
route->ksnr_connecting = 1;
- for (;;) {
- wanted = ksocknal_route_mask() & ~route->ksnr_connected;
+ for (;;) {
+ wanted = ksocknal_route_mask() & ~route->ksnr_connected;
- /* stop connecting if peer_ni/route got closed under me, or
- * route got connected while queued */
- if (peer_ni->ksnp_closing || route->ksnr_deleted ||
- wanted == 0) {
- retry_later = 0;
- break;
- }
+ /* stop connecting if peer_ni/route got closed under me, or
+ * route got connected while queued */
+ if (peer_ni->ksnp_closing || route->ksnr_deleted ||
+ wanted == 0) {
+ retry_later = 0;
+ break;
+ }
- /* reschedule if peer_ni is connecting to me */
- if (peer_ni->ksnp_accepting > 0) {
- CDEBUG(D_NET,
- "peer_ni %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
- retry_later = 1;
- }
+ /* reschedule if peer_ni is connecting to me */
+ if (peer_ni->ksnp_accepting > 0) {
+ CDEBUG(D_NET,
+ "peer_ni %s(%d) already connecting to me, retry later.\n",
+ libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
+ retry_later = 1;
+ }
- if (retry_later) /* needs reschedule */
- break;
+ if (retry_later) /* needs reschedule */
+ break;
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
- type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
- type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
- type = SOCKLND_CONN_BULK_IN;
- } else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
- type = SOCKLND_CONN_BULK_OUT;
- }
+ if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
+ type = SOCKLND_CONN_ANY;
+ } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
+ type = SOCKLND_CONN_CONTROL;
+ } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
+ type = SOCKLND_CONN_BULK_IN;
+ } else {
+ LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
+ type = SOCKLND_CONN_BULK_OUT;
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (ktime_get_seconds() >= deadline) {
- rc = -ETIMEDOUT;
- lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
+ rc = -ETIMEDOUT;
+ lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
- rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
- route->ksnr_myipaddr,
- route->ksnr_ipaddr, route->ksnr_port,
- peer_ni->ksnp_ni->ni_net_ns);
- if (rc != 0)
+ sock = lnet_connect(peer_ni->ksnp_id.nid,
+ route->ksnr_myiface,
+ route->ksnr_ipaddr, route->ksnr_port,
+ peer_ni->ksnp_ni->ni_net_ns);
+ if (IS_ERR(sock)) {
+ rc = PTR_ERR(sock);
goto failed;
+ }
- rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
- if (rc < 0) {
- lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
+ rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
+ if (rc < 0) {
+ lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
- /* A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version */
- retry_later = (rc != 0);
- if (retry_later)
- CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
- libcfs_nid2str(peer_ni->ksnp_id.nid));
+ /* A +ve RC means I have to retry because I lost the connection
+ * race or I have to renegotiate protocol version */
+ retry_later = (rc != 0);
+ if (retry_later)
+ CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
+ libcfs_nid2str(peer_ni->ksnp_id.nid));
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- }
+ }
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
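/* lnet_connect() now hands back the socket directly, using the kernel's
 * ERR_PTR convention (an errno encoded in the pointer) instead of an
 * out-parameter plus return code.  A sketch (not part of this patch) of
 * the general pattern:
 *
 *	struct socket *sock = lnet_connect(...);
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);	// recover the errno
 */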
int nloops = 0;
int cons_retry = 0;
- cfs_block_allsigs();
-
init_waitqueue_entry(&wait, current);
spin_lock_bh(connd_lock);
nloops = 0;
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
}
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
struct ksock_tx *tx;
- struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
+ LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &stale_txs);
+ list_move_tail(&tx->tx_list, &stale_txs);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
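/* A sketch (not part of this patch) of why list_move_tail() is the
 * drop-in replacement above; include/linux/list.h defines it as exactly
 * the del + add_tail pair it supersedes: */
static inline void demo_list_move_tail(struct list_head *entry,
				       struct list_head *head)
{
	__list_del_entry(entry);
	list_add_tail(entry, head);
}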
static void
ksocknal_check_peer_timeouts(int idx)
{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
struct ksock_tx *tx;
again:
- /* NB. We expect to have a look at all the peers and not find any
- * connections to time out, so we just use a shared lock while we
- * take a look... */
+ /* NB. We expect to have a look at all the peers and not find any
+ * connections to time out, so we just use a shared lock while we
+ * take a look...
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
- list_for_each_entry(peer_ni, peers, ksnp_list) {
+ hlist_for_each_entry(peer_ni, peers, ksnp_list) {
struct ksock_tx *tx_stale;
time64_t deadline = 0;
int resid = 0;
int n = 0;
- if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
+ if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- goto again;
- }
+ goto again;
+ }
- conn = ksocknal_find_timed_out_conn (peer_ni);
+ conn = ksocknal_find_timed_out_conn(peer_ni);
- if (conn != NULL) {
+ if (conn != NULL) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
- /* NB we won't find this one again, but we can't
- * just proceed with the next peer_ni, since we dropped
- * ksnd_global_lock and it might be dead already! */
- ksocknal_conn_decref(conn);
- goto again;
- }
+ /* NB we won't find this one again, but we can't
+ * just proceed with the next peer_ni, since we dropped
+ * ksnd_global_lock and it might be dead already!
+ */
+ ksocknal_conn_decref(conn);
+ goto again;
+ }
- /* we can't process stale txs right here because we're
- * holding only shared lock */
+ /* we can't process stale txs right here because we're
+ * holding only shared lock
+ */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
struct ksock_tx *tx;
tx = list_entry(peer_ni->ksnp_tx_queue.next,
struct ksock_tx, tx_list);
if (ktime_get_seconds() >= tx->tx_deadline) {
- ksocknal_peer_addref(peer_ni);
+ ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_flush_stale_txs(peer_ni);
+ ksocknal_flush_stale_txs(peer_ni);
- ksocknal_peer_decref(peer_ni);
- goto again;
- }
- }
+ ksocknal_peer_decref(peer_ni);
+ goto again;
+ }
+ }
if (list_empty(&peer_ni->ksnp_zc_req_list))
- continue;
+ continue;
tx_stale = NULL;
spin_lock(&peer_ni->ksnp_lock);
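/* The peer-table changes in this function follow from ksnd_peers having
 * become a hashtable.h table (hlist_head buckets), so HASH_SIZE()
 * replaces the old ksnd_peer_hash_size field and hlist_for_each_entry()
 * replaces list_for_each_entry().  A sketch (not part of this patch; the
 * table name and bucket count are illustrative): */
#include <linux/hashtable.h>

static DEFINE_HASHTABLE(demo_peers, 8);		/* 2^8 = 256 buckets */

static void demo_walk_bucket(int idx)
{
	struct ksock_peer_ni *peer_ni;

	/* HASH_SIZE(demo_peers) == 256; idx must stay below it */
	hlist_for_each_entry(peer_ni, &demo_peers[idx], ksnp_list)
		;	/* examine peer_ni, as the timeout scan does */
}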
wait_queue_entry_t wait;
struct ksock_conn *conn;
struct ksock_sched *sched;
- struct list_head enomem_conns;
+ LIST_HEAD(enomem_conns);
int nenomem_conns;
time64_t timeout;
int i;
int peer_index = 0;
time64_t deadline = ktime_get_seconds();
- cfs_block_allsigs ();
-
- INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
ksocknal_destroy_conn(conn);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- continue;
- }
+ continue;
+ }
- if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
- }
+ list_splice_init(&ksocknal_data.ksnd_enomem_conns,
+ &enomem_conns);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
nenomem_conns++;
}
- /* careful with the jiffy wrap... */
+ /* careful with the jiffy wrap... */
while ((timeout = deadline - ktime_get_seconds()) <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = ksocknal_data.ksnd_peer_hash_size;
+ const int n = 4;
+ const int p = 1;
+ int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
unsigned int lnd_timeout;
- /* Time to check for timeouts on a few more peers: I do
- * checks every 'p' seconds on a proportion of the peer_ni
- * table and I need to check every connection 'n' times
- * within a timeout interval, to ensure I detect a
- * timeout on any connection within (n+1)/n times the
- * timeout interval. */
+ /* Time to check for timeouts on a few more peers: I
+ * do checks every 'p' seconds on a proportion of the
+ * peer_ni table and I need to check every connection
+ * 'n' times within a timeout interval, to ensure I
+ * detect a timeout on any connection within (n+1)/n
+ * times the timeout interval.
+ */
lnd_timeout = lnet_get_lnd_timeout();
if (lnd_timeout > n * p)
if (chunk == 0)
chunk = 1;
- for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts (peer_index);
- peer_index = (peer_index + 1) %
- ksocknal_data.ksnd_peer_hash_size;
- }
+ for (i = 0; i < chunk; i++) {
+ ksocknal_check_peer_timeouts(peer_index);
+ peer_index = (peer_index + 1) %
+ HASH_SIZE(ksocknal_data.ksnd_peers);
+ }
deadline += p;
- }
+ }
if (nenomem_conns != 0) {
/* Reduce my timeout if I rescheduled ENOMEM conns.