LU-13004 lnet: remove lnet_extract_iov()
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index 2cda73c..c878b2b 100644
@@ -24,6 +24,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <libcfs/linux/linux-mem.h>
 #include "socklnd.h"
 
 struct ksock_tx *
@@ -152,7 +153,7 @@ static int
 ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
                   struct kvec *scratch_iov)
 {
-       lnet_kiov_t *kiov = tx->tx_kiov;
+       struct bio_vec *kiov = tx->tx_kiov;
        int nob;
        int rc;
 
@@ -173,13 +174,13 @@ ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
        do {
                LASSERT(tx->tx_nkiov > 0);
 
-               if (nob < (int)kiov->kiov_len) {
-                       kiov->kiov_offset += nob;
-                       kiov->kiov_len -= nob;
+               if (nob < (int)kiov->bv_len) {
+                       kiov->bv_offset += nob;
+                       kiov->bv_len -= nob;
                        return rc;
                }
 
-               nob -= (int)kiov->kiov_len;
+               nob -= (int)kiov->bv_len;
                tx->tx_kiov = ++kiov;
                tx->tx_nkiov--;
        } while (nob != 0);
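
The kiov_* to bv_* renames track LNet's switch from its private lnet_kiov_t to the kernel's own struct bio_vec. The correspondence is one-to-one; a sketch of the mainline layout (linux/bvec.h) annotated with the old names:

	/* struct bio_vec: one contiguous byte range within a page */
	struct bio_vec {
		struct page	*bv_page;	/* was kiov_page   */
		unsigned int	 bv_len;	/* was kiov_len    */
		unsigned int	 bv_offset;	/* was kiov_offset */
	};

Since the layouts match, the advance-by-nob loops above and below keep their shape; only the member names change.
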
@@ -194,10 +195,9 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
        int     rc;
        int     bufnob;
 
-       if (ksocknal_data.ksnd_stall_tx != 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
-       }
+       if (ksocknal_data.ksnd_stall_tx != 0)
+               schedule_timeout_uninterruptible(
+                       cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 
        LASSERT(tx->tx_resid != 0);
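
schedule_timeout_uninterruptible() is a long-standing kernel helper that folds the removed two-line pattern into one call; its implementation is effectively the code this hunk deletes:

	/* sketch of the kernel helper (kernel/time/timer.c) */
	signed long schedule_timeout_uninterruptible(signed long timeout)
	{
		__set_current_state(TASK_UNINTERRUPTIBLE);
		return schedule_timeout(timeout);
	}
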
 
@@ -303,7 +303,7 @@ static int
 ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
                   struct kvec *scratch_iov)
 {
-       lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+       struct bio_vec *kiov = conn->ksnc_rx_kiov;
        int nob;
        int rc;
        LASSERT(conn->ksnc_rx_nkiov > 0);
@@ -330,13 +330,13 @@ ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
        do {
                LASSERT(conn->ksnc_rx_nkiov > 0);
 
-               if (nob < (int) kiov->kiov_len) {
-                       kiov->kiov_offset += nob;
-                       kiov->kiov_len -= nob;
+               if (nob < (int) kiov->bv_len) {
+                       kiov->bv_offset += nob;
+                       kiov->bv_len -= nob;
                        return -EAGAIN;
                }
 
-               nob -= kiov->kiov_len;
+               nob -= kiov->bv_len;
                conn->ksnc_rx_kiov = ++kiov;
                conn->ksnc_rx_nkiov--;
        } while (nob != 0);
@@ -354,10 +354,9 @@ ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
        int     rc;
        ENTRY;
 
-       if (ksocknal_data.ksnd_stall_rx != 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
-       }
+       if (ksocknal_data.ksnd_stall_rx != 0)
+               schedule_timeout_uninterruptible(
+                       cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
 
        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
@@ -984,17 +983,17 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
 int
 ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 {
-       int mpflag = 1;
+       /* '1' for consistency with code that checks !mpflag to restore */
+       unsigned int mpflag = 1;
        int type = lntmsg->msg_type;
        struct lnet_process_id target = lntmsg->msg_target;
-        unsigned int      payload_niov = lntmsg->msg_niov;
-       struct kvec *payload_iov = lntmsg->msg_iov;
-        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
-        unsigned int      payload_offset = lntmsg->msg_offset;
-        unsigned int      payload_nob = lntmsg->msg_len;
+       unsigned int payload_niov = lntmsg->msg_niov;
+       struct bio_vec *payload_kiov = lntmsg->msg_kiov;
+       unsigned int payload_offset = lntmsg->msg_offset;
+       unsigned int payload_nob = lntmsg->msg_len;
        struct ksock_tx *tx;
-        int               desc_size;
-        int               rc;
+       int desc_size;
+       int rc;
 
         /* NB 'private' is different depending on what we're sending.
          * Just ignore it... */
@@ -1004,60 +1003,49 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 
        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= LNET_MAX_IOV);
-       /* payload is either all vaddrs or all pages */
-       LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
        LASSERT (!in_interrupt ());
 
-       if (payload_iov != NULL)
-               desc_size = offsetof(struct ksock_tx,
-                                    tx_frags.virt.iov[1 + payload_niov]);
-       else
-               desc_size = offsetof(struct ksock_tx,
-                                    tx_frags.paged.kiov[payload_niov]);
+       desc_size = offsetof(struct ksock_tx,
+                            tx_frags.paged.kiov[payload_niov]);
 
         if (lntmsg->msg_vmflush)
-                mpflag = cfs_memory_pressure_get_and_set();
-        tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
-        if (tx == NULL) {
-                CERROR("Can't allocate tx desc type %d size %d\n",
-                       type, desc_size);
-                if (lntmsg->msg_vmflush)
-                        cfs_memory_pressure_restore(mpflag);
-                return (-ENOMEM);
-        }
+               mpflag = memalloc_noreclaim_save();
 
-        tx->tx_conn = NULL;                     /* set when assigned a conn */
-        tx->tx_lnetmsg = lntmsg;
-
-        if (payload_iov != NULL) {
-                tx->tx_kiov = NULL;
-                tx->tx_nkiov = 0;
-                tx->tx_iov = tx->tx_frags.virt.iov;
-                tx->tx_niov = 1 +
-                              lnet_extract_iov(payload_niov, &tx->tx_iov[1],
-                                               payload_niov, payload_iov,
-                                               payload_offset, payload_nob);
-        } else {
-                tx->tx_niov = 1;
-                tx->tx_iov = &tx->tx_frags.paged.iov;
-                tx->tx_kiov = tx->tx_frags.paged.kiov;
-                tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
-                                                 payload_niov, payload_kiov,
-                                                 payload_offset, payload_nob);
-
-                if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
-                        tx->tx_zc_capable = 1;
-        }
+       tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
+       if (tx == NULL) {
+               CERROR("Can't allocate tx desc type %d size %d\n",
+                      type, desc_size);
+               if (lntmsg->msg_vmflush)
+                       memalloc_noreclaim_restore(mpflag);
+               return -ENOMEM;
+       }
+
+       tx->tx_conn = NULL;                     /* set when assigned a conn */
+       tx->tx_lnetmsg = lntmsg;
+
+       tx->tx_niov = 1;
+       tx->tx_iov = &tx->tx_frags.paged.iov;
+       tx->tx_kiov = tx->tx_frags.paged.kiov;
+       tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
+                                        payload_niov, payload_kiov,
+                                        payload_offset, payload_nob);
+
+       if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
+               tx->tx_zc_capable = 1;
 
        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = 0;
 
-        /* The first fragment will be set later in pro_pack */
-        rc = ksocknal_launch_packet(ni, tx, target);
-        if (!mpflag)
-                cfs_memory_pressure_restore(mpflag);
+       /* The first fragment will be set later in pro_pack */
+       rc = ksocknal_launch_packet(ni, tx, target);
+       /*
+        * We can't test lntmsg->msg_vmflush again as lntmsg may
+        * have been freed.
+        */
+       if (!mpflag)
+               memalloc_noreclaim_restore(mpflag);
 
         if (rc == 0)
                 return (0);
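
memalloc_noreclaim_save()/memalloc_noreclaim_restore() (linux/sched/mm.h) replace the libcfs memory-pressure wrappers with the upstream idiom: set PF_MEMALLOC on the current task for the critical section and hand back the previous bit so sections nest correctly. A sketch close to the mainline definitions:

	static inline unsigned int memalloc_noreclaim_save(void)
	{
		unsigned int flags = current->flags & PF_MEMALLOC;

		current->flags |= PF_MEMALLOC;	/* may dip into reserves; no reclaim recursion */
		return flags;			/* 0 or PF_MEMALLOC: the prior state */
	}

	static inline void memalloc_noreclaim_restore(unsigned int flags)
	{
		current->flags = (current->flags & ~PF_MEMALLOC) | flags;
	}

This also explains the '1' initializer and the "!mpflag" test above: the save helper returns either 0 or PF_MEMALLOC, so mpflag is 0 only when the save path actually ran and the bit was previously clear, which is exactly the one case where a restore is needed.
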
@@ -1085,7 +1073,8 @@ void
 ksocknal_thread_fini (void)
 {
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
-        ksocknal_data.ksnd_nthreads--;
+       if (--ksocknal_data.ksnd_nthreads == 0)
+               wake_up_var(&ksocknal_data.ksnd_nthreads);
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 }
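
wake_up_var() pairs with wait_var_event() (linux/wait_bit.h), letting shutdown sleep until the last thread is gone instead of polling ksnd_nthreads. A hypothetical waiter, assuming a ksocknal_base_shutdown()-style caller:

	/* sketch: block until every socklnd thread has run ksocknal_thread_fini() */
	wait_var_event(&ksocknal_data.ksnd_nthreads,
		       ksocknal_data.ksnd_nthreads == 0);

The wake side must come after the decrement, as above, so a waiter re-checking the condition cannot miss the final transition to zero.
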
 
@@ -1145,22 +1134,22 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
         /* Set up to skip as much as possible now.  If there's more left
          * (ran out of iov entries) we'll get called again */
 
-        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
-        conn->ksnc_rx_nob_left = nob_to_skip;
+       conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
+       conn->ksnc_rx_nob_left = nob_to_skip;
        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-        skipped = 0;
-        niov = 0;
+       skipped = 0;
+       niov = 0;
 
-        do {
-                nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
+       do {
+               nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
 
-                conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
-                conn->ksnc_rx_iov[niov].iov_len  = nob;
-                niov++;
-                skipped += nob;
-                nob_to_skip -=nob;
+               conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
+               conn->ksnc_rx_iov[niov].iov_len  = nob;
+               niov++;
+               skipped += nob;
+               nob_to_skip -= nob;
 
-        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
+       } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
 
         conn->ksnc_rx_niov = niov;
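
min_t() (linux/minmax.h in current kernels) forces both operands to the named type before comparing, which sidesteps the signed/unsigned mismatch the old MIN() had between the int nob_to_skip and the size_t result of sizeof(). The classic definition, roughly:

	#define min_t(type, x, y) ({		\
		type __x = (x);			\
		type __y = (y);			\
		__x < __y ? __x : __y; })
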
@@ -1385,8 +1374,8 @@ ksocknal_process_receive(struct ksock_conn *conn,
 
 int
 ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
-             int delayed, unsigned int niov, struct kvec *iov,
-             lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+             int delayed, unsigned int niov,
+             struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
              unsigned int rlen)
 {
        struct ksock_conn *conn = private;
@@ -1399,21 +1388,19 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left   = rlen;
 
-        if (mlen == 0 || iov != NULL) {
-                conn->ksnc_rx_nkiov = 0;
-                conn->ksnc_rx_kiov = NULL;
-                conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
-                conn->ksnc_rx_niov =
-                        lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
-                                         niov, iov, offset, mlen);
-        } else {
-                conn->ksnc_rx_niov = 0;
-                conn->ksnc_rx_iov  = NULL;
-                conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
-                conn->ksnc_rx_nkiov =
-                        lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
-                                          niov, kiov, offset, mlen);
-        }
+       if (mlen == 0) {
+               conn->ksnc_rx_nkiov = 0;
+               conn->ksnc_rx_kiov = NULL;
+               conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
+               conn->ksnc_rx_niov = 0;
+       } else {
+               conn->ksnc_rx_niov = 0;
+               conn->ksnc_rx_iov  = NULL;
+               conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
+               conn->ksnc_rx_nkiov =
+                       lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
+                                         niov, kiov, offset, mlen);
+       }
 
         LASSERT (mlen ==
                  lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
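
This hunk is the heart of the cleanup named in the subject line: with lnet_extract_iov() removed, the LNet core only ever describes receive buffers as pages, so the struct kvec branch in the LND receive path is dead code. The only remaining special case is mlen == 0 (nothing wanted), which needs no extraction at all.
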
@@ -1484,8 +1471,6 @@ int ksocknal_scheduler(void *arg)
                return -ENOMEM;
        }
 
-       cfs_block_allsigs();
-
        rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
        if (rc != 0) {
                CWARN("Can't set CPU partition affinity to %d: %d\n",
@@ -1547,11 +1532,7 @@ int ksocknal_scheduler(void *arg)
                if (!list_empty(&sched->kss_tx_conns)) {
                        LIST_HEAD(zlist);
 
-                       if (!list_empty(&sched->kss_zombie_noop_txs)) {
-                               list_add(&zlist,
-                                        &sched->kss_zombie_noop_txs);
-                               list_del_init(&sched->kss_zombie_noop_txs);
-                       }
+                       list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
 
                        conn = list_entry(sched->kss_tx_conns.next,
                                          struct ksock_conn, ksnc_tx_list);
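
list_splice_init() (linux/list.h) is the canonical form of the removed branch: it transplants the whole donor list onto the target and reinitializes the donor, and it already handles the empty case, so the open-coded list_empty() guard goes away. The mainline definition, for reference:

	static inline void list_splice_init(struct list_head *list,
					    struct list_head *head)
	{
		if (!list_empty(list)) {
			__list_splice(list, head, head->next);
			INIT_LIST_HEAD(list);	/* donor left empty and reusable */
		}
	}

The same substitution appears again in ksocknal_reaper() below for ksnd_enomem_conns.
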
@@ -1635,10 +1616,8 @@ int ksocknal_scheduler(void *arg)
        }
 
        spin_unlock_bh(&sched->kss_lock);
-       LIBCFS_FREE(rx_scratch_pgs, sizeof(*rx_scratch_pgs) *
-                   LNET_MAX_IOV);
-       LIBCFS_FREE(scratch_iov, sizeof(*scratch_iov) *
-                   LNET_MAX_IOV);
+       CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
+       CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
        ksocknal_thread_fini();
        return 0;
 }
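
CFS_FREE_PTR_ARRAY() is a libcfs convenience macro; assuming the definition introduced alongside this series, it expands to the same LIBCFS_FREE() with the size derived from the pointee type, so the byte count can no longer drift out of sync with the pointer:

	/* assumed libcfs definition (libcfs_private.h) */
	#define CFS_FREE_PTR_ARRAY(ptr, count) \
		LIBCFS_FREE(ptr, (count) * sizeof(*(ptr)))
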
@@ -1702,8 +1681,8 @@ void ksocknal_write_callback(struct ksock_conn *conn)
        EXIT;
 }
 
-static struct ksock_proto *
-ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
+static const struct ksock_proto *
+ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
 {
         __u32   version = 0;
 
@@ -1801,7 +1780,7 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
         int                  timeout;
         int                  proto_match;
         int                  rc;
-       struct ksock_proto *proto;
+       const struct ksock_proto *proto;
        struct lnet_process_id recv_id;
 
        /* socket type set on active connections - not set on passive */
@@ -1873,11 +1852,11 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
 
         *incarnation = hello->kshm_src_incarnation;
 
-        if (hello->kshm_src_nid == LNET_NID_ANY) {
-                CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
-                      "from %pI4h\n", &conn->ksnc_ipaddr);
-                return -EPROTO;
-        }
+       if (hello->kshm_src_nid == LNET_NID_ANY) {
+               CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+                      &conn->ksnc_ipaddr);
+               return -EPROTO;
+       }
 
         if (!active &&
             conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
@@ -1951,73 +1930,75 @@ ksocknal_connect(struct ksock_route *route)
 
         route->ksnr_connecting = 1;
 
-        for (;;) {
-                wanted = ksocknal_route_mask() & ~route->ksnr_connected;
+       for (;;) {
+               wanted = ksocknal_route_mask() & ~route->ksnr_connected;
 
-                /* stop connecting if peer_ni/route got closed under me, or
-                 * route got connected while queued */
-                if (peer_ni->ksnp_closing || route->ksnr_deleted ||
-                    wanted == 0) {
-                        retry_later = 0;
-                        break;
-                }
+               /* stop connecting if peer_ni/route got closed under me, or
+                * route got connected while queued */
+               if (peer_ni->ksnp_closing || route->ksnr_deleted ||
+                   wanted == 0) {
+                       retry_later = 0;
+                       break;
+               }
 
-                /* reschedule if peer_ni is connecting to me */
-                if (peer_ni->ksnp_accepting > 0) {
-                        CDEBUG(D_NET,
-                               "peer_ni %s(%d) already connecting to me, retry later.\n",
-                               libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
-                        retry_later = 1;
-                }
+               /* reschedule if peer_ni is connecting to me */
+               if (peer_ni->ksnp_accepting > 0) {
+                       CDEBUG(D_NET,
+                              "peer_ni %s(%d) already connecting to me, retry later.\n",
+                              libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
+                       retry_later = 1;
+               }
 
-                if (retry_later) /* needs reschedule */
-                        break;
+               if (retry_later) /* needs reschedule */
+                       break;
 
-                if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
-                        type = SOCKLND_CONN_ANY;
-                } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
-                        type = SOCKLND_CONN_CONTROL;
-                } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
-                        type = SOCKLND_CONN_BULK_IN;
-                } else {
-                        LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
-                        type = SOCKLND_CONN_BULK_OUT;
-                }
+               if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
+                       type = SOCKLND_CONN_ANY;
+               } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
+                       type = SOCKLND_CONN_CONTROL;
+               } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
+                       type = SOCKLND_CONN_BULK_IN;
+               } else {
+                       LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
+                       type = SOCKLND_CONN_BULK_OUT;
+               }
 
                write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
                if (ktime_get_seconds() >= deadline) {
-                        rc = -ETIMEDOUT;
-                        lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
-                                                   route->ksnr_ipaddr,
-                                                   route->ksnr_port);
-                        goto failed;
-                }
+                       rc = -ETIMEDOUT;
+                       lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+                                                  route->ksnr_ipaddr,
+                                                  route->ksnr_port);
+                       goto failed;
+               }
 
-               rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
-                                 route->ksnr_myipaddr,
-                                 route->ksnr_ipaddr, route->ksnr_port,
-                                 peer_ni->ksnp_ni->ni_net_ns);
-               if (rc != 0)
+               sock = lnet_connect(peer_ni->ksnp_id.nid,
+                                   route->ksnr_myiface,
+                                   route->ksnr_ipaddr, route->ksnr_port,
+                                   peer_ni->ksnp_ni->ni_net_ns);
+               if (IS_ERR(sock)) {
+                       rc = PTR_ERR(sock);
                        goto failed;
+               }
 
-                rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
-                if (rc < 0) {
-                        lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
-                                                   route->ksnr_ipaddr,
-                                                   route->ksnr_port);
-                        goto failed;
-                }
+               rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
+               if (rc < 0) {
+                       lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+                                                  route->ksnr_ipaddr,
+                                                  route->ksnr_port);
+                       goto failed;
+               }
 
-                /* A +ve RC means I have to retry because I lost the connection
-                 * race or I have to renegotiate protocol version */
-                retry_later = (rc != 0);
-                if (retry_later)
-                        CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
-                               libcfs_nid2str(peer_ni->ksnp_id.nid));
+               /* A +ve RC means I have to retry because I lost the connection
+                * race or I have to renegotiate protocol version */
+               retry_later = (rc != 0);
+               if (retry_later)
+                       CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
+                              libcfs_nid2str(peer_ni->ksnp_id.nid));
 
                write_lock_bh(&ksocknal_data.ksnd_global_lock);
-        }
+       }
 
         route->ksnr_scheduled = 0;
         route->ksnr_connecting = 0;
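
Two upstream idioms land in this hunk. BIT(n) (linux/bits.h) is simply (1UL << (n)) and replaces the open-coded shifts. More substantially, lnet_connect() now returns the struct socket itself, with failures encoded via ERR_PTR() instead of an int plus an out parameter; a minimal sketch of that return convention, with hypothetical names:

	#include <linux/err.h>

	/* hypothetical: how a connect-style callee reports both outcomes */
	static struct socket *connect_sketch(struct socket *sock, int rc)
	{
		if (rc != 0)
			return ERR_PTR(rc);	/* negative errno hidden in the pointer */
		return sock;			/* success: an ordinary pointer */
	}

Callers then branch on IS_ERR() and recover the errno with PTR_ERR(), exactly as the new code above does. Note also that the route now carries an interface index (ksnr_myiface) rather than a local IP address (ksnr_myipaddr).
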
@@ -2222,8 +2203,6 @@ ksocknal_connd(void *arg)
        int nloops = 0;
        int cons_retry = 0;
 
-       cfs_block_allsigs();
-
        init_waitqueue_entry(&wait, current);
 
        spin_lock_bh(connd_lock);
@@ -2315,7 +2294,6 @@ ksocknal_connd(void *arg)
                nloops = 0;
                schedule_timeout(timeout);
 
-               set_current_state(TASK_RUNNING);
                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
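
The deleted set_current_state(TASK_RUNNING) was a no-op: schedule_timeout() always returns with the task back in TASK_RUNNING, whether it was woken early or the timer fired.
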
@@ -2429,8 +2407,7 @@ ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
 
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
 
-               list_del(&tx->tx_list);
-               list_add_tail(&tx->tx_list, &stale_txs);
+               list_move_tail(&tx->tx_list, &stale_txs);
        }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
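
list_move_tail() (linux/list.h) is precisely the deleted del/add pair in one primitive:

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del_entry(list);		/* unlink from the peer_ni queue */
		list_add_tail(list, head);	/* append to stale_txs */
	}
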
@@ -2503,62 +2480,65 @@ __must_hold(&ksocknal_data.ksnd_global_lock)
 static void
 ksocknal_check_peer_timeouts(int idx)
 {
-       struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+       struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
        struct ksock_peer_ni *peer_ni;
        struct ksock_conn *conn;
        struct ksock_tx *tx;
 
  again:
-        /* NB. We expect to have a look at all the peers and not find any
-         * connections to time out, so we just use a shared lock while we
-         * take a look... */
+       /* NB. We expect to have a look at all the peers and not find any
+        * connections to time out, so we just use a shared lock while we
+        * take a look...
+        */
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-       list_for_each_entry(peer_ni, peers, ksnp_list) {
+       hlist_for_each_entry(peer_ni, peers, ksnp_list) {
                struct ksock_tx *tx_stale;
                time64_t deadline = 0;
                int resid = 0;
                int n = 0;
 
-                if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
+               if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
-                        goto again;
-                }
+                       goto again;
+               }
 
-                conn = ksocknal_find_timed_out_conn (peer_ni);
+               conn = ksocknal_find_timed_out_conn(peer_ni);
 
-                if (conn != NULL) {
+               if (conn != NULL) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                        ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+                       ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 
-                        /* NB we won't find this one again, but we can't
-                         * just proceed with the next peer_ni, since we dropped
-                         * ksnd_global_lock and it might be dead already! */
-                        ksocknal_conn_decref(conn);
-                        goto again;
-                }
+                       /* NB we won't find this one again, but we can't
+                        * just proceed with the next peer_ni, since we dropped
+                        * ksnd_global_lock and it might be dead already!
+                        */
+                       ksocknal_conn_decref(conn);
+                       goto again;
+               }
 
-                /* we can't process stale txs right here because we're
-                 * holding only shared lock */
+               /* we can't process stale txs right here because we're
+                * holding only shared lock
+                */
                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
                        struct ksock_tx *tx;
 
                        tx = list_entry(peer_ni->ksnp_tx_queue.next,
                                        struct ksock_tx, tx_list);
                        if (ktime_get_seconds() >= tx->tx_deadline) {
-                                ksocknal_peer_addref(peer_ni);
+                               ksocknal_peer_addref(peer_ni);
                                read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                                ksocknal_flush_stale_txs(peer_ni);
+                               ksocknal_flush_stale_txs(peer_ni);
 
-                                ksocknal_peer_decref(peer_ni);
-                                goto again;
-                        }
-                }
+                               ksocknal_peer_decref(peer_ni);
+                               goto again;
+                       }
+               }
 
                if (list_empty(&peer_ni->ksnp_zc_req_list))
-                        continue;
+                       continue;
 
                tx_stale = NULL;
                spin_lock(&peer_ni->ksnp_lock);
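
The peer_ni table has become a kernel hashtable: an array of struct hlist_head buckets walked with hlist_for_each_entry() instead of an array of struct list_head walked with list_for_each_entry(). One consequence shows up in the reaper below: the bucket count is now computed from the table itself via HASH_SIZE() (linux/hashtable.h) rather than kept in a separate ksnd_peer_hash_size field:

	/* mainline definition: bucket count of a hashtable declared as an array */
	#define HASH_SIZE(name) (ARRAY_SIZE(name))
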
@@ -2606,16 +2586,13 @@ int ksocknal_reaper(void *arg)
        wait_queue_entry_t wait;
        struct ksock_conn *conn;
        struct ksock_sched *sched;
-       struct list_head enomem_conns;
+       LIST_HEAD(enomem_conns);
        int nenomem_conns;
        time64_t timeout;
        int i;
        int peer_index = 0;
        time64_t deadline = ktime_get_seconds();
 
-        cfs_block_allsigs ();
-
-       INIT_LIST_HEAD(&enomem_conns);
        init_waitqueue_entry(&wait, current);
 
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
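
LIST_HEAD(name) declares the list head and initializes it in the same statement, so enomem_conns can no longer be used before its INIT_LIST_HEAD(). The macro pair from linux/list.h:

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)
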
@@ -2645,14 +2622,11 @@ int ksocknal_reaper(void *arg)
                        ksocknal_destroy_conn(conn);
 
                        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-                        continue;
-                }
+                       continue;
+               }
 
-               if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
-                       list_add(&enomem_conns,
-                                     &ksocknal_data.ksnd_enomem_conns);
-                       list_del_init(&ksocknal_data.ksnd_enomem_conns);
-                }
+               list_splice_init(&ksocknal_data.ksnd_enomem_conns,
+                                &enomem_conns);
 
                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2677,19 +2651,20 @@ int ksocknal_reaper(void *arg)
                         nenomem_conns++;
                 }
 
-                /* careful with the jiffy wrap... */
+               /* careful with the jiffy wrap... */
                while ((timeout = deadline - ktime_get_seconds()) <= 0) {
-                        const int n = 4;
-                        const int p = 1;
-                        int       chunk = ksocknal_data.ksnd_peer_hash_size;
+                       const int n = 4;
+                       const int p = 1;
+                       int  chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
                        unsigned int lnd_timeout;
 
-                        /* Time to check for timeouts on a few more peers: I do
-                         * checks every 'p' seconds on a proportion of the peer_ni
-                         * table and I need to check every connection 'n' times
-                         * within a timeout interval, to ensure I detect a
-                         * timeout on any connection within (n+1)/n times the
-                         * timeout interval. */
+                       /* Time to check for timeouts on a few more peers: I
+                        * do checks every 'p' seconds on a proportion of the
+                        * peer_ni table and I need to check every connection
+                        * 'n' times within a timeout interval, to ensure I
+                        * detect a timeout on any connection within (n+1)/n
+                        * times the timeout interval.
+                        */
 
                        lnd_timeout = lnet_get_lnd_timeout();
                        if (lnd_timeout > n * p)
@@ -2697,14 +2672,14 @@ int ksocknal_reaper(void *arg)
                        if (chunk == 0)
                                chunk = 1;
 
-                        for (i = 0; i < chunk; i++) {
-                                ksocknal_check_peer_timeouts (peer_index);
-                                peer_index = (peer_index + 1) %
-                                             ksocknal_data.ksnd_peer_hash_size;
-                        }
+                       for (i = 0; i < chunk; i++) {
+                               ksocknal_check_peer_timeouts(peer_index);
+                               peer_index = (peer_index + 1) %
+                                       HASH_SIZE(ksocknal_data.ksnd_peers);
+                       }
 
                        deadline += p;
-                }
+               }
 
                 if (nenomem_conns != 0) {
                         /* Reduce my timeout if I rescheduled ENOMEM conns.