LU-10391 socklnd: use interface index to track local addr
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index fb16e04..8439652 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -8,7 +8,7 @@
  *   Author: Phil Schwan <phil@clusterfs.com>
  *   Author: Eric Barton <eric@bartonsoftware.com>
  *
- *   This file is part of Lustre, https://wiki.hpdd.intel.com/
+ *   This file is part of Lustre, https://wiki.whamcloud.com/
  *
  *   Portals is free software; you can redistribute it and/or
  *   modify it under the terms of version 2 of the GNU General Public
@@ -111,282 +111,286 @@ ksocknal_free_tx(struct ksock_tx *tx)
 }
 
 static int
-ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx,
+                 struct kvec *scratch_iov)
 {
        struct kvec *iov = tx->tx_iov;
-        int    nob;
-        int    rc;
+       int    nob;
+       int    rc;
 
-        LASSERT (tx->tx_niov > 0);
+       LASSERT(tx->tx_niov > 0);
 
-        /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
-        rc = ksocknal_lib_send_iov(conn, tx);
+       /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
+       rc = ksocknal_lib_send_iov(conn, tx, scratch_iov);
 
-        if (rc <= 0)                            /* sent nothing? */
-                return (rc);
+       if (rc <= 0)                            /* sent nothing? */
+               return rc;
 
-        nob = rc;
-        LASSERT (nob <= tx->tx_resid);
-        tx->tx_resid -= nob;
+       nob = rc;
+       LASSERT(nob <= tx->tx_resid);
+       tx->tx_resid -= nob;
 
-        /* "consume" iov */
-        do {
-                LASSERT (tx->tx_niov > 0);
+       /* "consume" iov */
+       do {
+               LASSERT(tx->tx_niov > 0);
 
-                if (nob < (int) iov->iov_len) {
+               if (nob < (int) iov->iov_len) {
                        iov->iov_base += nob;
-                        iov->iov_len -= nob;
-                        return (rc);
-                }
+                       iov->iov_len -= nob;
+                       return rc;
+               }
 
-                nob -= iov->iov_len;
-                tx->tx_iov = ++iov;
-                tx->tx_niov--;
-        } while (nob != 0);
+               nob -= iov->iov_len;
+               tx->tx_iov = ++iov;
+               tx->tx_niov--;
+       } while (nob != 0);
 
-        return (rc);
+       return rc;
 }
 
 static int
-ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
+                  struct kvec *scratch_iov)
 {
-       lnet_kiov_t *kiov = tx->tx_kiov;
+       struct bio_vec *kiov = tx->tx_kiov;
        int nob;
        int rc;
 
-        LASSERT (tx->tx_niov == 0);
-        LASSERT (tx->tx_nkiov > 0);
+       LASSERT(tx->tx_niov == 0);
+       LASSERT(tx->tx_nkiov > 0);
 
-        /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
-        rc = ksocknal_lib_send_kiov(conn, tx);
+       /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
+       rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);
 
-        if (rc <= 0)                            /* sent nothing? */
-                return (rc);
+       if (rc <= 0)                            /* sent nothing? */
+               return rc;
 
-        nob = rc;
-        LASSERT (nob <= tx->tx_resid);
-        tx->tx_resid -= nob;
+       nob = rc;
+       LASSERT(nob <= tx->tx_resid);
+       tx->tx_resid -= nob;
 
-        /* "consume" kiov */
-        do {
-                LASSERT(tx->tx_nkiov > 0);
+       /* "consume" kiov */
+       do {
+               LASSERT(tx->tx_nkiov > 0);
 
-                if (nob < (int)kiov->kiov_len) {
-                        kiov->kiov_offset += nob;
-                        kiov->kiov_len -= nob;
-                        return rc;
-                }
+               if (nob < (int)kiov->bv_len) {
+                       kiov->bv_offset += nob;
+                       kiov->bv_len -= nob;
+                       return rc;
+               }
 
-                nob -= (int)kiov->kiov_len;
-                tx->tx_kiov = ++kiov;
-                tx->tx_nkiov--;
-        } while (nob != 0);
+               nob -= (int)kiov->bv_len;
+               tx->tx_kiov = ++kiov;
+               tx->tx_nkiov--;
+       } while (nob != 0);
 
-        return (rc);
+       return rc;
 }
 
 static int
-ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
+                 struct kvec *scratch_iov)
 {
        int     rc;
        int     bufnob;
 
-       if (ksocknal_data.ksnd_stall_tx != 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
-       }
+       if (ksocknal_data.ksnd_stall_tx != 0)
+               schedule_timeout_uninterruptible(
+                       cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 
        LASSERT(tx->tx_resid != 0);
 
-        rc = ksocknal_connsock_addref(conn);
-        if (rc != 0) {
-                LASSERT (conn->ksnc_closing);
-                return (-ESHUTDOWN);
-        }
+       rc = ksocknal_connsock_addref(conn);
+       if (rc != 0) {
+               LASSERT(conn->ksnc_closing);
+               return -ESHUTDOWN;
+       }
 
-        do {
-                if (ksocknal_data.ksnd_enomem_tx > 0) {
-                        /* testing... */
-                        ksocknal_data.ksnd_enomem_tx--;
-                        rc = -EAGAIN;
-                } else if (tx->tx_niov != 0) {
-                        rc = ksocknal_send_iov (conn, tx);
-                } else {
-                        rc = ksocknal_send_kiov (conn, tx);
-                }
+       do {
+               if (ksocknal_data.ksnd_enomem_tx > 0) {
+                       /* testing... */
+                       ksocknal_data.ksnd_enomem_tx--;
+                       rc = -EAGAIN;
+               } else if (tx->tx_niov != 0) {
+                       rc = ksocknal_send_iov(conn, tx, scratch_iov);
+               } else {
+                       rc = ksocknal_send_kiov(conn, tx, scratch_iov);
+               }
 
                bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
-                if (rc > 0)                     /* sent something? */
-                        conn->ksnc_tx_bufnob += rc; /* account it */
+               if (rc > 0)                     /* sent something? */
+                       conn->ksnc_tx_bufnob += rc; /* account it */
 
                if (bufnob < conn->ksnc_tx_bufnob) {
                        /* allocated send buffer bytes < computed; infer
                         * something got ACKed */
                        conn->ksnc_tx_deadline = ktime_get_seconds() +
-                                                *ksocknal_tunables.ksnd_timeout;
+                                                lnet_get_lnd_timeout();
                        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
                        conn->ksnc_tx_bufnob = bufnob;
                        smp_mb();
                }
 
                if (rc <= 0) { /* Didn't write anything? */
+                       /* some stacks return 0 instead of -EAGAIN */
+                       if (rc == 0)
+                               rc = -EAGAIN;
 
-                        if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
-                                rc = -EAGAIN;
-
-                        /* Check if EAGAIN is due to memory pressure */
-                        if(rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
-                                rc = -ENOMEM;
+                       /* Check if EAGAIN is due to memory pressure */
+                       if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
+                               rc = -ENOMEM;
 
-                        break;
-                }
+                       break;
+               }
 
-                /* socket's wmem_queued now includes 'rc' bytes */
+               /* socket's wmem_queued now includes 'rc' bytes */
                atomic_sub (rc, &conn->ksnc_tx_nob);
-                rc = 0;
+               rc = 0;
 
-        } while (tx->tx_resid != 0);
+       } while (tx->tx_resid != 0);
 
-        ksocknal_connsock_decref(conn);
-        return (rc);
+       ksocknal_connsock_decref(conn);
+       return rc;
 }
 
 static int
-ksocknal_recv_iov(struct ksock_conn *conn)
+ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
 {
        struct kvec *iov = conn->ksnc_rx_iov;
-        int     nob;
-        int     rc;
+       int     nob;
+       int     rc;
 
-        LASSERT (conn->ksnc_rx_niov > 0);
+       LASSERT(conn->ksnc_rx_niov > 0);
 
        /* Never touch conn->ksnc_rx_iov or change connection
-         * status inside ksocknal_lib_recv_iov */
-        rc = ksocknal_lib_recv_iov(conn);
+        * status inside ksocknal_lib_recv_iov */
+       rc = ksocknal_lib_recv_iov(conn, scratchiov);
 
-        if (rc <= 0)
-                return (rc);
+       if (rc <= 0)
+               return rc;
 
-        /* received something... */
-        nob = rc;
+       /* received something... */
+       nob = rc;
 
        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
        conn->ksnc_rx_deadline = ktime_get_seconds() +
-                                *ksocknal_tunables.ksnd_timeout;
+                                lnet_get_lnd_timeout();
        smp_mb();                       /* order with setting rx_started */
        conn->ksnc_rx_started = 1;
 
        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;
 
-        do {
-                LASSERT (conn->ksnc_rx_niov > 0);
+       do {
+               LASSERT(conn->ksnc_rx_niov > 0);
 
-                if (nob < (int)iov->iov_len) {
-                        iov->iov_len -= nob;
+               if (nob < (int)iov->iov_len) {
+                       iov->iov_len -= nob;
                        iov->iov_base += nob;
-                        return (-EAGAIN);
-                }
+                       return -EAGAIN;
+               }
 
-                nob -= iov->iov_len;
-                conn->ksnc_rx_iov = ++iov;
-                conn->ksnc_rx_niov--;
-        } while (nob != 0);
+               nob -= iov->iov_len;
+               conn->ksnc_rx_iov = ++iov;
+               conn->ksnc_rx_niov--;
+       } while (nob != 0);
 
-        return (rc);
+       return rc;
 }
 
 static int
-ksocknal_recv_kiov(struct ksock_conn *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
+                  struct kvec *scratch_iov)
 {
-       lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+       struct bio_vec *kiov = conn->ksnc_rx_kiov;
        int nob;
        int rc;
-        LASSERT (conn->ksnc_rx_nkiov > 0);
+       LASSERT(conn->ksnc_rx_nkiov > 0);
 
        /* Never touch conn->ksnc_rx_kiov or change connection
-         * status inside ksocknal_lib_recv_iov */
-        rc = ksocknal_lib_recv_kiov(conn);
+        * status inside ksocknal_lib_recv_iov */
+       rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);
 
-        if (rc <= 0)
-                return (rc);
+       if (rc <= 0)
+               return rc;
 
-        /* received something... */
-        nob = rc;
+       /* received something... */
+       nob = rc;
 
        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
        conn->ksnc_rx_deadline = ktime_get_seconds() +
-                                *ksocknal_tunables.ksnd_timeout;
+                                lnet_get_lnd_timeout();
        smp_mb();                       /* order with setting rx_started */
        conn->ksnc_rx_started = 1;
 
        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;
 
-        do {
-                LASSERT (conn->ksnc_rx_nkiov > 0);
+       do {
+               LASSERT(conn->ksnc_rx_nkiov > 0);
 
-                if (nob < (int) kiov->kiov_len) {
-                        kiov->kiov_offset += nob;
-                        kiov->kiov_len -= nob;
-                        return -EAGAIN;
-                }
+               if (nob < (int) kiov->bv_len) {
+                       kiov->bv_offset += nob;
+                       kiov->bv_len -= nob;
+                       return -EAGAIN;
+               }
 
-                nob -= kiov->kiov_len;
-                conn->ksnc_rx_kiov = ++kiov;
-                conn->ksnc_rx_nkiov--;
-        } while (nob != 0);
+               nob -= kiov->bv_len;
+               conn->ksnc_rx_kiov = ++kiov;
+               conn->ksnc_rx_nkiov--;
+       } while (nob != 0);
 
-        return 1;
+       return 1;
 }
 
 static int
-ksocknal_receive(struct ksock_conn *conn)
+ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
+                struct kvec *scratch_iov)
 {
-        /* Return 1 on success, 0 on EOF, < 0 on error.
-         * Caller checks ksnc_rx_nob_wanted to determine
-         * progress/completion. */
-        int     rc;
-        ENTRY;
+       /* Return 1 on success, 0 on EOF, < 0 on error.
+        * Caller checks ksnc_rx_nob_wanted to determine
+        * progress/completion. */
+       int     rc;
+       ENTRY;
+
+       if (ksocknal_data.ksnd_stall_rx != 0)
+               schedule_timeout_uninterruptible(
+                       cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
 
-       if (ksocknal_data.ksnd_stall_rx != 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+       rc = ksocknal_connsock_addref(conn);
+       if (rc != 0) {
+               LASSERT(conn->ksnc_closing);
+               return -ESHUTDOWN;
        }
 
-        rc = ksocknal_connsock_addref(conn);
-        if (rc != 0) {
-                LASSERT (conn->ksnc_closing);
-                return (-ESHUTDOWN);
-        }
+       for (;;) {
+               if (conn->ksnc_rx_niov != 0)
+                       rc = ksocknal_recv_iov(conn, scratch_iov);
+               else
+                       rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
+                                                scratch_iov);
 
-        for (;;) {
-                if (conn->ksnc_rx_niov != 0)
-                        rc = ksocknal_recv_iov (conn);
-                else
-                        rc = ksocknal_recv_kiov (conn);
-
-                if (rc <= 0) {
-                        /* error/EOF or partial receive */
-                        if (rc == -EAGAIN) {
-                                rc = 1;
-                        } else if (rc == 0 && conn->ksnc_rx_started) {
-                                /* EOF in the middle of a message */
-                                rc = -EPROTO;
-                        }
-                        break;
-                }
+               if (rc <= 0) {
+                       /* error/EOF or partial receive */
+                       if (rc == -EAGAIN) {
+                               rc = 1;
+                       } else if (rc == 0 && conn->ksnc_rx_started) {
+                               /* EOF in the middle of a message */
+                               rc = -EPROTO;
+                       }
+                       break;
+               }
 
-                /* Completed a fragment */
+               /* Completed a fragment */
 
-                if (conn->ksnc_rx_nob_wanted == 0) {
-                        rc = 1;
-                        break;
-                }
-        }
+               if (conn->ksnc_rx_nob_wanted == 0) {
+                       rc = 1;
+                       break;
+               }
+       }
 
-        ksocknal_connsock_decref(conn);
-        RETURN (rc);
+       ksocknal_connsock_decref(conn);
+       RETURN(rc);
 }
 
 void
@@ -400,7 +404,8 @@ ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
 
        if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
                rc = -EIO;
-               hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+               if (hstatus == LNET_MSG_STATUS_OK)
+                       hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
        }
 
        if (tx->tx_conn != NULL)
@@ -408,9 +413,6 @@ ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
 
        ksocknal_free_tx(tx);
        if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP go without lnetmsg */
-               if (rc)
-                       CERROR("tx failure rc = %d, hstatus = %d\n", rc,
-                              hstatus);
                lnetmsg->msg_health_status = hstatus;
                lnet_finalize(lnetmsg, rc);
        }
@@ -444,8 +446,10 @@ ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
                                  LNET_MSG_STATUS_LOCAL_TIMEOUT;
                        else if (error == -ENETDOWN ||
                                 error == -EHOSTUNREACH ||
-                                error == -ENETUNREACH)
-                               tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
+                                error == -ENETUNREACH ||
+                                error == -ECONNREFUSED ||
+                                error == -ECONNRESET)
+                               tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
                        /*
                         * for all other errors we don't want to
                         * retransmit
@@ -489,7 +493,7 @@ ksocknal_check_zc_req(struct ksock_tx *tx)
 
         /* ZC_REQ is going to be pinned to the peer_ni */
        tx->tx_deadline = ktime_get_seconds() +
-                         *ksocknal_tunables.ksnd_timeout;
+                         lnet_get_lnd_timeout();
 
         LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
 
@@ -530,42 +534,50 @@ ksocknal_uncheck_zc_req(struct ksock_tx *tx)
 }
 
 static int
-ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
+                         struct kvec *scratch_iov)
 {
        int rc;
+       bool error_sim = false;
 
-        if (tx->tx_zc_capable && !tx->tx_zc_checked)
-                ksocknal_check_zc_req(tx);
+       if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
+               error_sim = true;
+               rc = -EINVAL;
+               goto simulate_error;
+       }
 
-        rc = ksocknal_transmit (conn, tx);
+       if (tx->tx_zc_capable && !tx->tx_zc_checked)
+               ksocknal_check_zc_req(tx);
 
-        CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
+       rc = ksocknal_transmit(conn, tx, scratch_iov);
 
-        if (tx->tx_resid == 0) {
-                /* Sent everything OK */
-                LASSERT (rc == 0);
+       CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
-                return (0);
-        }
+       if (tx->tx_resid == 0) {
+               /* Sent everything OK */
+               LASSERT(rc == 0);
 
-        if (rc == -EAGAIN)
-                return (rc);
+               return 0;
+       }
 
-        if (rc == -ENOMEM) {
-                static int counter;
+       if (rc == -EAGAIN)
+               return rc;
 
-                counter++;   /* exponential backoff warnings */
-                if ((counter & (-counter)) == counter)
-                        CWARN("%u ENOMEM tx %p (%u allocated)\n",
+       if (rc == -ENOMEM) {
+               static int counter;
+
+               counter++;   /* exponential backoff warnings */
+               if ((counter & (-counter)) == counter)
+                       CWARN("%u ENOMEM tx %p (%u allocated)\n",
                              counter, conn, atomic_read(&libcfs_kmemory));
 
-                /* Queue on ksnd_enomem_conns for retry after a timeout */
+               /* Queue on ksnd_enomem_conns for retry after a timeout */
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-                /* enomem list takes over scheduler's ref... */
-                LASSERT (conn->ksnc_tx_scheduled);
+               /* enomem list takes over scheduler's ref... */
+               LASSERT(conn->ksnc_tx_scheduled);
                list_add_tail(&conn->ksnc_tx_list,
-                                  &ksocknal_data.ksnd_enomem_conns);
+                                 &ksocknal_data.ksnd_enomem_conns);
                if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
                    ksocknal_data.ksnd_reaper_waketime)
                        wake_up(&ksocknal_data.ksnd_reaper_waitq);
@@ -580,17 +592,21 @@ ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
                return (rc);
        }
 
+simulate_error:
+
        /* Actual error */
        LASSERT(rc < 0);
 
-       /*
-        * set the health status of the message which determines
-        * whether we should retry the transmit
-        */
-       if (rc == -ETIMEDOUT)
-               tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
-       else
-               tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+       if (!error_sim) {
+               /*
+               * set the health status of the message which determines
+               * whether we should retry the transmit
+               */
+               if (rc == -ETIMEDOUT)
+                       tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
+               else
+                       tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+       }
 
        if (!conn->ksnc_closing) {
                switch (rc) {
@@ -770,7 +786,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
        if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                /* First packet starts the timeout */
                conn->ksnc_tx_deadline = ktime_get_seconds() +
-                                        *ksocknal_tunables.ksnd_timeout;
+                                        lnet_get_lnd_timeout();
                if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
                        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
                conn->ksnc_tx_bufnob = 0;
@@ -947,7 +963,7 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
             ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
                 /* the message is going to be pinned to the peer_ni */
                tx->tx_deadline = ktime_get_seconds() +
-                                 *ksocknal_tunables.ksnd_timeout;
+                                 lnet_get_lnd_timeout();
 
                 /* Queue the message until a connection is established */
                list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
@@ -959,6 +975,7 @@ ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
 
         /* NB Routes may be ignored if connections to them failed recently */
         CNETERR("No usable routes to %s\n", libcfs_id2str(id));
+       tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
         return (-EHOSTUNREACH);
 }
 
@@ -968,14 +985,14 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
        int mpflag = 1;
        int type = lntmsg->msg_type;
        struct lnet_process_id target = lntmsg->msg_target;
-        unsigned int      payload_niov = lntmsg->msg_niov;
+       unsigned int     payload_niov = lntmsg->msg_niov;
        struct kvec *payload_iov = lntmsg->msg_iov;
-        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
-        unsigned int      payload_offset = lntmsg->msg_offset;
-        unsigned int      payload_nob = lntmsg->msg_len;
-       struct ksock_tx *tx;
-        int               desc_size;
-        int               rc;
+       struct bio_vec  *payload_kiov = lntmsg->msg_kiov;
+       unsigned int     payload_offset = lntmsg->msg_offset;
+       unsigned int     payload_nob = lntmsg->msg_len;
+       struct ksock_tx *tx;
+       int              desc_size;
+       int              rc;
 
         /* NB 'private' is different depending on what we're sending.
          * Just ignore it... */
@@ -1043,6 +1060,7 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
         if (rc == 0)
                 return (0);
 
+       lntmsg->msg_health_status = tx->tx_hstatus;
         ksocknal_free_tx(tx);
         return (-EIO);
 }
@@ -1065,7 +1083,8 @@ void
 ksocknal_thread_fini (void)
 {
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
-        ksocknal_data.ksnd_nthreads--;
+       if (--ksocknal_data.ksnd_nthreads == 0)
+               wake_up_var(&ksocknal_data.ksnd_nthreads);
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 }
 
@@ -1125,22 +1144,22 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
         /* Set up to skip as much as possible now.  If there's more left
          * (ran out of iov entries) we'll get called again */
 
-        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
-        conn->ksnc_rx_nob_left = nob_to_skip;
+       conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
+       conn->ksnc_rx_nob_left = nob_to_skip;
        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-        skipped = 0;
-        niov = 0;
+       skipped = 0;
+       niov = 0;
 
-        do {
-                nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
+       do {
+               nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
 
-                conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
-                conn->ksnc_rx_iov[niov].iov_len  = nob;
-                niov++;
-                skipped += nob;
-                nob_to_skip -=nob;
+               conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
+               conn->ksnc_rx_iov[niov].iov_len  = nob;
+               niov++;
+               skipped += nob;
+               nob_to_skip -= nob;
 
-        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
+       } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
 
         conn->ksnc_rx_niov = niov;
@@ -1151,7 +1170,9 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
 }
 
 static int
-ksocknal_process_receive(struct ksock_conn *conn)
+ksocknal_process_receive(struct ksock_conn *conn,
+                        struct page **rx_scratch_pgs,
+                        struct kvec *scratch_iov)
 {
        struct lnet_hdr *lhdr;
        struct lnet_process_id *id;
@@ -1161,13 +1182,14 @@ ksocknal_process_receive(struct ksock_conn *conn)
 
        /* NB: sched lock NOT held */
        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
-        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
-                 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
-                 conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
-                 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+       LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
+               conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
+               conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
+               conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
  again:
-        if (conn->ksnc_rx_nob_wanted != 0) {
-                rc = ksocknal_receive(conn);
+       if (conn->ksnc_rx_nob_wanted != 0) {
+               rc = ksocknal_receive(conn, rx_scratch_pgs,
+                                     scratch_iov);
 
                if (rc <= 0) {
                        struct lnet_process_id ksnp_id;
@@ -1333,7 +1355,10 @@ ksocknal_process_receive(struct ksock_conn *conn)
                                         le64_to_cpu(lhdr->src_nid) != id->nid);
                 }
 
-               lnet_finalize(conn->ksnc_cookie, rc);
+               if (rc && conn->ksnc_lnet_msg)
+                       conn->ksnc_lnet_msg->msg_health_status =
+                               LNET_MSG_STATUS_REMOTE_ERROR;
+               lnet_finalize(conn->ksnc_lnet_msg, rc);
 
                 if (rc != 0) {
                         ksocknal_new_packet(conn, 0);
@@ -1360,7 +1385,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
 int
 ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
              int delayed, unsigned int niov, struct kvec *iov,
-             lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+             struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
              unsigned int rlen)
 {
        struct ksock_conn *conn = private;
@@ -1369,9 +1394,9 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
         LASSERT (mlen <= rlen);
         LASSERT (niov <= LNET_MAX_IOV);
 
-        conn->ksnc_cookie = msg;
-        conn->ksnc_rx_nob_wanted = mlen;
-        conn->ksnc_rx_nob_left   = rlen;
+       conn->ksnc_lnet_msg = msg;
+       conn->ksnc_rx_nob_wanted = mlen;
+       conn->ksnc_rx_nob_left   = rlen;
 
         if (mlen == 0 || iov != NULL) {
                 conn->ksnc_rx_nkiov = 0;
@@ -1433,154 +1458,163 @@ ksocknal_sched_cansleep(struct ksock_sched *sched)
 
 int ksocknal_scheduler(void *arg)
 {
-       struct ksock_sched_info *info;
        struct ksock_sched *sched;
        struct ksock_conn *conn;
        struct ksock_tx *tx;
        int rc;
        int nloops = 0;
        long id = (long)arg;
+       struct page **rx_scratch_pgs;
+       struct kvec *scratch_iov;
 
-       info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
-       sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+       sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];
 
-       cfs_block_allsigs();
+       LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
+                        sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
+       if (!rx_scratch_pgs) {
+               CERROR("Unable to allocate scratch pages\n");
+               return -ENOMEM;
+       }
+
+       LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
+                        sizeof(*scratch_iov) * LNET_MAX_IOV);
+       if (!scratch_iov) {
+               CERROR("Unable to allocate scratch iov\n");
+               return -ENOMEM;
+       }
 
-       rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
+       rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
        if (rc != 0) {
                CWARN("Can't set CPU partition affinity to %d: %d\n",
-                       info->ksi_cpt, rc);
+                       sched->kss_cpt, rc);
        }
 
        spin_lock_bh(&sched->kss_lock);
 
-        while (!ksocknal_data.ksnd_shuttingdown) {
-                int did_something = 0;
+       while (!ksocknal_data.ksnd_shuttingdown) {
+               int did_something = 0;
 
-                /* Ensure I progress everything semi-fairly */
+               /* Ensure I progress everything semi-fairly */
 
                if (!list_empty(&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          struct ksock_conn, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);
 
-                        LASSERT(conn->ksnc_rx_scheduled);
-                        LASSERT(conn->ksnc_rx_ready);
+                       LASSERT(conn->ksnc_rx_scheduled);
+                       LASSERT(conn->ksnc_rx_ready);
 
-                        /* clear rx_ready in case receive isn't complete.
-                         * Do it BEFORE we call process_recv, since
-                         * data_ready can set it any time after we release
-                         * kss_lock. */
-                        conn->ksnc_rx_ready = 0;
+                       /* clear rx_ready in case receive isn't complete.
+                        * Do it BEFORE we call process_recv, since
+                        * data_ready can set it any time after we release
+                        * kss_lock. */
+                       conn->ksnc_rx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);
 
-                       rc = ksocknal_process_receive(conn);
+                       rc = ksocknal_process_receive(conn, rx_scratch_pgs,
+                                                     scratch_iov);
 
                        spin_lock_bh(&sched->kss_lock);
 
-                        /* I'm the only one that can clear this flag */
-                        LASSERT(conn->ksnc_rx_scheduled);
+                       /* I'm the only one that can clear this flag */
+                       LASSERT(conn->ksnc_rx_scheduled);
 
-                        /* Did process_receive get everything it wanted? */
-                        if (rc == 0)
-                                conn->ksnc_rx_ready = 1;
-
-                        if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
-                                /* Conn blocked waiting for ksocknal_recv()
-                                 * I change its state (under lock) to signal
-                                 * it can be rescheduled */
-                                conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
-                        } else if (conn->ksnc_rx_ready) {
-                                /* reschedule for rx */
+                       /* Did process_receive get everything it wanted? */
+                       if (rc == 0)
+                               conn->ksnc_rx_ready = 1;
+
+                       if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
+                               /* Conn blocked waiting for ksocknal_recv()
+                                * I change its state (under lock) to signal
+                                * it can be rescheduled */
+                               conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
+                       } else if (conn->ksnc_rx_ready) {
+                               /* reschedule for rx */
                                list_add_tail(&conn->ksnc_rx_list,
-                                                   &sched->kss_rx_conns);
-                        } else {
-                                conn->ksnc_rx_scheduled = 0;
-                                /* drop my ref */
-                                ksocknal_conn_decref(conn);
-                        }
+                                                  &sched->kss_rx_conns);
+                       } else {
+                               conn->ksnc_rx_scheduled = 0;
+                               /* drop my ref */
+                               ksocknal_conn_decref(conn);
+                       }
 
-                        did_something = 1;
-                }
+                       did_something = 1;
+               }
 
                if (!list_empty(&sched->kss_tx_conns)) {
-                       struct list_head zlist = LIST_HEAD_INIT(zlist);
+                       LIST_HEAD(zlist);
 
-                       if (!list_empty(&sched->kss_zombie_noop_txs)) {
-                               list_add(&zlist,
-                                             &sched->kss_zombie_noop_txs);
-                               list_del_init(&sched->kss_zombie_noop_txs);
-                        }
+                       list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
 
                        conn = list_entry(sched->kss_tx_conns.next,
                                          struct ksock_conn, ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);
 
-                        LASSERT(conn->ksnc_tx_scheduled);
-                        LASSERT(conn->ksnc_tx_ready);
+                       LASSERT(conn->ksnc_tx_scheduled);
+                       LASSERT(conn->ksnc_tx_ready);
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));
 
                        tx = list_entry(conn->ksnc_tx_queue.next,
                                        struct ksock_tx, tx_list);
 
-                        if (conn->ksnc_tx_carrier == tx)
-                                ksocknal_next_tx_carrier(conn);
+                       if (conn->ksnc_tx_carrier == tx)
+                               ksocknal_next_tx_carrier(conn);
 
-                        /* dequeue now so empty list => more to send */
+                       /* dequeue now so empty list => more to send */
                        list_del(&tx->tx_list);
 
-                        /* Clear tx_ready in case send isn't complete.  Do
-                         * it BEFORE we call process_transmit, since
-                         * write_space can set it any time after we release
-                         * kss_lock. */
-                        conn->ksnc_tx_ready = 0;
+                       /* Clear tx_ready in case send isn't complete.  Do
+                        * it BEFORE we call process_transmit, since
+                        * write_space can set it any time after we release
+                        * kss_lock. */
+                       conn->ksnc_tx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);
 
                        if (!list_empty(&zlist)) {
                                /* free zombie noop txs, it's fast because
-                                 * noop txs are just put in freelist */
-                                ksocknal_txlist_done(NULL, &zlist, 0);
-                        }
+                                * noop txs are just put in freelist */
+                               ksocknal_txlist_done(NULL, &zlist, 0);
+                       }
 
-                        rc = ksocknal_process_transmit(conn, tx);
+                       rc = ksocknal_process_transmit(conn, tx, scratch_iov);
 
-                        if (rc == -ENOMEM || rc == -EAGAIN) {
-                                /* Incomplete send: replace tx on HEAD of tx_queue */
+                       if (rc == -ENOMEM || rc == -EAGAIN) {
+                               /* Incomplete send: replace tx on HEAD of tx_queue */
                                spin_lock_bh(&sched->kss_lock);
                                list_add(&tx->tx_list,
-                                            &conn->ksnc_tx_queue);
+                                        &conn->ksnc_tx_queue);
                        } else {
                                /* Complete send; tx -ref */
                                ksocknal_tx_decref(tx);
 
                                spin_lock_bh(&sched->kss_lock);
-                                /* assume space for more */
-                                conn->ksnc_tx_ready = 1;
-                        }
+                               /* assume space for more */
+                               conn->ksnc_tx_ready = 1;
+                       }
 
-                        if (rc == -ENOMEM) {
-                                /* Do nothing; after a short timeout, this
-                                 * conn will be reposted on kss_tx_conns. */
-                        } else if (conn->ksnc_tx_ready &&
+                       if (rc == -ENOMEM) {
+                               /* Do nothing; after a short timeout, this
+                                * conn will be reposted on kss_tx_conns. */
+                       } else if (conn->ksnc_tx_ready &&
                                   !list_empty(&conn->ksnc_tx_queue)) {
-                                /* reschedule for tx */
+                               /* reschedule for tx */
                                list_add_tail(&conn->ksnc_tx_list,
-                                                   &sched->kss_tx_conns);
-                        } else {
-                                conn->ksnc_tx_scheduled = 0;
-                                /* drop my ref */
-                                ksocknal_conn_decref(conn);
-                        }
+                                             &sched->kss_tx_conns);
+                       } else {
+                               conn->ksnc_tx_scheduled = 0;
+                               /* drop my ref */
+                               ksocknal_conn_decref(conn);
+                       }
 
-                        did_something = 1;
-                }
-                if (!did_something ||           /* nothing to do */
-                    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+                       did_something = 1;
+               }
+               if (!did_something ||           /* nothing to do */
+                   ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
                        spin_unlock_bh(&sched->kss_lock);
 
-                        nloops = 0;
+                       nloops = 0;
 
-                        if (!did_something) {   /* wait for something to do */
+                       if (!did_something) {   /* wait for something to do */
                                rc = wait_event_interruptible_exclusive(
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
@@ -1594,6 +1628,8 @@ int ksocknal_scheduler(void *arg)
        }
 
        spin_unlock_bh(&sched->kss_lock);
+       CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
+       CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
        ksocknal_thread_fini();
        return 0;
 }
@@ -1657,8 +1693,8 @@ void ksocknal_write_callback(struct ksock_conn *conn)
        EXIT;
 }
 
-static struct ksock_proto *
-ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
+static const struct ksock_proto *
+ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
 {
         __u32   version = 0;
 
@@ -1688,7 +1724,7 @@ ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
         if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                struct lnet_magicversion *hmv;
 
-               CLASSERT(sizeof(struct lnet_magicversion) ==
+               BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
                         offsetof(struct ksock_hello_msg, kshm_src_nid));
 
                hmv = (struct lnet_magicversion *)hello;
@@ -1756,13 +1792,13 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
         int                  timeout;
         int                  proto_match;
         int                  rc;
-       struct ksock_proto *proto;
+       const struct ksock_proto *proto;
        struct lnet_process_id recv_id;
 
        /* socket type set on active connections - not set on passive */
        LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
 
-       timeout = active ? *ksocknal_tunables.ksnd_timeout :
+       timeout = active ? lnet_get_lnd_timeout() :
                            lnet_acceptor_timeout();
 
        rc = lnet_sock_read(sock, &hello->kshm_magic,
@@ -1828,11 +1864,11 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
 
         *incarnation = hello->kshm_src_incarnation;
 
-        if (hello->kshm_src_nid == LNET_NID_ANY) {
-                CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
-                      "from %pI4h\n", &conn->ksnc_ipaddr);
-                return -EPROTO;
-        }
+       if (hello->kshm_src_nid == LNET_NID_ANY) {
+               CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+                      &conn->ksnc_ipaddr);
+               return -EPROTO;
+       }
 
         if (!active &&
             conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
@@ -1888,7 +1924,7 @@ ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
 static int
 ksocknal_connect(struct ksock_route *route)
 {
-       struct list_head zombies = LIST_HEAD_INIT(zombies);
+       LIST_HEAD(zombies);
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
         int               type;
         int               wanted;
@@ -1897,7 +1933,7 @@ ksocknal_connect(struct ksock_route *route)
         int               retry_later = 0;
         int               rc = 0;
 
-       deadline = ktime_get_seconds() + *ksocknal_tunables.ksnd_timeout;
+       deadline = ktime_get_seconds() + lnet_get_lnd_timeout();
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -1949,11 +1985,14 @@ ksocknal_connect(struct ksock_route *route)
                         goto failed;
                 }
 
-                rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
-                                  route->ksnr_myipaddr,
-                                  route->ksnr_ipaddr, route->ksnr_port);
-                if (rc != 0)
-                        goto failed;
+               sock = lnet_connect(peer_ni->ksnp_id.nid,
+                                   route->ksnr_myiface,
+                                   route->ksnr_ipaddr, route->ksnr_port,
+                                   peer_ni->ksnp_ni->ni_net_ns);
+               if (IS_ERR(sock)) {
+                       rc = PTR_ERR(sock);
+                       goto failed;
+               }
 
                 rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
                 if (rc < 0) {
@@ -2176,8 +2215,6 @@ ksocknal_connd(void *arg)
        int nloops = 0;
        int cons_retry = 0;
 
-       cfs_block_allsigs();
-
        init_waitqueue_entry(&wait, current);
 
        spin_lock_bh(connd_lock);
@@ -2269,7 +2306,6 @@ ksocknal_connd(void *arg)
                nloops = 0;
                schedule_timeout(timeout);
 
-               set_current_state(TASK_RUNNING);
                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
@@ -2370,7 +2406,7 @@ static inline void
 ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
 {
        struct ksock_tx *tx;
-       struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
+       LIST_HEAD(stale_txs);
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -2383,8 +2419,7 @@ ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
 
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
 
-               list_del(&tx->tx_list);
-               list_add_tail(&tx->tx_list, &stale_txs);
+               list_move_tail(&tx->tx_list, &stale_txs);
        }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2457,62 +2492,65 @@ __must_hold(&ksocknal_data.ksnd_global_lock)
 static void
 ksocknal_check_peer_timeouts(int idx)
 {
-       struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+       struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
        struct ksock_peer_ni *peer_ni;
        struct ksock_conn *conn;
        struct ksock_tx *tx;
 
  again:
-        /* NB. We expect to have a look at all the peers and not find any
-         * connections to time out, so we just use a shared lock while we
-         * take a look... */
+       /* NB. We expect to have a look at all the peers and not find any
+        * connections to time out, so we just use a shared lock while we
+        * take a look...
+        */
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-       list_for_each_entry(peer_ni, peers, ksnp_list) {
+       hlist_for_each_entry(peer_ni, peers, ksnp_list) {
                struct ksock_tx *tx_stale;
                time64_t deadline = 0;
                int resid = 0;
                int n = 0;
 
-                if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
+               if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
-                        goto again;
-                }
+                       goto again;
+               }
 
-                conn = ksocknal_find_timed_out_conn (peer_ni);
+               conn = ksocknal_find_timed_out_conn(peer_ni);
 
-                if (conn != NULL) {
+               if (conn != NULL) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                        ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+                       ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 
-                        /* NB we won't find this one again, but we can't
-                         * just proceed with the next peer_ni, since we dropped
-                         * ksnd_global_lock and it might be dead already! */
-                        ksocknal_conn_decref(conn);
-                        goto again;
-                }
+                       /* NB we won't find this one again, but we can't
+                        * just proceed with the next peer_ni, since we dropped
+                        * ksnd_global_lock and it might be dead already!
+                        */
+                       ksocknal_conn_decref(conn);
+                       goto again;
+               }
 
-                /* we can't process stale txs right here because we're
-                 * holding only shared lock */
+               /* we can't process stale txs right here because we're
+                * holding only shared lock
+                */
                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
                        struct ksock_tx *tx;
 
                        tx = list_entry(peer_ni->ksnp_tx_queue.next,
                                        struct ksock_tx, tx_list);
                        if (ktime_get_seconds() >= tx->tx_deadline) {
-                                ksocknal_peer_addref(peer_ni);
+                               ksocknal_peer_addref(peer_ni);
                                read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                                ksocknal_flush_stale_txs(peer_ni);
+                               ksocknal_flush_stale_txs(peer_ni);
 
-                                ksocknal_peer_decref(peer_ni);
-                                goto again;
-                        }
-                }
+                               ksocknal_peer_decref(peer_ni);
+                               goto again;
+                       }
+               }
 
                if (list_empty(&peer_ni->ksnp_zc_req_list))
-                        continue;
+                       continue;
 
                tx_stale = NULL;
                spin_lock(&peer_ni->ksnp_lock);
@@ -2560,16 +2598,13 @@ int ksocknal_reaper(void *arg)
        wait_queue_entry_t wait;
        struct ksock_conn *conn;
        struct ksock_sched *sched;
-       struct list_head enomem_conns;
+       LIST_HEAD(enomem_conns);
        int nenomem_conns;
        time64_t timeout;
        int i;
        int peer_index = 0;
        time64_t deadline = ktime_get_seconds();
 
-        cfs_block_allsigs ();
-
-       INIT_LIST_HEAD(&enomem_conns);
        init_waitqueue_entry(&wait, current);
 
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2599,14 +2634,11 @@ int ksocknal_reaper(void *arg)
                        ksocknal_destroy_conn(conn);
 
                        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
-                        continue;
-                }
+                       continue;
+               }
 
-               if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
-                       list_add(&enomem_conns,
-                                     &ksocknal_data.ksnd_enomem_conns);
-                       list_del_init(&ksocknal_data.ksnd_enomem_conns);
-                }
+               list_splice_init(&ksocknal_data.ksnd_enomem_conns,
+                                &enomem_conns);
 
                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2631,33 +2663,35 @@ int ksocknal_reaper(void *arg)
                         nenomem_conns++;
                 }
 
-                /* careful with the jiffy wrap... */
+               /* careful with the jiffy wrap... */
                while ((timeout = deadline - ktime_get_seconds()) <= 0) {
-                        const int n = 4;
-                        const int p = 1;
-                        int       chunk = ksocknal_data.ksnd_peer_hash_size;
-
-                        /* Time to check for timeouts on a few more peers: I do
-                         * checks every 'p' seconds on a proportion of the peer_ni
-                         * table and I need to check every connection 'n' times
-                         * within a timeout interval, to ensure I detect a
-                         * timeout on any connection within (n+1)/n times the
-                         * timeout interval. */
-
-                        if (*ksocknal_tunables.ksnd_timeout > n * p)
-                                chunk = (chunk * n * p) /
-                                        *ksocknal_tunables.ksnd_timeout;
-                        if (chunk == 0)
-                                chunk = 1;
-
-                        for (i = 0; i < chunk; i++) {
-                                ksocknal_check_peer_timeouts (peer_index);
-                                peer_index = (peer_index + 1) %
-                                             ksocknal_data.ksnd_peer_hash_size;
-                        }
+                       const int n = 4;
+                       const int p = 1;
+                       int  chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
+                       unsigned int lnd_timeout;
+
+                       /* Time to check for timeouts on a few more peers: I
+                        * do checks every 'p' seconds on a proportion of the
+                        * peer_ni table and I need to check every connection
+                        * 'n' times within a timeout interval, to ensure I
+                        * detect a timeout on any connection within (n+1)/n
+                        * times the timeout interval.
+                        */
+
+                       lnd_timeout = lnet_get_lnd_timeout();
+                       if (lnd_timeout > n * p)
+                               chunk = (chunk * n * p) / lnd_timeout;
+                       if (chunk == 0)
+                               chunk = 1;
+
+                       for (i = 0; i < chunk; i++) {
+                               ksocknal_check_peer_timeouts(peer_index);
+                               peer_index = (peer_index + 1) %
+                                       HASH_SIZE(ksocknal_data.ksnd_peers);
+                       }
 
                        deadline += p;
-                }
+               }
 
                 if (nenomem_conns != 0) {
                         /* Reduce my timeout if I rescheduled ENOMEM conns.