* Author: Phil Schwan <phil@clusterfs.com>
* Author: Eric Barton <eric@bartonsoftware.com>
*
- * This file is part of Lustre, https://wiki.hpdd.intel.com/
+ * This file is part of Lustre, https://wiki.whamcloud.com/
*
* Portals is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* Portals is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Portals; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <libcfs/linux/linux-mem.h>
#include "socklnd.h"
+#include <linux/sunrpc/addr.h>
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
/* searching for a noop tx in free list */
spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
- struct ksock_tx, tx_list);
+ tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs,
+ struct ksock_tx, tx_list);
+ if (tx) {
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
if (tx == NULL)
return NULL;
- atomic_set(&tx->tx_refcount, 1);
+ refcount_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
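+ /* per-message health status, reported back to LNet in
+ * ksocknal_tx_done() */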
+ tx->tx_hstatus = LNET_MSG_STATUS_OK;
tx->tx_desc_size = size;
atomic_inc(&ksocknal_data.ksnd_nactive_txs);
{
struct ksock_tx *tx;
- tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
- if (tx == NULL) {
- CERROR("Can't allocate noop tx desc\n");
- return NULL;
- }
+ tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
+ if (tx == NULL) {
+ CERROR("Can't allocate noop tx desc\n");
+ return NULL;
+ }
- tx->tx_conn = NULL;
- tx->tx_lnetmsg = NULL;
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1;
- tx->tx_nonblk = nonblk;
+ tx->tx_conn = NULL;
+ tx->tx_lnetmsg = NULL;
+ tx->tx_kiov = NULL;
+ tx->tx_nkiov = 0;
+ tx->tx_niov = 1;
+ tx->tx_nonblk = nonblk;
tx->tx_msg.ksm_csum = 0;
tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
tx->tx_msg.ksm_zc_cookies[0] = 0;
- tx->tx_msg.ksm_zc_cookies[1] = cookie;
+ tx->tx_msg.ksm_zc_cookies[1] = cookie;
- return tx;
+ return tx;
}
}
static int
-ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
+ struct kvec *scratch_iov)
{
- struct kvec *iov = tx->tx_iov;
- int nob;
- int rc;
+ struct kvec *iov = &tx->tx_hdr;
+ int nob;
+ int rc;
- LASSERT (tx->tx_niov > 0);
+ LASSERT(tx->tx_niov > 0);
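+ /* the message header lives in the single tx_hdr kvec; payload
+ * fragments are carried as bio_vecs in tx_kiov */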
- /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
- rc = ksocknal_lib_send_iov(conn, tx);
+ /* Never touch tx->tx_hdr inside ksocknal_lib_send_hdr() */
+ rc = ksocknal_lib_send_hdr(conn, tx, scratch_iov);
- if (rc <= 0) /* sent nothing? */
- return (rc);
+ if (rc <= 0) /* sent nothing? */
+ return rc;
- nob = rc;
- LASSERT (nob <= tx->tx_resid);
- tx->tx_resid -= nob;
+ nob = rc;
+ LASSERT(nob <= tx->tx_resid);
+ tx->tx_resid -= nob;
- /* "consume" iov */
- do {
- LASSERT (tx->tx_niov > 0);
+ /* "consume" iov */
+ LASSERT(tx->tx_niov == 1);
- if (nob < (int) iov->iov_len) {
- iov->iov_base += nob;
- iov->iov_len -= nob;
- return (rc);
- }
+ if (nob < (int) iov->iov_len) {
+ iov->iov_base += nob;
+ iov->iov_len -= nob;
+ return rc;
+ }
- nob -= iov->iov_len;
- tx->tx_iov = ++iov;
- tx->tx_niov--;
- } while (nob != 0);
+ LASSERT(nob == iov->iov_len);
+ tx->tx_niov--;
- return (rc);
+ return rc;
}
static int
-ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
+ struct kvec *scratch_iov)
{
- lnet_kiov_t *kiov = tx->tx_kiov;
+ struct bio_vec *kiov = tx->tx_kiov;
int nob;
int rc;
- LASSERT (tx->tx_niov == 0);
- LASSERT (tx->tx_nkiov > 0);
+ LASSERT(tx->tx_niov == 0);
+ LASSERT(tx->tx_nkiov > 0);
- /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
- rc = ksocknal_lib_send_kiov(conn, tx);
+ /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
+ rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);
- if (rc <= 0) /* sent nothing? */
- return (rc);
+ if (rc <= 0) /* sent nothing? */
+ return rc;
- nob = rc;
- LASSERT (nob <= tx->tx_resid);
- tx->tx_resid -= nob;
+ nob = rc;
+ LASSERT(nob <= tx->tx_resid);
+ tx->tx_resid -= nob;
- /* "consume" kiov */
- do {
- LASSERT(tx->tx_nkiov > 0);
+ /* "consume" kiov */
+ do {
+ LASSERT(tx->tx_nkiov > 0);
- if (nob < (int)kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
- return rc;
- }
+ if (nob < (int)kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
+ return rc;
+ }
- nob -= (int)kiov->kiov_len;
- tx->tx_kiov = ++kiov;
- tx->tx_nkiov--;
- } while (nob != 0);
+ nob -= (int)kiov->bv_len;
+ tx->tx_kiov = ++kiov;
+ tx->tx_nkiov--;
+ } while (nob != 0);
- return (rc);
+ return rc;
}
static int
-ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
+ struct kvec *scratch_iov)
{
int rc;
int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
- }
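+ /* ksnd_stall_tx is a debug tunable: stall the sender for the
+ * given number of seconds */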
+ if (ksocknal_data.ksnd_stall_tx != 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
LASSERT(tx->tx_resid != 0);
- rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
- return (-ESHUTDOWN);
- }
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT(conn->ksnc_closing);
+ return -ESHUTDOWN;
+ }
- do {
- if (ksocknal_data.ksnd_enomem_tx > 0) {
- /* testing... */
- ksocknal_data.ksnd_enomem_tx--;
- rc = -EAGAIN;
- } else if (tx->tx_niov != 0) {
- rc = ksocknal_send_iov (conn, tx);
- } else {
- rc = ksocknal_send_kiov (conn, tx);
- }
+ do {
+ if (ksocknal_data.ksnd_enomem_tx > 0) {
+ /* testing... */
+ ksocknal_data.ksnd_enomem_tx--;
+ rc = -EAGAIN;
+ } else if (tx->tx_niov != 0) {
+ rc = ksocknal_send_hdr(conn, tx, scratch_iov);
+ } else {
+ rc = ksocknal_send_kiov(conn, tx, scratch_iov);
+ }
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
- if (rc > 0) /* sent something? */
- conn->ksnc_tx_bufnob += rc; /* account it */
+ if (rc > 0) /* sent something? */
+ conn->ksnc_tx_bufnob += rc; /* account it */
if (bufnob < conn->ksnc_tx_bufnob) {
/* allocated send buffer bytes < computed; infer
* something got ACKed */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ ksocknal_timeout();
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = bufnob;
smp_mb();
}
if (rc <= 0) { /* Didn't write anything? */
+ /* some stacks return 0 instead of -EAGAIN */
+ if (rc == 0)
+ rc = -EAGAIN;
- if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
- rc = -EAGAIN;
-
- /* Check if EAGAIN is due to memory pressure */
- if(rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
- rc = -ENOMEM;
+ /* Check if EAGAIN is due to memory pressure */
+ if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
+ rc = -ENOMEM;
- break;
- }
+ break;
+ }
- /* socket's wmem_queued now includes 'rc' bytes */
+ /* socket's wmem_queued now includes 'rc' bytes */
atomic_sub (rc, &conn->ksnc_tx_nob);
- rc = 0;
+ rc = 0;
- } while (tx->tx_resid != 0);
+ } while (tx->tx_resid != 0);
- ksocknal_connsock_decref(conn);
- return (rc);
+ ksocknal_connsock_decref(conn);
+ return rc;
}
static int
-ksocknal_recv_iov(struct ksock_conn *conn)
+ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
struct kvec *iov = conn->ksnc_rx_iov;
- int nob;
- int rc;
+ int nob;
+ int rc;
- LASSERT (conn->ksnc_rx_niov > 0);
+ LASSERT(conn->ksnc_rx_niov > 0);
/* Never touch conn->ksnc_rx_iov or change connection
- * status inside ksocknal_lib_recv_iov */
- rc = ksocknal_lib_recv_iov(conn);
+ * status inside ksocknal_lib_recv_iov */
+ rc = ksocknal_lib_recv_iov(conn, scratchiov);
- if (rc <= 0)
- return (rc);
+ if (rc <= 0)
+ return rc;
- /* received something... */
- nob = rc;
+ /* received something... */
+ nob = rc;
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ ksocknal_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
- do {
- LASSERT (conn->ksnc_rx_niov > 0);
+ do {
+ LASSERT(conn->ksnc_rx_niov > 0);
- if (nob < (int)iov->iov_len) {
- iov->iov_len -= nob;
+ if (nob < (int)iov->iov_len) {
+ iov->iov_len -= nob;
iov->iov_base += nob;
- return (-EAGAIN);
- }
+ return -EAGAIN;
+ }
- nob -= iov->iov_len;
- conn->ksnc_rx_iov = ++iov;
- conn->ksnc_rx_niov--;
- } while (nob != 0);
+ nob -= iov->iov_len;
+ conn->ksnc_rx_iov = ++iov;
+ conn->ksnc_rx_niov--;
+ } while (nob != 0);
- return (rc);
+ return rc;
}
static int
-ksocknal_recv_kiov(struct ksock_conn *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
+ struct kvec *scratch_iov)
{
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct bio_vec *kiov = conn->ksnc_rx_kiov;
int nob;
int rc;
- LASSERT (conn->ksnc_rx_nkiov > 0);
+ LASSERT(conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
- * status inside ksocknal_lib_recv_iov */
- rc = ksocknal_lib_recv_kiov(conn);
+ * status inside ksocknal_lib_recv_iov */
+ rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);
- if (rc <= 0)
- return (rc);
+ if (rc <= 0)
+ return rc;
- /* received something... */
- nob = rc;
+ /* received something... */
+ nob = rc;
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ ksocknal_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
- do {
- LASSERT (conn->ksnc_rx_nkiov > 0);
+ do {
+ LASSERT(conn->ksnc_rx_nkiov > 0);
- if (nob < (int) kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
- return -EAGAIN;
- }
+ if (nob < (int) kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
+ return -EAGAIN;
+ }
- nob -= kiov->kiov_len;
- conn->ksnc_rx_kiov = ++kiov;
- conn->ksnc_rx_nkiov--;
- } while (nob != 0);
+ nob -= kiov->bv_len;
+ conn->ksnc_rx_kiov = ++kiov;
+ conn->ksnc_rx_nkiov--;
+ } while (nob != 0);
- return 1;
+ return 1;
}
static int
-ksocknal_receive(struct ksock_conn *conn)
+ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
+ struct kvec *scratch_iov)
{
- /* Return 1 on success, 0 on EOF, < 0 on error.
- * Caller checks ksnc_rx_nob_wanted to determine
- * progress/completion. */
- int rc;
- ENTRY;
+ /* Return 1 on success, 0 on EOF, < 0 on error.
+ * Caller checks ksnc_rx_nob_wanted to determine
+ * progress/completion. */
+ int rc;
+ ENTRY;
+
+ if (ksocknal_data.ksnd_stall_rx != 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
- if (ksocknal_data.ksnd_stall_rx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT(conn->ksnc_closing);
+ return -ESHUTDOWN;
}
- rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
- return (-ESHUTDOWN);
- }
+ for (;;) {
+ if (conn->ksnc_rx_niov != 0)
+ rc = ksocknal_recv_iov(conn, scratch_iov);
+ else
+ rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
+ scratch_iov);
- for (;;) {
- if (conn->ksnc_rx_niov != 0)
- rc = ksocknal_recv_iov (conn);
- else
- rc = ksocknal_recv_kiov (conn);
-
- if (rc <= 0) {
- /* error/EOF or partial receive */
- if (rc == -EAGAIN) {
- rc = 1;
- } else if (rc == 0 && conn->ksnc_rx_started) {
- /* EOF in the middle of a message */
- rc = -EPROTO;
- }
- break;
- }
+ if (rc <= 0) {
+ /* error/EOF or partial receive */
+ if (rc == -EAGAIN) {
+ rc = 1;
+ } else if (rc == 0 && conn->ksnc_rx_started) {
+ /* EOF in the middle of a message */
+ rc = -EPROTO;
+ }
+ break;
+ }
- /* Completed a fragment */
+ /* Completed a fragment */
- if (conn->ksnc_rx_nob_wanted == 0) {
- rc = 1;
- break;
- }
- }
+ if (conn->ksnc_rx_nob_wanted == 0) {
+ rc = 1;
+ break;
+ }
+ }
- ksocknal_connsock_decref(conn);
- RETURN (rc);
+ ksocknal_connsock_decref(conn);
+ RETURN(rc);
}
void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
+ enum lnet_msg_hstatus hstatus = tx->tx_hstatus;
ENTRY;
LASSERT(ni != NULL || tx->tx_conn != NULL);
- if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted))
+ if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
rc = -EIO;
+ if (hstatus == LNET_MSG_STATUS_OK)
+ hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
if (tx->tx_conn != NULL)
ksocknal_conn_decref(tx->tx_conn);
ksocknal_free_tx(tx);
- if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
+ if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP go without lnetmsg */
+ lnetmsg->msg_health_status = hstatus;
lnet_finalize(lnetmsg, rc);
+ }
EXIT;
}
{
struct ksock_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct ksock_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(txlist, struct ksock_tx,
+ tx_list)) != NULL) {
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
list_del(&tx->tx_list);
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
+ if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
+ if (error == -ETIMEDOUT)
+ tx->tx_hstatus =
+ LNET_MSG_STATUS_LOCAL_TIMEOUT;
+ else if (error == -ENETDOWN ||
+ error == -EHOSTUNREACH ||
+ error == -ENETUNREACH ||
+ error == -ECONNREFUSED ||
+ error == -ECONNRESET)
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
+ /*
+ * for all other errors we don't want to
+ * retransmit
+ */
+ else if (error)
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
+
+ LASSERT(refcount_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
}
}
/* ZC_REQ is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ ksocknal_timeout();
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
}
static int
-ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
+ struct kvec *scratch_iov)
{
int rc;
+ bool error_sim = false;
+
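+ /* LNet health fault injection: skip the real transmit and take
+ * the error path with the injected health status */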
+ if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
+ error_sim = true;
+ rc = -EINVAL;
+ goto simulate_error;
+ }
- if (tx->tx_zc_capable && !tx->tx_zc_checked)
- ksocknal_check_zc_req(tx);
+ if (tx->tx_zc_capable && !tx->tx_zc_checked)
+ ksocknal_check_zc_req(tx);
- rc = ksocknal_transmit (conn, tx);
+ rc = ksocknal_transmit(conn, tx, scratch_iov);
- CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
+ CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
- if (tx->tx_resid == 0) {
- /* Sent everything OK */
- LASSERT (rc == 0);
+ if (tx->tx_resid == 0) {
+ /* Sent everything OK */
+ LASSERT(rc == 0);
- return (0);
- }
+ return 0;
+ }
- if (rc == -EAGAIN)
- return (rc);
+ if (rc == -EAGAIN)
+ return rc;
- if (rc == -ENOMEM) {
- static int counter;
+ if (rc == -ENOMEM) {
+ static int counter;
- counter++; /* exponential backoff warnings */
- if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ counter++; /* exponential backoff warnings */
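+ /* (counter & -counter) == counter only when counter is a power
+ * of two, so the warning fires on the 1st, 2nd, 4th, 8th, ...
+ * occurrence */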
+ if ((counter & (-counter)) == counter)
+ CWARN("%u ENOMEM tx %p (%lld allocated)\n",
+ counter, conn, libcfs_kmem_read());
- /* Queue on ksnd_enomem_conns for retry after a timeout */
+ /* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- /* enomem list takes over scheduler's ref... */
- LASSERT (conn->ksnc_tx_scheduled);
+ /* enomem list takes over scheduler's ref... */
+ LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
+ &ksocknal_data.ksnd_enomem_conns);
if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
ksocknal_data.ksnd_reaper_waketime)
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ /*
+ * set the health status of the message which determines
+ * whether we should retry the transmit
+ */
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
return (rc);
}
- /* Actual error */
- LASSERT (rc < 0);
+simulate_error:
- if (!conn->ksnc_closing) {
- switch (rc) {
- case -ECONNRESET:
- LCONSOLE_WARN("Host %pI4h reset our connection "
- "while we were sending data; it may have "
- "rebooted.\n",
- &conn->ksnc_ipaddr);
- break;
- default:
- LCONSOLE_WARN("There was an unexpected network error "
- "while writing to %pI4h: %d.\n",
- &conn->ksnc_ipaddr, rc);
- break;
- }
- CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
+ /* Actual error */
+ LASSERT(rc < 0);
+
+ if (!error_sim) {
+ /*
+ * set the health status of the message which determines
+ * whether we should retry the transmit
+ */
+ if (rc == -ETIMEDOUT)
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
+ else
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
+
+ if (!conn->ksnc_closing) {
+ switch (rc) {
+ case -ECONNRESET:
+ LCONSOLE_WARN("Host %pIS reset our connection while we were sending data; it may have rebooted.\n",
+ &conn->ksnc_peeraddr);
+ break;
+ default:
+ LCONSOLE_WARN("There was an unexpected network error while writing to %pIS: %d.\n",
+ &conn->ksnc_peeraddr, rc);
+ break;
+ }
+ CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pISp\n",
conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
- }
+ &conn->ksnc_peeraddr);
+ }
- if (tx->tx_zc_checked)
- ksocknal_uncheck_zc_req(tx);
+ if (tx->tx_zc_checked)
+ ksocknal_uncheck_zc_req(tx);
- /* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings(conn,
+ (conn->ksnc_closing) ? 0 : rc);
- return (rc);
+ return rc;
}
static void
-ksocknal_launch_connection_locked(struct ksock_route *route)
+ksocknal_launch_connection_locked(struct ksock_conn_cb *conn_cb)
{
+ /* called holding write lock on ksnd_global_lock */
- /* called holding write lock on ksnd_global_lock */
+ LASSERT(!conn_cb->ksnr_scheduled);
+ LASSERT(!conn_cb->ksnr_connecting);
+ LASSERT((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) != 0);
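+ /* ksnr_connected is a bitmask of conn types already established;
+ * at least one wanted type must still be missing here */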
- LASSERT (!route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
- LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+ /* scheduling conn for connd */
+ conn_cb->ksnr_scheduled = 1;
- route->ksnr_scheduled = 1; /* scheduling conn for connd */
- ksocknal_route_addref(route); /* extra ref for connd */
+ /* extra ref for connd */
+ ksocknal_conn_cb_addref(conn_cb);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- list_add_tail(&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
+ list_add_tail(&conn_cb->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
- struct ksock_route *route;
+ struct ksock_conn_cb *conn_cb;
- /* called holding write lock on ksnd_global_lock */
- for (;;) {
- /* launch any/all connections that need it */
- route = ksocknal_find_connectable_route_locked(peer_ni);
- if (route == NULL)
- return;
+ /* called holding write lock on ksnd_global_lock */
+ for (;;) {
+ /* launch any/all connections that need it */
+ conn_cb = ksocknal_find_connectable_conn_cb_locked(peer_ni);
+ if (conn_cb == NULL)
+ return;
- ksocknal_launch_connection_locked(route);
- }
+ ksocknal_launch_connection_locked(conn_cb);
+ }
}
struct ksock_conn *
* ksnc_sock... */
LASSERT(!conn->ksnc_closing);
- CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
+ CDEBUG(D_NET, "Sending to %s ip %pISp\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
+ &conn->ksnc_peeraddr);
ksocknal_tx_prep(conn, tx);
- /* Ensure the frags we've been given EXACTLY match the number of
- * bytes we want to send. Many TCP/IP stacks disregard any total
+ /* Ensure the frags we've been given EXACTLY match the number of
+ * bytes we want to send. Many TCP/IP stacks disregard any total
* size parameters passed to them and just look at the frags.
- *
- * We always expect at least 1 mapped fragment containing the
- * complete ksocknal message header. */
- LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
- lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
- (unsigned int)tx->tx_nob);
- LASSERT (tx->tx_niov >= 1);
- LASSERT (tx->tx_resid == tx->tx_nob);
+ *
+ * We always expect at least 1 mapped fragment containing the
+ * complete ksocknal message header.
+ */
+ LASSERT(lnet_iov_nob(tx->tx_niov, &tx->tx_hdr) +
+ lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
+ (unsigned int)tx->tx_nob);
+ LASSERT(tx->tx_niov >= 1);
+ LASSERT(tx->tx_resid == tx->tx_nob);
CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ ksocknal_timeout();
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = 0;
}
-struct ksock_route *
-ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
+struct ksock_conn_cb *
+ksocknal_find_connectable_conn_cb_locked(struct ksock_peer_ni *peer_ni)
{
time64_t now = ktime_get_seconds();
- struct list_head *tmp;
- struct ksock_route *route;
-
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
-
- LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
-
- if (route->ksnr_scheduled) /* connections being established */
- continue;
-
- /* all route types connected ? */
- if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
- continue;
-
- if (!(route->ksnr_retry_interval == 0 || /* first attempt */
- now >= route->ksnr_timeout)) {
- CDEBUG(D_NET,
- "Too soon to retry route %pI4h "
- "(cnted %d, interval %lld, %lld secs later)\n",
- &route->ksnr_ipaddr,
- route->ksnr_connected,
- route->ksnr_retry_interval,
- route->ksnr_timeout - now);
- continue;
- }
-
- return (route);
- }
+ struct ksock_conn_cb *conn_cb;
+
+ conn_cb = peer_ni->ksnp_conn_cb;
+ if (!conn_cb)
+ return NULL;
+
+ LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled);
+
+ if (conn_cb->ksnr_scheduled) /* connections being established */
+ return NULL;
+
+ /* all conn types connected ? */
+ if ((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) == 0)
+ return NULL;
+
+ if (!(conn_cb->ksnr_retry_interval == 0 || /* first attempt */
+ now >= conn_cb->ksnr_timeout)) {
+ CDEBUG(D_NET,
+ "Too soon to retry route %pIS (cnted %d, interval %lld, %lld secs later)\n",
+ &conn_cb->ksnr_addr,
+ conn_cb->ksnr_connected,
+ conn_cb->ksnr_retry_interval,
+ conn_cb->ksnr_timeout - now);
+ return NULL;
+ }
- return (NULL);
+ return conn_cb;
}
-struct ksock_route *
-ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
+struct ksock_conn_cb *
+ksocknal_find_connecting_conn_cb_locked(struct ksock_peer_ni *peer_ni)
{
- struct list_head *tmp;
- struct ksock_route *route;
+ struct ksock_conn_cb *conn_cb;
- list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, struct ksock_route, ksnr_list);
+ conn_cb = peer_ni->ksnp_conn_cb;
+ if (!conn_cb)
+ return NULL;
- LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+ LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled);
- if (route->ksnr_scheduled)
- return (route);
- }
-
- return (NULL);
+ return conn_cb->ksnr_scheduled ? conn_cb : NULL;
}
int
{
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
+ struct sockaddr_in sa;
rwlock_t *g_lock;
int retry;
int rc;
- LASSERT (tx->tx_conn == NULL);
+ LASSERT(tx->tx_conn == NULL);
- g_lock = &ksocknal_data.ksnd_global_lock;
+ g_lock = &ksocknal_data.ksnd_global_lock;
- for (retry = 0;; retry = 1) {
+ for (retry = 0;; retry = 1) {
read_lock(g_lock);
- peer_ni = ksocknal_find_peer_locked(ni, id);
- if (peer_ni != NULL) {
- if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
- conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
- if (conn != NULL) {
- /* I've got no routes that need to be
- * connecting and I do have an actual
- * connection... */
+ peer_ni = ksocknal_find_peer_locked(ni, id);
+ if (peer_ni != NULL) {
+ if (ksocknal_find_connectable_conn_cb_locked(peer_ni) == NULL) {
+ conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
+ if (conn != NULL) {
+ /* I've got nothing that needs to be
+ * connecting and I do have an actual
+ * connection...
+ */
ksocknal_queue_tx_locked (tx, conn);
read_unlock(g_lock);
return (0);
- }
- }
- }
+ }
+ }
+ }
/* I'll need a write lock... */
read_unlock(g_lock);
return -EHOSTUNREACH;
}
- rc = ksocknal_add_peer(ni, id,
- LNET_NIDADDR(id.nid),
- lnet_acceptor_port());
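+ /* build an IPv4 sockaddr for the peer, on the acceptor port */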
+ memset(&sa, 0, sizeof(sa));
+ sa.sin_family = AF_INET;
+ sa.sin_addr.s_addr = htonl(LNET_NIDADDR(id.nid));
+ sa.sin_port = htons(lnet_acceptor_port());
+ rc = ksocknal_add_peer(ni, id, (struct sockaddr *)&sa);
if (rc != 0) {
CERROR("Can't add peer_ni %s: %d\n",
libcfs_id2str(id), rc);
return (0);
}
- if (peer_ni->ksnp_accepting > 0 ||
- ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
+ if (peer_ni->ksnp_accepting > 0 ||
+ ksocknal_find_connecting_conn_cb_locked(peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ ksocknal_timeout();
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
write_unlock_bh(g_lock);
return 0;
- }
+ }
write_unlock_bh(g_lock);
/* NB Routes may be ignored if connections to them failed recently */
CNETERR("No usable routes to %s\n", libcfs_id2str(id));
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
return (-EHOSTUNREACH);
}
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
- int mpflag = 1;
+ /* '1' for consistency with code that checks !mpflag to restore */
+ unsigned int mpflag = 1;
int type = lntmsg->msg_type;
struct lnet_process_id target = lntmsg->msg_target;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
struct ksock_tx *tx;
- int desc_size;
- int rc;
+ int desc_size;
+ int rc;
/* NB 'private' is different depending on what we're sending.
* Just ignore it... */
LASSERT (payload_nob == 0 || payload_niov > 0);
LASSERT (payload_niov <= LNET_MAX_IOV);
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
LASSERT (!in_interrupt ());
- if (payload_iov != NULL)
- desc_size = offsetof(struct ksock_tx,
- tx_frags.virt.iov[1 + payload_niov]);
- else
- desc_size = offsetof(struct ksock_tx,
- tx_frags.paged.kiov[payload_niov]);
+ desc_size = offsetof(struct ksock_tx,
+ tx_payload[payload_niov]);
if (lntmsg->msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
- tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
- if (tx == NULL) {
- CERROR("Can't allocate tx desc type %d size %d\n",
- type, desc_size);
- if (lntmsg->msg_vmflush)
- cfs_memory_pressure_restore(mpflag);
- return (-ENOMEM);
- }
+ mpflag = memalloc_noreclaim_save();
- tx->tx_conn = NULL; /* set when assigned a conn */
- tx->tx_lnetmsg = lntmsg;
-
- if (payload_iov != NULL) {
- tx->tx_kiov = NULL;
- tx->tx_nkiov = 0;
- tx->tx_iov = tx->tx_frags.virt.iov;
- tx->tx_niov = 1 +
- lnet_extract_iov(payload_niov, &tx->tx_iov[1],
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- } else {
- tx->tx_niov = 1;
- tx->tx_iov = &tx->tx_frags.paged.iov;
- tx->tx_kiov = tx->tx_frags.paged.kiov;
- tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
-
- if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
- tx->tx_zc_capable = 1;
- }
+ tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
+ if (tx == NULL) {
+ CERROR("Can't allocate tx desc type %d size %d\n",
+ type, desc_size);
+ if (lntmsg->msg_vmflush)
+ memalloc_noreclaim_restore(mpflag);
+ return -ENOMEM;
+ }
+
+ tx->tx_conn = NULL; /* set when assigned a conn */
+ tx->tx_lnetmsg = lntmsg;
+
+ tx->tx_niov = 1;
+ tx->tx_kiov = tx->tx_payload;
+ tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+
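+ /* payloads at or above the zc_min_payload tunable are
+ * candidates for zero-copy send */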
+ if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
+ tx->tx_zc_capable = 1;
tx->tx_msg.ksm_csum = 0;
tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_msg.ksm_zc_cookies[1] = 0;
- /* The first fragment will be set later in pro_pack */
- rc = ksocknal_launch_packet(ni, tx, target);
- if (!mpflag)
- cfs_memory_pressure_restore(mpflag);
+ /* The first fragment will be set later in pro_pack */
+ rc = ksocknal_launch_packet(ni, tx, target);
+ /*
+ * We can't test lntmsg->msg_vmflush again as lntmsg may
+ * have been freed.
+ */
+ if (!mpflag)
+ memalloc_noreclaim_restore(mpflag);
if (rc == 0)
return (0);
+ lntmsg->msg_health_status = tx->tx_hstatus;
ksocknal_free_tx(tx);
return (-EIO);
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- struct task_struct *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
if (IS_ERR(task))
return PTR_ERR(task);
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ atomic_inc(&ksocknal_data.ksnd_nthreads);
return 0;
}
void
ksocknal_thread_fini (void)
{
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads--;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
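+ /* the last thread to exit wakes any wait_var_event() waiter on
+ * ksnd_nthreads */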
+ if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
+ wake_up_var(&ksocknal_data.ksnd_nthreads);
}
int
/* Set up to skip as much as possible now. If there's more left
* (ran out of iov entries) we'll get called again */
- conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
- conn->ksnc_rx_nob_left = nob_to_skip;
+ conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
+ conn->ksnc_rx_nob_left = nob_to_skip;
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- skipped = 0;
- niov = 0;
+ skipped = 0;
+ niov = 0;
- do {
- nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
+ do {
+ nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
- conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
- conn->ksnc_rx_iov[niov].iov_len = nob;
- niov++;
- skipped += nob;
- nob_to_skip -=nob;
+ conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
+ conn->ksnc_rx_iov[niov].iov_len = nob;
+ niov++;
+ skipped += nob;
+ nob_to_skip -= nob;
- } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
+ } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
conn->ksnc_rx_niov = niov;
}
static int
-ksocknal_process_receive(struct ksock_conn *conn)
+ksocknal_process_receive(struct ksock_conn *conn,
+ struct page **rx_scratch_pgs,
+ struct kvec *scratch_iov)
{
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+ LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
- if (conn->ksnc_rx_nob_wanted != 0) {
- rc = ksocknal_receive(conn);
+ if (conn->ksnc_rx_nob_wanted != 0) {
+ rc = ksocknal_receive(conn, rx_scratch_pgs,
+ scratch_iov);
if (rc <= 0) {
struct lnet_process_id ksnp_id;
LASSERT(rc != -EAGAIN);
if (rc == 0)
- CDEBUG(D_NET, "[%p] EOF from %s "
- "ip %pI4h:%d\n", conn,
- libcfs_id2str(ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CDEBUG(D_NET, "[%p] EOF from %s ip %pISp\n",
+ conn, libcfs_id2str(ksnp_id),
+ &conn->ksnc_peeraddr);
else if (!conn->ksnc_closing)
- CERROR("[%p] Error %d on read from %s "
- "ip %pI4h:%d\n", conn, rc,
- libcfs_id2str(ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CERROR("[%p] Error %d on read from %s ip %pISp\n",
+ conn, rc, libcfs_id2str(ksnp_id),
+ &conn->ksnc_peeraddr);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
le64_to_cpu(lhdr->src_nid) != id->nid);
}
- lnet_finalize(conn->ksnc_cookie, rc);
+ if (rc && conn->ksnc_lnet_msg)
+ conn->ksnc_lnet_msg->msg_health_status =
+ LNET_MSG_STATUS_REMOTE_ERROR;
+ lnet_finalize(conn->ksnc_lnet_msg, rc);
if (rc != 0) {
ksocknal_new_packet(conn, 0);
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
- int delayed, unsigned int niov, struct kvec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+ int delayed, unsigned int niov,
+ struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
struct ksock_conn *conn = private;
LASSERT (mlen <= rlen);
LASSERT (niov <= LNET_MAX_IOV);
- conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_wanted = mlen;
- conn->ksnc_rx_nob_left = rlen;
+ conn->ksnc_lnet_msg = msg;
+ conn->ksnc_rx_nob_wanted = mlen;
+ conn->ksnc_rx_nob_left = rlen;
- if (mlen == 0 || iov != NULL) {
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- conn->ksnc_rx_niov =
- lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- niov, iov, offset, mlen);
- } else {
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- conn->ksnc_rx_nkiov =
- lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- niov, kiov, offset, mlen);
- }
+ if (mlen == 0) {
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
+ conn->ksnc_rx_niov = 0;
+ } else {
+ conn->ksnc_rx_niov = 0;
+ conn->ksnc_rx_iov = NULL;
+ conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
+ conn->ksnc_rx_nkiov =
+ lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
+ niov, kiov, offset, mlen);
+ }
LASSERT (mlen ==
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
int ksocknal_scheduler(void *arg)
{
- struct ksock_sched_info *info;
struct ksock_sched *sched;
struct ksock_conn *conn;
struct ksock_tx *tx;
int rc;
- int nloops = 0;
long id = (long)arg;
+ struct page **rx_scratch_pgs;
+ struct kvec *scratch_iov;
+
+ sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];
- info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
- sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
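+ /* per-thread scratch buffers for the socket I/O helpers,
+ * allocated on this scheduler's CPT for locality */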
+ LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
+ sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
+ if (!rx_scratch_pgs) {
+ CERROR("Unable to allocate scratch pages\n");
+ return -ENOMEM;
+ }
- cfs_block_allsigs();
+ LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
+ sizeof(*scratch_iov) * LNET_MAX_IOV);
+ if (!scratch_iov) {
+ /* avoid leaking rx_scratch_pgs on failure */
+ CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
+ CERROR("Unable to allocate scratch iov\n");
+ return -ENOMEM;
+ }
- rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
+ rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
if (rc != 0) {
CWARN("Can't set CPU partition affinity to %d: %d\n",
- info->ksi_cpt, rc);
+ sched->kss_cpt, rc);
}
spin_lock_bh(&sched->kss_lock);
- while (!ksocknal_data.ksnd_shuttingdown) {
- int did_something = 0;
-
- /* Ensure I progress everything semi-fairly */
+ while (!ksocknal_data.ksnd_shuttingdown) {
+ bool did_something = false;
- if (!list_empty(&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- struct ksock_conn, ksnc_rx_list);
+ /* Ensure I progress everything semi-fairly */
+ conn = list_first_entry_or_null(&sched->kss_rx_conns,
+ struct ksock_conn,
+ ksnc_rx_list);
+ if (conn) {
list_del(&conn->ksnc_rx_list);
- LASSERT(conn->ksnc_rx_scheduled);
- LASSERT(conn->ksnc_rx_ready);
+ LASSERT(conn->ksnc_rx_scheduled);
+ LASSERT(conn->ksnc_rx_ready);
- /* clear rx_ready in case receive isn't complete.
- * Do it BEFORE we call process_recv, since
- * data_ready can set it any time after we release
- * kss_lock. */
- conn->ksnc_rx_ready = 0;
+ /* clear rx_ready in case receive isn't complete.
+ * Do it BEFORE we call process_recv, since
+ * data_ready can set it any time after we release
+ * kss_lock. */
+ conn->ksnc_rx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
- rc = ksocknal_process_receive(conn);
+ rc = ksocknal_process_receive(conn, rx_scratch_pgs,
+ scratch_iov);
spin_lock_bh(&sched->kss_lock);
- /* I'm the only one that can clear this flag */
- LASSERT(conn->ksnc_rx_scheduled);
-
- /* Did process_receive get everything it wanted? */
- if (rc == 0)
- conn->ksnc_rx_ready = 1;
+ /* I'm the only one that can clear this flag */
+ LASSERT(conn->ksnc_rx_scheduled);
- if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
- /* Conn blocked waiting for ksocknal_recv()
- * I change its state (under lock) to signal
- * it can be rescheduled */
- conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
- } else if (conn->ksnc_rx_ready) {
- /* reschedule for rx */
+ /* Did process_receive get everything it wanted? */
+ if (rc == 0)
+ conn->ksnc_rx_ready = 1;
+
+ if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
+ /* Conn blocked waiting for ksocknal_recv()
+ * I change its state (under lock) to signal
+ * it can be rescheduled */
+ conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
+ } else if (conn->ksnc_rx_ready) {
+ /* reschedule for rx */
list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- } else {
- conn->ksnc_rx_scheduled = 0;
- /* drop my ref */
- ksocknal_conn_decref(conn);
- }
+ &sched->kss_rx_conns);
+ } else {
+ conn->ksnc_rx_scheduled = 0;
+ /* drop my ref */
+ ksocknal_conn_decref(conn);
+ }
- did_something = 1;
- }
+ did_something = true;
+ }
if (!list_empty(&sched->kss_tx_conns)) {
- struct list_head zlist = LIST_HEAD_INIT(zlist);
+ LIST_HEAD(zlist);
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist,
- &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
- }
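+ /* grab all zombie noop txs while holding kss_lock; they are
+ * freed below once the lock is dropped */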
+ list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
- conn = list_entry(sched->kss_tx_conns.next,
- struct ksock_conn, ksnc_tx_list);
+ conn = list_first_entry(&sched->kss_tx_conns,
+ struct ksock_conn,
+ ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
- LASSERT(conn->ksnc_tx_scheduled);
- LASSERT(conn->ksnc_tx_ready);
+ LASSERT(conn->ksnc_tx_scheduled);
+ LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
- tx = list_entry(conn->ksnc_tx_queue.next,
- struct ksock_tx, tx_list);
+ tx = list_first_entry(&conn->ksnc_tx_queue,
+ struct ksock_tx, tx_list);
- if (conn->ksnc_tx_carrier == tx)
- ksocknal_next_tx_carrier(conn);
+ if (conn->ksnc_tx_carrier == tx)
+ ksocknal_next_tx_carrier(conn);
- /* dequeue now so empty list => more to send */
+ /* dequeue now so empty list => more to send */
list_del(&tx->tx_list);
- /* Clear tx_ready in case send isn't complete. Do
- * it BEFORE we call process_transmit, since
- * write_space can set it any time after we release
- * kss_lock. */
- conn->ksnc_tx_ready = 0;
+ /* Clear tx_ready in case send isn't complete. Do
+ * it BEFORE we call process_transmit, since
+ * write_space can set it any time after we release
+ * kss_lock. */
+ conn->ksnc_tx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
if (!list_empty(&zlist)) {
/* free zombie noop txs, it's fast because
- * noop txs are just put in freelist */
- ksocknal_txlist_done(NULL, &zlist, 0);
- }
+ * noop txs are just put in freelist */
+ ksocknal_txlist_done(NULL, &zlist, 0);
+ }
- rc = ksocknal_process_transmit(conn, tx);
+ rc = ksocknal_process_transmit(conn, tx, scratch_iov);
- if (rc == -ENOMEM || rc == -EAGAIN) {
- /* Incomplete send: replace tx on HEAD of tx_queue */
+ if (rc == -ENOMEM || rc == -EAGAIN) {
+ /* Incomplete send: replace tx on HEAD of tx_queue */
spin_lock_bh(&sched->kss_lock);
list_add(&tx->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
} else {
/* Complete send; tx -ref */
ksocknal_tx_decref(tx);
spin_lock_bh(&sched->kss_lock);
- /* assume space for more */
- conn->ksnc_tx_ready = 1;
- }
+ /* assume space for more */
+ conn->ksnc_tx_ready = 1;
+ }
- if (rc == -ENOMEM) {
- /* Do nothing; after a short timeout, this
- * conn will be reposted on kss_tx_conns. */
- } else if (conn->ksnc_tx_ready &&
+ if (rc == -ENOMEM) {
+ /* Do nothing; after a short timeout, this
+ * conn will be reposted on kss_tx_conns. */
+ } else if (conn->ksnc_tx_ready &&
!list_empty(&conn->ksnc_tx_queue)) {
- /* reschedule for tx */
+ /* reschedule for tx */
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- } else {
- conn->ksnc_tx_scheduled = 0;
- /* drop my ref */
- ksocknal_conn_decref(conn);
- }
+ &sched->kss_tx_conns);
+ } else {
+ conn->ksnc_tx_scheduled = 0;
+ /* drop my ref */
+ ksocknal_conn_decref(conn);
+ }
- did_something = 1;
- }
- if (!did_something || /* nothing to do */
- ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+ did_something = true;
+ }
+ if (!did_something || /* nothing to do */
+ need_resched()) { /* hogging CPU? */
spin_unlock_bh(&sched->kss_lock);
- nloops = 0;
-
- if (!did_something) { /* wait for something to do */
+ if (!did_something) { /* wait for something to do */
rc = wait_event_interruptible_exclusive(
sched->kss_waitq,
!ksocknal_sched_cansleep(sched));
}
spin_unlock_bh(&sched->kss_lock);
+ CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
+ CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
ksocknal_thread_fini();
return 0;
}
EXIT;
}
-static struct ksock_proto *
-ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
+static const struct ksock_proto *
+ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
__u32 version = 0;
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
struct lnet_magicversion *hmv;
- CLASSERT(sizeof(struct lnet_magicversion) ==
+ BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
offsetof(struct ksock_hello_msg, kshm_src_nid));
hmv = (struct lnet_magicversion *)hello;
int timeout;
int proto_match;
int rc;
- struct ksock_proto *proto;
+ const struct ksock_proto *proto;
struct lnet_process_id recv_id;
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
- timeout = active ? *ksocknal_tunables.ksnd_timeout :
+ timeout = active ? ksocknal_timeout() :
lnet_acceptor_timeout();
rc = lnet_sock_read(sock, &hello->kshm_magic,
sizeof(hello->kshm_magic), timeout);
- if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
- return rc;
- }
+ if (rc != 0) {
+ CERROR("Error %d reading HELLO from %pIS\n",
+ rc, &conn->ksnc_peeraddr);
+ LASSERT(rc < 0);
+ return rc;
+ }
- if (hello->kshm_magic != LNET_PROTO_MAGIC &&
- hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
- hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
- /* Unexpected magic! */
- CERROR ("Bad magic(1) %#08x (%#08x expected) from "
- "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
- return -EPROTO;
- }
+ if (hello->kshm_magic != LNET_PROTO_MAGIC &&
+ hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
+ hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
+ /* Unexpected magic! */
+ CERROR("Bad magic(1) %#08x (%#08x expected) from %pIS\n",
+ __cpu_to_le32(hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC, &conn->ksnc_peeraddr);
+ return -EPROTO;
+ }
rc = lnet_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
if (rc != 0) {
- CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ CERROR("Error %d reading HELLO from %pIS\n",
+ rc, &conn->ksnc_peeraddr);
LASSERT(rc < 0);
return rc;
}
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
}
- CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
- conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);
+ CERROR("Unknown protocol version (%d.x expected) from %pIS\n",
+ conn->ksnc_proto->pro_version, &conn->ksnc_peeraddr);
return -EPROTO;
}
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
if (rc != 0) {
- CERROR("Error %d reading or checking hello from from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ CERROR("Error %d reading or checking hello from from %pIS\n",
+ rc, &conn->ksnc_peeraddr);
LASSERT (rc < 0);
return rc;
}
*incarnation = hello->kshm_src_incarnation;
- if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
- "from %pI4h\n", &conn->ksnc_ipaddr);
- return -EPROTO;
- }
+ if (hello->kshm_src_nid == LNET_NID_ANY) {
+ CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pIS\n",
+ &conn->ksnc_peeraddr);
+ return -EPROTO;
+ }
- if (!active &&
- conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- /* Userspace NAL assigns peer_ni process ID from socket */
- recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
- } else {
- recv_id.nid = hello->kshm_src_nid;
- recv_id.pid = hello->kshm_src_pid;
- }
+ if (!active &&
+ rpc_get_port((struct sockaddr *)&conn->ksnc_peeraddr) >
+ LNET_ACCEPTOR_MAX_RESERVED_PORT) {
+ /* Userspace NAL assigns peer_ni process ID from socket */
+ recv_id.pid = rpc_get_port((struct sockaddr *)
+ &conn->ksnc_peeraddr) |
+ LNET_PID_USERFLAG;
+ LASSERT(conn->ksnc_peeraddr.ss_family == AF_INET);
+ recv_id.nid = LNET_MKNID(
+ LNET_NIDNET(ni->ni_nid),
+ ntohl(((struct sockaddr_in *)
+ &conn->ksnc_peeraddr)->sin_addr.s_addr));
+ } else {
+ recv_id.nid = hello->kshm_src_nid;
+ recv_id.pid = hello->kshm_src_pid;
+ }
if (!active) {
*peerid = recv_id;
/* peer_ni determines type */
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
- CERROR("Unexpected type %d from %s ip %pI4h\n",
+ CERROR("Unexpected type %d from %s ip %pIS\n",
hello->kshm_ctype, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr);
+ &conn->ksnc_peeraddr);
return -EPROTO;
}
return 0;
}
- if (peerid->pid != recv_id.pid ||
- peerid->nid != recv_id.nid) {
- LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
- " %pI4h, but they claimed they were "
- "%s; please check your Lustre "
- "configuration.\n",
- libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- libcfs_id2str(recv_id));
- return -EPROTO;
- }
+ if (peerid->pid != recv_id.pid ||
+ peerid->nid != recv_id.nid) {
+ LCONSOLE_ERROR_MSG(0x130,
+ "Connected successfully to %s on host %pIS, but they claimed they were %s; please check your Lustre configuration.\n",
+ libcfs_id2str(*peerid),
+ &conn->ksnc_peeraddr,
+ libcfs_id2str(recv_id));
+ return -EPROTO;
+ }
if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
/* Possible protocol mismatch or I lost the connection race */
}
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
- CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
+ CERROR("Mismatched types: me %d, %s ip %pIS %d\n",
conn->ksnc_type, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
+ &conn->ksnc_peeraddr,
hello->kshm_ctype);
return -EPROTO;
}
return 0;
}
-static int
-ksocknal_connect(struct ksock_route *route)
+static bool
+ksocknal_connect(struct ksock_conn_cb *conn_cb)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- struct ksock_peer_ni *peer_ni = route->ksnr_peer;
- int type;
- int wanted;
- struct socket *sock;
+ LIST_HEAD(zombies);
+ struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
+ int type;
+ int wanted;
+ struct socket *sock;
time64_t deadline;
- int retry_later = 0;
- int rc = 0;
+ bool retry_later = false;
+ int rc = 0;
- deadline = ktime_get_seconds() + *ksocknal_tunables.ksnd_timeout;
+ deadline = ktime_get_seconds() + ksocknal_timeout();
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- LASSERT (route->ksnr_scheduled);
- LASSERT (!route->ksnr_connecting);
+ LASSERT(conn_cb->ksnr_scheduled);
+ LASSERT(!conn_cb->ksnr_connecting);
- route->ksnr_connecting = 1;
+ conn_cb->ksnr_connecting = 1;
- for (;;) {
- wanted = ksocknal_route_mask() & ~route->ksnr_connected;
+ for (;;) {
+ wanted = ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected;
- /* stop connecting if peer_ni/route got closed under me, or
- * route got connected while queued */
- if (peer_ni->ksnp_closing || route->ksnr_deleted ||
- wanted == 0) {
- retry_later = 0;
- break;
- }
+ /* stop connecting if peer_ni/cb got closed under me, or
+ * conn cb got connected while queued
+ */
+ if (peer_ni->ksnp_closing || conn_cb->ksnr_deleted ||
+ wanted == 0) {
+ retry_later = false;
+ break;
+ }
- /* reschedule if peer_ni is connecting to me */
- if (peer_ni->ksnp_accepting > 0) {
- CDEBUG(D_NET,
- "peer_ni %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
- retry_later = 1;
- }
+ /* reschedule if peer_ni is connecting to me */
+ if (peer_ni->ksnp_accepting > 0) {
+ CDEBUG(D_NET,
+ "peer_ni %s(%d) already connecting to me, retry later.\n",
+ libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
+ retry_later = true;
+ }
- if (retry_later) /* needs reschedule */
- break;
+ if (retry_later) /* needs reschedule */
+ break;
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
- type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
- type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
- type = SOCKLND_CONN_BULK_IN;
- } else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
- type = SOCKLND_CONN_BULK_OUT;
- }
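+ /* pick the next conn type to establish; when both bulk types
+ * are wanted, prefer BULK_IN only while we have no more BULK_IN
+ * conns than BULK_OUT, keeping the directions balanced */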
+ if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
+ type = SOCKLND_CONN_ANY;
+ } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
+ type = SOCKLND_CONN_CONTROL;
+ } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 &&
+ conn_cb->ksnr_blki_conn_count <= conn_cb->ksnr_blko_conn_count) {
+ type = SOCKLND_CONN_BULK_IN;
+ } else {
+ LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
+ type = SOCKLND_CONN_BULK_OUT;
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (ktime_get_seconds() >= deadline) {
- rc = -ETIMEDOUT;
- lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
+ rc = -ETIMEDOUT;
+ lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+ (struct sockaddr *)
+ &conn_cb->ksnr_addr);
+ goto failed;
+ }
- rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
- route->ksnr_myipaddr,
- route->ksnr_ipaddr, route->ksnr_port);
- if (rc != 0)
- goto failed;
+ sock = lnet_connect(peer_ni->ksnp_id.nid,
+ conn_cb->ksnr_myiface,
+ (struct sockaddr *)&conn_cb->ksnr_addr,
+ peer_ni->ksnp_ni->ni_net_ns);
+ if (IS_ERR(sock)) {
+ rc = PTR_ERR(sock);
+ goto failed;
+ }
- rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
- if (rc < 0) {
- lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
- route->ksnr_ipaddr,
- route->ksnr_port);
- goto failed;
- }
+ rc = ksocknal_create_conn(peer_ni->ksnp_ni, conn_cb, sock,
+ type);
+ if (rc < 0) {
+ lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
+ (struct sockaddr *)
+ &conn_cb->ksnr_addr);
+ goto failed;
+ }
- /* A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version */
- retry_later = (rc != 0);
- if (retry_later)
- CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
- libcfs_nid2str(peer_ni->ksnp_id.nid));
+ /* A +ve RC means I have to retry because I lost the connection
+ * race or I have to renegotiate protocol version */
+ retry_later = (rc != 0);
+ if (retry_later)
+ CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
+ libcfs_nid2str(peer_ni->ksnp_id.nid));
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- }
+ }
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
-
- if (retry_later) {
- /* re-queue for attention; this frees me up to handle
- * the peer_ni's incoming connection request */
-
- if (rc == EALREADY ||
- (rc == 0 && peer_ni->ksnp_accepting > 0)) {
- /* We want to introduce a delay before next
- * attempt to connect if we lost conn race,
- * but the race is resolved quickly usually,
- * so min_reconnectms should be good heuristic */
- route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
- route->ksnr_timeout = ktime_get_seconds() +
- route->ksnr_retry_interval;
- }
+ conn_cb->ksnr_scheduled = 0;
+ conn_cb->ksnr_connecting = 0;
+
+ if (retry_later) {
+ /* re-queue for attention; this frees me up to handle
+ * the peer_ni's incoming connection request
+ */
+
+ if (rc == EALREADY ||
+ (rc == 0 && peer_ni->ksnp_accepting > 0)) {
+ /* We want to introduce a delay before the next
+ * connect attempt if we lost the conn race, but
+ * the race is usually resolved quickly, so
+ * min_reconnectms should be a good heuristic
+ */
+ conn_cb->ksnr_retry_interval =
+ *ksocknal_tunables.ksnd_min_reconnectms / 1000;
+ conn_cb->ksnr_timeout = ktime_get_seconds() +
+ conn_cb->ksnr_retry_interval;
+ }
- ksocknal_launch_connection_locked(route);
- }
+ ksocknal_launch_connection_locked(conn_cb);
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return retry_later;
+ return retry_later;
failed:
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- route->ksnr_scheduled = 0;
- route->ksnr_connecting = 0;
+ conn_cb->ksnr_scheduled = 0;
+ conn_cb->ksnr_connecting = 0;
/* This is a retry rather than a new connection */
- route->ksnr_retry_interval *= 2;
- route->ksnr_retry_interval =
- max_t(time64_t, route->ksnr_retry_interval,
+ conn_cb->ksnr_retry_interval *= 2;
+ conn_cb->ksnr_retry_interval =
+ max_t(time64_t, conn_cb->ksnr_retry_interval,
*ksocknal_tunables.ksnd_min_reconnectms / 1000);
- route->ksnr_retry_interval =
- min_t(time64_t, route->ksnr_retry_interval,
+ conn_cb->ksnr_retry_interval =
+ min_t(time64_t, conn_cb->ksnr_retry_interval,
*ksocknal_tunables.ksnd_max_reconnectms / 1000);
- LASSERT(route->ksnr_retry_interval);
- route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;
+ LASSERT(conn_cb->ksnr_retry_interval);
+ conn_cb->ksnr_timeout = ktime_get_seconds() +
+ conn_cb->ksnr_retry_interval;
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
- peer_ni->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
+ peer_ni->ksnp_accepting == 0 &&
+ !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
struct ksock_conn *conn;
- /* ksnp_tx_queue is queued on a conn on successful
- * connection for V1.x and V2.x */
- if (!list_empty(&peer_ni->ksnp_conns)) {
- conn = list_entry(peer_ni->ksnp_conns.next,
- struct ksock_conn, ksnc_list);
- LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
- }
-
- /* take all the blocked packets while I've got the lock and
- * complete below... */
+ /* ksnp_tx_queue is queued on a conn on successful
+ * connection for V1.x and V2.x
+ */
+ conn = list_first_entry_or_null(&peer_ni->ksnp_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn)
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
+
+ /* take all the blocked packets while I've got the lock and
+ * complete below...
+ */
list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
- }
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
-/* Go through connd_routes queue looking for a route that we can process
+/* Go through connd_cbs queue looking for a conn_cb that we can process
* right now, @timeout_p can be updated if we need to come back later */
-static struct ksock_route *
-ksocknal_connd_get_route_locked(signed long *timeout_p)
+static struct ksock_conn_cb *
+ksocknal_connd_get_conn_cb_locked(signed long *timeout_p)
{
time64_t now = ktime_get_seconds();
- struct ksock_route *route;
+ time64_t conn_timeout;
+ struct ksock_conn_cb *conn_cb;
/* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
+ list_for_each_entry(conn_cb, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
- if (route->ksnr_retry_interval == 0 ||
- now >= route->ksnr_timeout)
- return route;
+ conn_timeout = conn_cb->ksnr_timeout;
+
+ if (conn_cb->ksnr_retry_interval == 0 ||
+ now >= conn_timeout)
+ return conn_cb;
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
- *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
- *timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
+ *timeout_p > cfs_time_seconds(conn_timeout - now))
+ *timeout_p = cfs_time_seconds(conn_timeout - now);
}
return NULL;
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
struct ksock_connreq *cr;
wait_queue_entry_t wait;
- int nloops = 0;
int cons_retry = 0;
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
- struct ksock_route *route = NULL;
+ struct ksock_conn_cb *conn_cb = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
+ bool dropped_lock = false;
if (ksocknal_connd_check_stop(sec, &timeout)) {
/* wakeup another one to check stop */
break;
}
- if (ksocknal_connd_check_start(sec, &timeout)) {
- /* created new thread */
- dropped_lock = 1;
- }
-
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
- /* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
- struct ksock_connreq, ksncr_list);
+ if (ksocknal_connd_check_start(sec, &timeout)) {
+ /* created new thread */
+ dropped_lock = true;
+ }
+ cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs,
+ struct ksock_connreq, ksncr_list);
+ if (cr) {
+ /* Connection accepted by the listener */
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
- dropped_lock = 1;
+ dropped_lock = true;
ksocknal_create_conn(cr->ksncr_ni, NULL,
cr->ksncr_sock, SOCKLND_CONN_NONE);
LIBCFS_FREE(cr, sizeof(*cr));
spin_lock_bh(connd_lock);
- }
+ }
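+ /* ksocknal_create_conn() performs the handshake and can block,
+ * so connd_lock is dropped around it; dropped_lock records that
+ * other state may have changed while it was not held.
+ */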
- /* Only handle an outgoing connection request if there
- * is a thread left to handle incoming connections and
- * create new connd */
- if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
- ksocknal_data.ksnd_connd_running) {
- route = ksocknal_connd_get_route_locked(&timeout);
- }
- if (route != NULL) {
- list_del(&route->ksnr_connd_list);
- ksocknal_data.ksnd_connd_connecting++;
+ /* Only handle an outgoing connection request if there
+ * is a thread left to handle incoming connections and
+ * create new connd
+ */
+ if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
+ ksocknal_data.ksnd_connd_running)
+ conn_cb = ksocknal_connd_get_conn_cb_locked(&timeout);
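+
+ /* e.g. with SOCKNAL_CONND_RESV == 1 and four connds running, at
+ * most three may block in ksocknal_connect(), leaving at least
+ * one free to service incoming connreqs and spawn new connds.
+ */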
+
+ if (conn_cb) {
+ list_del(&conn_cb->ksnr_connd_list);
+ ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
- dropped_lock = 1;
-
- if (ksocknal_connect(route)) {
- /* consecutive retry */
- if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
- CWARN("massive consecutive "
- "re-connecting to %pI4h\n",
- &route->ksnr_ipaddr);
- cons_retry = 0;
- }
- } else {
- cons_retry = 0;
- }
+ dropped_lock = true;
+
+ if (ksocknal_connect(conn_cb)) {
+ /* consecutive retry */
+ if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
+ CWARN("massive consecutive re-connecting to %pIS\n",
+ &conn_cb->ksnr_addr);
+ cons_retry = 0;
+ }
+ } else {
+ cons_retry = 0;
+ }
- ksocknal_route_decref(route);
+ ksocknal_conn_cb_decref(conn_cb);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_connecting--;
}
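+ /* If the lock was dropped above, more work may already be
+ * queued: keep draining it, yielding only when the scheduler
+ * wants the CPU back.
+ */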
if (dropped_lock) {
- if (++nloops < SOCKNAL_RESCHED)
+ if (!need_resched())
continue;
spin_unlock_bh(connd_lock);
- nloops = 0;
cond_resched();
spin_lock_bh(connd_lock);
continue;
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+ &wait);
spin_unlock_bh(connd_lock);
- nloops = 0;
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
}
/* We're called with a shared lock on ksnd_global_lock */
struct ksock_conn *conn;
struct list_head *ctmp;
+ struct ksock_tx *tx;
list_for_each(ctmp, &peer_ni->ksnp_conns) {
int error;
if (error != 0) {
ksocknal_conn_addref(conn);
- switch (error) {
- case ECONNRESET:
- CNETERR("A connection with %s "
- "(%pI4h:%d) was reset; "
- "it may have rebooted.\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- case ETIMEDOUT:
- CNETERR("A connection with %s "
- "(%pI4h:%d) timed out; the "
- "network or node may be down.\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- default:
- CNETERR("An unexpected network error %d "
- "occurred with %s "
- "(%pI4h:%d\n", error,
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
- break;
- }
+ switch (error) {
+ case ECONNRESET:
+ CNETERR("A connection with %s (%pISp) was reset; it may have rebooted.\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ break;
+ case ETIMEDOUT:
+ CNETERR("A connection with %s (%pISp) timed out; the network or node may be down.\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ break;
+ default:
+ CNETERR("An unexpected network error %d occurred with %s (%pISp\n",
+ error,
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ break;
+ }
- return (conn);
- }
+ return conn;
+ }
- if (conn->ksnc_rx_started &&
+ if (conn->ksnc_rx_started &&
ktime_get_seconds() >= conn->ksnc_rx_deadline) {
- /* Timed out incomplete incoming message */
- ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), "
- "state %d wanted %d left %d\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port,
- conn->ksnc_rx_state,
- conn->ksnc_rx_nob_wanted,
- conn->ksnc_rx_nob_left);
- return (conn);
- }
+ /* Timed out incomplete incoming message */
+ ksocknal_conn_addref(conn);
+ CNETERR("Timeout receiving from %s (%pISp), state %d wanted %d left %d\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr,
+ conn->ksnc_rx_state,
+ conn->ksnc_rx_nob_wanted,
+ conn->ksnc_rx_nob_left);
+ return conn;
+ }
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
ktime_get_seconds() >= conn->ksnc_tx_deadline) {
- /* Timed out messages queued for sending or
- * buffered in the socket's send buffer */
- ksocknal_conn_addref(conn);
- CNETERR("Timeout sending data to %s (%pI4h:%d) "
- "the network or that node may be down.\n",
- libcfs_id2str(peer_ni->ksnp_id),
- &conn->ksnc_ipaddr, conn->ksnc_port);
- return (conn);
- }
- }
+ /* Timed out messages queued for sending or
+ * buffered in the socket's send buffer
+ */
+ ksocknal_conn_addref(conn);
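+ /* Tag every tx still queued on this conn as locally timed
+ * out so LNet health accounting sees the failure when the
+ * txs are finalized.
+ */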
+ list_for_each_entry(tx, &conn->ksnc_tx_queue,
+ tx_list)
+ tx->tx_hstatus =
+ LNET_MSG_STATUS_LOCAL_TIMEOUT;
+ CNETERR("Timeout sending data to %s (%pISp) the network or that node may be down.\n",
+ libcfs_id2str(peer_ni->ksnp_id),
+ &conn->ksnc_peeraddr);
+ return conn;
+ }
+ }
- return (NULL);
+ return NULL;
}
static inline void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
struct ksock_tx *tx;
- struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
+ LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
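+ /* ksnp_tx_queue is kept in deadline order, so draining can stop
+ * at the first tx whose deadline has not yet passed.
+ */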
- while (!list_empty(&peer_ni->ksnp_tx_queue)) {
- tx = list_entry(peer_ni->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+ struct ksock_tx,
+ tx_list)) != NULL) {
if (ktime_get_seconds() < tx->tx_deadline)
break;
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &stale_txs);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
+
+ list_move_tail(&tx->tx_list, &stale_txs);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
static void
ksocknal_check_peer_timeouts(int idx)
{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
struct ksock_tx *tx;
again:
- /* NB. We expect to have a look at all the peers and not find any
- * connections to time out, so we just use a shared lock while we
- * take a look... */
+ /* NB. We expect to have a look at all the peers and not find any
+ * connections to time out, so we just use a shared lock while we
+ * take a look...
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
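+ /* When anything needs doing we take a ref on the target, drop
+ * the lock, act, and jump back to "again": the table may have
+ * changed while the lock was not held.
+ */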
- list_for_each_entry(peer_ni, peers, ksnp_list) {
+ hlist_for_each_entry(peer_ni, peers, ksnp_list) {
struct ksock_tx *tx_stale;
time64_t deadline = 0;
int resid = 0;
int n = 0;
- if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
+ if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- goto again;
- }
+ goto again;
+ }
- conn = ksocknal_find_timed_out_conn (peer_ni);
+ conn = ksocknal_find_timed_out_conn(peer_ni);
- if (conn != NULL) {
+ if (conn != NULL) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
-
- /* NB we won't find this one again, but we can't
- * just proceed with the next peer_ni, since we dropped
- * ksnd_global_lock and it might be dead already! */
- ksocknal_conn_decref(conn);
- goto again;
- }
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
- /* we can't process stale txs right here because we're
- * holding only shared lock */
- if (!list_empty(&peer_ni->ksnp_tx_queue)) {
- struct ksock_tx *tx;
+ /* NB we won't find this one again, but we can't
+ * just proceed with the next peer_ni, since we dropped
+ * ksnd_global_lock and it might be dead already!
+ */
+ ksocknal_conn_decref(conn);
+ goto again;
+ }
- tx = list_entry(peer_ni->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
- if (ktime_get_seconds() >= tx->tx_deadline) {
- ksocknal_peer_addref(peer_ni);
- read_unlock(&ksocknal_data.ksnd_global_lock);
+ /* we can't process stale txs right here because we're
+ * holding only shared lock
+ */
+ tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+ struct ksock_tx, tx_list);
+ if (tx && ktime_get_seconds() >= tx->tx_deadline) {
+ ksocknal_peer_addref(peer_ni);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_flush_stale_txs(peer_ni);
+ ksocknal_flush_stale_txs(peer_ni);
- ksocknal_peer_decref(peer_ni);
- goto again;
- }
- }
+ ksocknal_peer_decref(peer_ni);
+ goto again;
+ }
if (list_empty(&peer_ni->ksnp_zc_req_list))
- continue;
+ continue;
tx_stale = NULL;
spin_lock(&peer_ni->ksnp_lock);
wait_queue_entry_t wait;
struct ksock_conn *conn;
struct ksock_sched *sched;
- struct list_head enomem_conns;
+ LIST_HEAD(enomem_conns);
int nenomem_conns;
time64_t timeout;
int i;
int peer_index = 0;
time64_t deadline = ktime_get_seconds();
- cfs_block_allsigs ();
-
- INIT_LIST_HEAD(&enomem_conns);
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn) {
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn) {
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
ksocknal_destroy_conn(conn);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- continue;
- }
+ continue;
+ }
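+ /* Zombie conns have dropped their final refcount and can be
+ * freed outright by ksocknal_destroy_conn(); deathrow conns
+ * above are still being closed down first.
+ */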
- if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
- }
+ list_splice_init(&ksocknal_data.ksnd_enomem_conns,
+ &enomem_conns);
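+ /* list_splice_init() empties ksnd_enomem_conns under the reaper
+ * lock, so new ENOMEM conns can queue while we reschedule this
+ * batch below.
+ */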
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty(&enomem_conns)) {
- conn = list_entry(enomem_conns.next,
- struct ksock_conn, ksnc_tx_list);
+ while ((conn = list_first_entry_or_null(&enomem_conns,
+ struct ksock_conn,
+ ksnc_tx_list)) != NULL) {
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
nenomem_conns++;
}
- /* careful with the jiffy wrap... */
+ /* careful with the time wrap: catch up if we have fallen behind */
while ((timeout = deadline - ktime_get_seconds()) <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = ksocknal_data.ksnd_peer_hash_size;
-
- /* Time to check for timeouts on a few more peers: I do
- * checks every 'p' seconds on a proportion of the peer_ni
- * table and I need to check every connection 'n' times
- * within a timeout interval, to ensure I detect a
- * timeout on any connection within (n+1)/n times the
- * timeout interval. */
-
- if (*ksocknal_tunables.ksnd_timeout > n * p)
- chunk = (chunk * n * p) /
- *ksocknal_tunables.ksnd_timeout;
- if (chunk == 0)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts (peer_index);
- peer_index = (peer_index + 1) %
- ksocknal_data.ksnd_peer_hash_size;
- }
+ const int n = 4;
+ const int p = 1;
+ int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
+ unsigned int lnd_timeout;
+
+ /* Time to check for timeouts on a few more peers: I
+ * do checks every 'p' seconds on a proportion of the
+ * peer_ni table and I need to check every connection
+ * 'n' times within a timeout interval, to ensure I
+ * detect a timeout on any connection within (n+1)/n
+ * times the timeout interval.
+ */
+
+ lnd_timeout = ksocknal_timeout();
+ if (lnd_timeout > n * p)
+ chunk = (chunk * n * p) / lnd_timeout;
+ if (chunk == 0)
+ chunk = 1;
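+
+ /* e.g. a 50s timeout over 512 hash buckets gives a chunk of
+ * 512 * 4 / 50 = 40 buckets per one-second pass, covering the
+ * whole table about four times per timeout interval.
+ */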
+
+ for (i = 0; i < chunk; i++) {
+ ksocknal_check_peer_timeouts(peer_index);
+ peer_index = (peer_index + 1) %
+ HASH_SIZE(ksocknal_data.ksnd_peers);
+ }
deadline += p;
- }
+ }
if (nenomem_conns != 0) {
/* Reduce my timeout if I rescheduled ENOMEM conns.