/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*
* Author: Zach Brown <zab@zabbo.net>
* Author: Peter J. Braam <braam@clusterfs.com>
#include "socklnd.h"
-ksock_tx_t *
+struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
- ksock_tx_t *tx = NULL;
+ struct ksock_tx *tx = NULL;
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, ksock_tx_t, tx_list);
+ tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+ struct ksock_tx, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
+ tx->tx_hstatus = LNET_MSG_STATUS_OK;
tx->tx_desc_size = size;
atomic_inc(&ksocknal_data.ksnd_nactive_txs);
return tx;
}
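
The NOOP path above reuses pre-built descriptors from the ksnd_idle_noop_txs freelist under ksnd_tx_lock, and the patch now resets tx_hstatus to LNET_MSG_STATUS_OK on every (re)allocation so stale health state cannot leak between uses. A minimal userspace sketch of that reuse pattern follows; the names (sketch_tx, noop_pool, alloc_tx) are illustrative, not socklnd identifiers.

#include <pthread.h>
#include <stdlib.h>

struct sketch_tx {
	struct sketch_tx *next;
	int hstatus;		/* models tx_hstatus: 0 == LNET_MSG_STATUS_OK */
	int desc_size;
};

static struct sketch_tx *noop_pool;	/* models ksnd_idle_noop_txs */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct sketch_tx *alloc_tx(int size)
{
	struct sketch_tx *tx = NULL;

	pthread_mutex_lock(&pool_lock);
	if (noop_pool != NULL) {	/* reuse an idle descriptor */
		tx = noop_pool;
		noop_pool = tx->next;
	}
	pthread_mutex_unlock(&pool_lock);

	if (tx == NULL)
		tx = malloc(sizeof(*tx));
	if (tx != NULL) {
		tx->hstatus = 0;	/* every (re)use starts healthy */
		tx->desc_size = size;
	}
	return tx;
}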
-ksock_tx_t *
+struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
if (tx == NULL) {
void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(struct ksock_tx *tx)
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
}
static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
}
static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
- lnet_kiov_t *kiov = tx->tx_kiov;
- int nob;
- int rc;
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int nob;
+ int rc;
LASSERT (tx->tx_niov == 0);
LASSERT (tx->tx_nkiov > 0);
}
static int
-ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
int rc;
int bufnob;
/* allocated send buffer bytes < computed; infer
* something got ACKed */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ lnet_get_lnd_timeout();
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = bufnob;
smp_mb();
}
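
The hunk above infers ACK progress from the socket itself: if the current queued-byte count (bufnob) has dropped below the value recorded in ksnc_tx_bufnob, some bytes must have left the send buffer, so the peer is alive and the transmit deadline is pushed out, now by lnet_get_lnd_timeout() rather than the socklnd-private ksnd_timeout tunable. A simplified sketch of that refresh, with now_seconds and lnd_timeout standing in for ktime_get_seconds() and lnet_get_lnd_timeout():

struct sketch_conn {
	long tx_deadline;	/* models ksnc_tx_deadline */
	long tx_bufnob;		/* queued bytes seen on the last pass */
	long last_alive;	/* models ksnp_last_alive */
};

static void note_tx_progress(struct sketch_conn *c, long bufnob,
			     long now_seconds, long lnd_timeout)
{
	if (bufnob < c->tx_bufnob) {
		/* fewer bytes queued than recorded: the peer ACKed some
		 * data, so restart the transmit clock */
		c->tx_deadline = now_seconds + lnd_timeout;
		c->last_alive = now_seconds;
	}
	c->tx_bufnob = bufnob;
}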
static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(struct ksock_conn *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ lnet_get_lnd_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
}
static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn)
{
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
- int nob;
- int rc;
+ lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ int nob;
+ int rc;
LASSERT (conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ lnet_get_lnd_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
}
static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(struct ksock_conn *conn)
{
/* Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
}
void
-ksocknal_tx_done(struct lnet_ni *ni, ksock_tx_t *tx, int rc)
+ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
+ enum lnet_msg_hstatus hstatus = tx->tx_hstatus;
ENTRY;
LASSERT(ni != NULL || tx->tx_conn != NULL);
- if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted))
+ if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
rc = -EIO;
+ if (hstatus == LNET_MSG_STATUS_OK)
+ hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
if (tx->tx_conn != NULL)
ksocknal_conn_decref(tx->tx_conn);
ksocknal_free_tx(tx);
- if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
+ if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP goes without lnetmsg */
+ if (rc)
+ CERROR("tx failure rc = %d, hstatus = %d\n", rc,
+ hstatus);
+ lnetmsg->msg_health_status = hstatus;
lnet_finalize(lnetmsg, rc);
+ }
EXIT;
}
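
ksocknal_tx_done() now carries a health status alongside the errno: a nominally successful send that left residual bytes, or an aborted zero-copy request, is demoted to -EIO and stamped LNET_MSG_STATUS_LOCAL_ERROR unless a more specific status was already recorded, and the status is copied into msg_health_status before lnet_finalize() so LNet's health layer can decide about resends. A condensed sketch of that demotion logic (enum values abbreviated):

#include <errno.h>

enum sketch_hstatus { ST_OK, ST_LOCAL_ERROR };

static int tx_done(int rc, int resid, int zc_aborted,
		   enum sketch_hstatus *hstatus)
{
	/* a "successful" send that left residual bytes, or an aborted
	 * zero-copy request, still failed as far as LNet is concerned */
	if (rc == 0 && (resid != 0 || zc_aborted)) {
		rc = -EIO;
		if (*hstatus == ST_OK)
			*hstatus = ST_LOCAL_ERROR;
	}
	/* caller copies *hstatus into msg_health_status, then finalizes */
	return rc;
}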
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, ksock_tx_t, tx_list);
+ tx = list_entry(txlist->next, struct ksock_tx, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
list_del(&tx->tx_list);
+ if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
+ if (error == -ETIMEDOUT)
+ tx->tx_hstatus =
+ LNET_MSG_STATUS_LOCAL_TIMEOUT;
+ else if (error == -ENETDOWN ||
+ error == -EHOSTUNREACH ||
+ error == -ENETUNREACH)
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
+ /*
+ * for all other errors we don't want to
+ * retransmit
+ */
+ else if (error)
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
+
LASSERT(atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
}
}
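
When a whole tx list is flushed, the errno supplied by the caller determines how LNet may react: a timeout maps to LOCAL_TIMEOUT (a retry is plausible), unreachable-network errors map to LOCAL_DROPPED (the message never hit the wire), and everything else maps to LOCAL_ERROR (no retransmit). The same mapping as a standalone helper, sketched with abbreviated status names:

#include <errno.h>

enum sketch_hstatus {
	ST_OK, ST_LOCAL_ERROR, ST_LOCAL_TIMEOUT, ST_LOCAL_DROPPED
};

static enum sketch_hstatus map_txlist_error(int error)
{
	if (error == -ETIMEDOUT)
		return ST_LOCAL_TIMEOUT;	/* a retry may succeed */
	if (error == -ENETDOWN || error == -EHOSTUNREACH ||
	    error == -ENETUNREACH)
		return ST_LOCAL_DROPPED;	/* never reached the wire */
	if (error)
		return ST_LOCAL_ERROR;		/* do not retransmit */
	return ST_OK;
}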
static void
-ksocknal_check_zc_req(ksock_tx_t *tx)
+ksocknal_check_zc_req(struct ksock_tx *tx)
{
- ksock_conn_t *conn = tx->tx_conn;
- ksock_peer_ni_t *peer_ni = conn->ksnc_peer;
+ struct ksock_conn *conn = tx->tx_conn;
+ struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
* to ksnp_zc_req_list if some fragment of this message should be sent
/* ZC_REQ is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ lnet_get_lnd_timeout();
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
}
static void
-ksocknal_uncheck_zc_req(ksock_tx_t *tx)
+ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
- ksock_peer_ni_t *peer_ni = tx->tx_conn->ksnc_peer;
+ struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
}
static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
- int rc;
+ int rc;
+ bool error_sim = false;
+
+ if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
+ error_sim = true;
+ rc = -EINVAL;
+ goto simulate_error;
+ }
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ /*
+ * set the health status of the message which determines
+ * whether we should retry the transmit
+ */
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
return (rc);
}
- /* Actual error */
- LASSERT (rc < 0);
+simulate_error:
- if (!conn->ksnc_closing) {
- switch (rc) {
- case -ECONNRESET:
+ /* Actual error */
+ LASSERT(rc < 0);
+
+ if (!error_sim) {
+ /*
+ * set the health status of the message which determines
+ * whether we should retry the transmit
+ */
+ if (rc == -ETIMEDOUT)
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
+ else
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
+
+ if (!conn->ksnc_closing) {
+ switch (rc) {
+ case -ECONNRESET:
LCONSOLE_WARN("Host %pI4h reset our connection "
- "while we were sending data; it may have "
- "rebooted.\n",
+ "while we were sending data; it may have "
+ "rebooted.\n",
&conn->ksnc_ipaddr);
- break;
- default:
- LCONSOLE_WARN("There was an unexpected network error "
+ break;
+ default:
+ LCONSOLE_WARN("There was an unexpected network error "
"while writing to %pI4h: %d.\n",
&conn->ksnc_ipaddr, rc);
- break;
- }
+ break;
+ }
CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port);
- }
+ }
- if (tx->tx_zc_checked)
- ksocknal_uncheck_zc_req(tx);
+ if (tx->tx_zc_checked)
+ ksocknal_uncheck_zc_req(tx);
- /* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings(conn,
+ (conn->ksnc_closing) ? 0 : rc);
- return (rc);
+ return rc;
}
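
The error_sim flag exists because lnet_send_error_simulation() both decides to fail the message and writes the simulated health status into tx_hstatus itself; the simulate_error path must therefore leave the status alone for injected failures and only re-classify genuine ones (remote timeout versus local error). A compact model of that control flow, with sim_check() and do_transmit() as hypothetical stand-ins:

#include <errno.h>

enum sketch_hstatus { ST_OK, ST_LOCAL_ERROR, ST_REMOTE_TIMEOUT };

static int sim_check(enum sketch_hstatus *hstatus)
{
	(void)hstatus;
	return 0;		/* no fault injected in this sketch */
}

static int do_transmit(void)
{
	return 0;		/* stand-in for the real send loop */
}

static int process_transmit(enum sketch_hstatus *hstatus)
{
	int error_sim = 0;
	int rc;

	if (sim_check(hstatus)) {
		error_sim = 1;	/* status was already set by the simulator */
		rc = -EINVAL;
		goto simulate_error;
	}

	rc = do_transmit();
	if (rc >= 0)
		return rc;

simulate_error:
	if (!error_sim) {
		/* only genuine failures are re-classified here */
		*hstatus = (rc == -ETIMEDOUT) ? ST_REMOTE_TIMEOUT
					      : ST_LOCAL_ERROR;
	}
	return rc;
}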
static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(struct ksock_route *route)
{
/* called holding write lock on ksnd_global_lock */
}
void
-ksocknal_launch_all_connections_locked (ksock_peer_ni_t *peer_ni)
+ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
- ksock_route_t *route;
+ struct ksock_route *route;
/* called holding write lock on ksnd_global_lock */
for (;;) {
}
}
-ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
+struct ksock_conn *
+ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
{
struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_conn_t *typed = NULL;
- ksock_conn_t *fallback = NULL;
- int tnob = 0;
- int fnob = 0;
+ struct ksock_conn *conn;
+ struct ksock_conn *typed = NULL;
+ struct ksock_conn *fallback = NULL;
+ int tnob = 0;
+ int fnob = 0;
list_for_each(tmp, &peer_ni->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
+ struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
+ ksnc_list);
+ int nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
+ int rc;
LASSERT (!c->ksnc_closing);
LASSERT (c->ksnc_proto != NULL &&
}
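
ksocknal_find_conn_locked() weighs each candidate by its backlog: bytes queued inside socklnd (ksnc_tx_nob) plus bytes still sitting in the kernel socket's send queue (sk_wmem_queued), preferring the least-loaded connection of the matching type and keeping an any-type fallback. A simplified userspace model of that selection; the real function also checks protocol support and breaks ties round-robin:

struct sketch_conn {
	struct sketch_conn *next;
	int type_matches;	/* conn type suits this tx */
	int tx_nob;		/* models atomic_read(&ksnc_tx_nob) */
	int sock_queued;	/* models sk->sk_wmem_queued */
};

static struct sketch_conn *pick_conn(struct sketch_conn *conns)
{
	struct sketch_conn *typed = NULL, *fallback = NULL, *c;
	int tnob = 0, fnob = 0;

	for (c = conns; c != NULL; c = c->next) {
		int nob = c->tx_nob + c->sock_queued;	/* total backlog */

		if (c->type_matches && (typed == NULL || nob < tnob)) {
			typed = c;	/* least-loaded matching conn */
			tnob = nob;
		}
		if (fallback == NULL || nob < fnob) {
			fallback = c;	/* least-loaded conn of any type */
			fnob = nob;
		}
	}
	return typed != NULL ? typed : fallback;
}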
void
-ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
conn->ksnc_proto->pro_pack(tx);
}
void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
- ksock_sched_t *sched = conn->ksnc_scheduler;
- struct ksock_msg *msg = &tx->tx_msg;
- ksock_tx_t *ztx = NULL;
- int bufnob = 0;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
+ struct ksock_msg *msg = &tx->tx_msg;
+ struct ksock_tx *ztx = NULL;
+ int bufnob = 0;
/* called holding global lock (read or irq-write) and caller may
* not have dropped this lock between finding conn and calling me,
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ lnet_get_lnd_timeout();
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = 0;
}
-ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_ni_t *peer_ni)
+struct ksock_route *
+ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
{
time64_t now = ktime_get_seconds();
struct list_head *tmp;
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
return (NULL);
}
-ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_ni_t *peer_ni)
+struct ksock_route *
+ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
{
- struct list_head *tmp;
- ksock_route_t *route;
+ struct list_head *tmp;
+ struct ksock_route *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
}
int
-ksocknal_launch_packet(struct lnet_ni *ni, ksock_tx_t *tx,
+ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
struct lnet_process_id id)
{
- ksock_peer_ni_t *peer_ni;
- ksock_conn_t *conn;
- rwlock_t *g_lock;
- int retry;
- int rc;
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_conn *conn;
+ rwlock_t *g_lock;
+ int retry;
+ int rc;
LASSERT (tx->tx_conn == NULL);
ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- *ksocknal_tunables.ksnd_timeout;
+ lnet_get_lnd_timeout();
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
int desc_size;
int rc;
LASSERT (!in_interrupt ());
if (payload_iov != NULL)
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.virt.iov[1 + payload_niov]);
else
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
}
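
Descriptor sizing here uses the offsetof-over-union idiom: iovec payloads reserve one extra slot for the ksock_msg header (hence 1 + payload_niov), while page payloads size the kiov array exactly. A sketch of the idiom under assumed types; the zero-length union arrays and the variable index inside offsetof() rely on GCC/Clang extensions, as the kernel code does:

#include <stddef.h>
#include <stdlib.h>
#include <sys/uio.h>

struct sketch_page_frag {	/* loosely models lnet_kiov_t */
	void *page;
	unsigned int off;
	unsigned int len;
};

struct sketch_tx {
	int niov;
	union {
		struct iovec iov[0];		 /* header + payload iovecs */
		struct sketch_page_frag kiov[0]; /* page payload only */
	} frags;
};

static struct sketch_tx *alloc_desc(int payload_niov, int paged)
{
	size_t size = paged ?
		offsetof(struct sketch_tx, frags.kiov[payload_niov]) :
		/* +1 reserves a slot for the ksock_msg header */
		offsetof(struct sketch_tx, frags.iov[1 + payload_niov]);

	return malloc(size);
}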
int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
-
- int nob;
- unsigned int niov;
- int skipped;
+ int nob;
+ unsigned int niov;
+ int skipped;
LASSERT(conn->ksnc_proto != NULL);
}
static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(struct ksock_conn *conn)
{
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
- ksock_conn_t *conn = (ksock_conn_t *)private;
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_conn *conn = private;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
LASSERT (niov <= LNET_MAX_IOV);
}
static inline int
-ksocknal_sched_cansleep(ksock_sched_t *sched)
+ksocknal_sched_cansleep(struct ksock_sched *sched)
{
int rc;
int ksocknal_scheduler(void *arg)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
- int nloops = 0;
- long id = (long)arg;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
+ int rc;
+ int nloops = 0;
+ long id = (long)arg;
info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
+ struct ksock_conn, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
}
conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
+ struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
* Add connection to kss_rx_conns of scheduler
* and wake up the scheduler.
*/
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(struct ksock_conn *conn)
{
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
ENTRY;
sched = conn->ksnc_scheduler;
* Add connection to kss_tx_conns of scheduler
* and wake up the scheduler.
*/
-void ksocknal_write_callback(ksock_conn_t *conn)
+void ksocknal_write_callback(struct ksock_conn *conn)
{
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
ENTRY;
sched = conn->ksnc_scheduler;
EXIT;
}
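
Both socket callbacks follow one pattern: find the connection's scheduler (fixed at creation), queue the conn on the scheduler's ready list at most once, and wake the scheduler thread, so the softirq-context callback stays short and all real work happens in ksocknal_scheduler(). A pthread-flavoured sketch of that handoff; the kernel code uses spinlocks and wait queues instead:

#include <pthread.h>

struct sketch_conn;

struct sketch_sched {			/* models struct ksock_sched */
	pthread_mutex_t lock;		/* models kss_lock */
	pthread_cond_t wake;		/* models kss_waitq */
	struct sketch_conn *ready;	/* models kss_tx_conns */
};

struct sketch_conn {
	struct sketch_conn *next;
	struct sketch_sched *sched;	/* fixed when the conn is created */
	int scheduled;			/* models ksnc_tx_scheduled */
};

static void write_callback(struct sketch_conn *conn)
{
	struct sketch_sched *sched = conn->sched;

	pthread_mutex_lock(&sched->lock);
	if (!conn->scheduled) {		/* enqueue at most once */
		conn->scheduled = 1;
		conn->next = sched->ready;
		sched->ready = conn;
		pthread_cond_signal(&sched->wake);
	}
	pthread_mutex_unlock(&sched->lock);
}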
-static ksock_proto_t *
+static struct ksock_proto *
ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
{
__u32 version = 0;
}
int
-ksocknal_send_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+ struct ksock_net *net = (struct ksock_net *)ni->ni_data;
- LASSERT(hello->kshm_nips <= LNET_NUM_INTERFACES);
+ LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);
/* rely on caller to hold a ref on socket so it wouldn't disappear */
LASSERT(conn->ksnc_proto != NULL);
}
int
-ksocknal_recv_hello(struct lnet_ni *ni, ksock_conn_t *conn,
+ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
struct ksock_hello_msg *hello,
struct lnet_process_id *peerid,
__u64 *incarnation)
int timeout;
int proto_match;
int rc;
- ksock_proto_t *proto;
- struct lnet_process_id recv_id;
+ struct ksock_proto *proto;
+ struct lnet_process_id recv_id;
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
- timeout = active ? *ksocknal_tunables.ksnd_timeout :
+ timeout = active ? lnet_get_lnd_timeout() :
lnet_acceptor_timeout();
rc = lnet_sock_read(sock, &hello->kshm_magic,
}
static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(struct ksock_route *route)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- ksock_peer_ni_t *peer_ni = route->ksnr_peer;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct ksock_peer_ni *peer_ni = route->ksnr_peer;
int type;
int wanted;
struct socket *sock;
int retry_later = 0;
int rc = 0;
- deadline = ktime_get_seconds() + *ksocknal_tunables.ksnd_timeout;
+ deadline = ktime_get_seconds() + lnet_get_lnd_timeout();
write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x */
if (!list_empty(&peer_ni->ksnp_conns)) {
conn = list_entry(peer_ni->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
}
/* Go through connd_routes queue looking for a route that we can process
* right now; @timeout_p can be updated if we need to come back later */
-static ksock_route_t *
+static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
time64_t now = ktime_get_seconds();
- ksock_route_t *route;
+ struct ksock_route *route;
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
}
int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
{
- spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
- wait_queue_t wait;
- int nloops = 0;
- int cons_retry = 0;
+ spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
+ struct ksock_connreq *cr;
+ wait_queue_entry_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
cfs_block_allsigs();
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
+ struct ksock_route *route = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
- next, ksock_connreq_t, ksncr_list);
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+ struct ksock_connreq, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
return 0;
}
-static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_ni_t *peer_ni)
+static struct ksock_conn *
+ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
{
/* We're called with a shared lock on ksnd_global_lock */
- ksock_conn_t *conn;
- struct list_head *ctmp;
+ struct ksock_conn *conn;
+ struct list_head *ctmp;
+ struct ksock_tx *tx;
list_for_each(ctmp, &peer_ni->ksnp_conns) {
- int error;
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ int error;
+
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
+ list_for_each_entry(tx, &conn->ksnc_tx_queue,
+ tx_list)
+ tx->tx_hstatus =
+ LNET_MSG_STATUS_LOCAL_TIMEOUT;
CNETERR("Timeout sending data to %s (%pI4h:%d) "
"the network or that node may be down.\n",
libcfs_id2str(peer_ni->ksnp_id),
}
static inline void
-ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
+ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
- ksock_tx_t *tx;
- struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
+ struct ksock_tx *tx;
+ struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
while (!list_empty(&peer_ni->ksnp_tx_queue)) {
tx = list_entry(peer_ni->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx, tx_list);
if (ktime_get_seconds() < tx->tx_deadline)
break;
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
+
list_del(&tx->tx_list);
list_add_tail(&tx->tx_list, &stale_txs);
}
}
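
ksocknal_flush_stale_txs() detaches expired txs onto a private list while holding ksnd_global_lock and only finalizes them (now stamped LNET_MSG_STATUS_LOCAL_TIMEOUT) after dropping it, since completion reaches lnet_finalize() and must not run under the lock. A sketch of that drain-then-finalize shape, with finalize() as an inert stand-in and list order simplified:

#include <errno.h>
#include <pthread.h>

struct sketch_tx {
	struct sketch_tx *next;
	long deadline;
	int hstatus;			/* 0 == OK, 2 == LOCAL_TIMEOUT here */
};

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;

static void finalize(struct sketch_tx *tx, int error)
{
	(void)tx; (void)error;		/* inert stand-in for tx_done() */
}

static void flush_stale(struct sketch_tx **queue, long now)
{
	struct sketch_tx *stale = NULL, *tx;

	pthread_mutex_lock(&glock);
	while ((tx = *queue) != NULL && now >= tx->deadline) {
		tx->hstatus = 2;	/* stamp LOCAL_TIMEOUT under lock */
		*queue = tx->next;	/* detach while holding the lock */
		tx->next = stale;
		stale = tx;
	}
	pthread_mutex_unlock(&glock);

	while ((tx = stale) != NULL) {	/* finalize only after unlocking */
		stale = tx->next;
		finalize(tx, -ETIMEDOUT);
	}
}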
static int
-ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni)
+ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
/* last_alive will be updated by create_conn */
if (list_empty(&peer_ni->ksnp_conns))
static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- ksock_peer_ni_t *peer_ni;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ struct ksock_peer_ni *peer_ni;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
again:
/* NB. We expect to have a look at all the peers and not find any
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer_ni, peers, ksnp_list) {
- ksock_tx_t *tx_stale;
+ struct ksock_tx *tx_stale;
time64_t deadline = 0;
int resid = 0;
int n = 0;
/* we can't process stale txs right here because we're
* holding only shared lock */
if (!list_empty(&peer_ni->ksnp_tx_queue)) {
- ksock_tx_t *tx =
- list_entry(peer_ni->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx *tx;
+ tx = list_entry(peer_ni->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (ktime_get_seconds() >= tx->tx_deadline) {
ksocknal_peer_addref(peer_ni);
read_unlock(&ksocknal_data.ksnd_global_lock);
int ksocknal_reaper(void *arg)
{
- wait_queue_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- struct list_head enomem_conns;
- int nenomem_conns;
+ wait_queue_entry_t wait;
+ struct ksock_conn *conn;
+ struct ksock_sched *sched;
+ struct list_head enomem_conns;
+ int nenomem_conns;
time64_t timeout;
- int i;
- int peer_index = 0;
+ int i;
+ int peer_index = 0;
time64_t deadline = ktime_get_seconds();
cfs_block_allsigs ();
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
-
if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry(ksocknal_data. \
- ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
+ conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry(ksocknal_data.ksnd_zombie_conns.\
- next, ksock_conn_t, ksnc_list);
+ conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
nenomem_conns = 0;
while (!list_empty(&enomem_conns)) {
conn = list_entry(enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
+ struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
const int n = 4;
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
+ unsigned int lnd_timeout;
/* Time to check for timeouts on a few more peers: I do
* checks every 'p' seconds on a proportion of the peer_ni
* timeout on any connection within (n+1)/n times the
* timeout interval. */
- if (*ksocknal_tunables.ksnd_timeout > n * p)
- chunk = (chunk * n * p) /
- *ksocknal_tunables.ksnd_timeout;
- if (chunk == 0)
- chunk = 1;
+ lnd_timeout = lnet_get_lnd_timeout();
+ if (lnd_timeout > n * p)
+ chunk = (chunk * n * p) / lnd_timeout;
+ if (chunk == 0)
+ chunk = 1;
for (i = 0; i < chunk; i++) {
ksocknal_check_peer_timeouts (peer_index);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
list_empty(&ksocknal_data.ksnd_zombie_conns))
- schedule_timeout(cfs_duration_sec(timeout));
+ schedule_timeout(cfs_time_seconds(timeout));
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
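
The chunk arithmetic in the reaper bounds detection latency: checking chunk = hash_size * n * p / timeout buckets per p-second pass sweeps the whole peer table in roughly timeout/n seconds, so a dead connection is noticed within about (n+1)/n times the timeout. A worked example with assumed values (251 hash buckets, 50 s LND timeout; both are illustrative, not defaults taken from the source):

static int peers_to_check(int hash_size, unsigned int lnd_timeout)
{
	const int n = 4, p = 1;
	int chunk = hash_size;

	if (lnd_timeout > (unsigned int)(n * p))
		chunk = (chunk * n * p) / (int)lnd_timeout;
	if (chunk == 0)
		chunk = 1;
	/* 251 buckets, 50 s timeout -> 251*4/50 = 20 buckets per 1 s
	 * pass, so a full sweep takes ~13 s, well inside the
	 * (n+1)/n * timeout = 62.5 s detection bound */
	return chunk;
}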