-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2011, 2014, Intel Corporation.
*
* Author: Zach Brown <zab@zabbo.net>
* Author: Peter J. Braam <braam@clusterfs.com>
* Author: Phil Schwan <phil@clusterfs.com>
* Author: Eric Barton <eric@bartonsoftware.com>
*
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ * This file is part of Lustre, https://wiki.hpdd.intel.com/
*
* Portals is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
ksock_tx_t *
ksocknal_alloc_tx(int type, int size)
{
- ksock_tx_t *tx = NULL;
+ ksock_tx_t *tx = NULL;
- if (type == KSOCK_MSG_NOOP) {
- LASSERT (size == KSOCK_NOOP_TX_SIZE);
+ if (type == KSOCK_MSG_NOOP) {
+ LASSERT(size == KSOCK_NOOP_TX_SIZE);
- /* searching for a noop tx in free list */
- cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+ /* searching for a noop tx in free list */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
- ksock_tx_t, tx_list);
- LASSERT(tx->tx_desc_size == size);
- list_del(&tx->tx_list);
- }
+ if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+ ksock_tx_t, tx_list);
+ LASSERT(tx->tx_desc_size == size);
+ list_del(&tx->tx_list);
+ }
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
if (tx == NULL)
LIBCFS_ALLOC(tx, size);

if (tx == NULL)
return NULL;
- cfs_atomic_set(&tx->tx_refcount, 1);
- tx->tx_zc_capable = 0;
- tx->tx_zc_checked = 0;
- tx->tx_desc_size = size;
+ atomic_set(&tx->tx_refcount, 1);
+ tx->tx_zc_aborted = 0;
+ tx->tx_zc_capable = 0;
+ tx->tx_zc_checked = 0;
+ tx->tx_desc_size = size;
- cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+ atomic_inc(&ksocknal_data.ksnd_nactive_txs);
- return tx;
+ return tx;
}
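
The hunk above preserves the NOOP-descriptor freelist: every KSOCK_MSG_NOOP tx has the same fixed size, so a popped entry is reused verbatim instead of round-tripping through the allocator. A minimal userspace sketch of the same pattern, assuming pthread locking in place of ksnd_tx_lock and hypothetical struct names throughout:

    #include <pthread.h>
    #include <stdlib.h>

    struct noop_tx {
            struct noop_tx *next;   /* freelist link */
            int desc_size;          /* always the fixed NOOP size */
    };

    static struct noop_tx *idle_noop_txs;   /* freelist head */
    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct noop_tx *alloc_noop_tx(void)
    {
            struct noop_tx *tx = NULL;

            pthread_mutex_lock(&tx_lock);
            if (idle_noop_txs != NULL) {            /* reuse an idle entry */
                    tx = idle_noop_txs;
                    idle_noop_txs = tx->next;
            }
            pthread_mutex_unlock(&tx_lock);

            if (tx == NULL)                         /* freelist was empty */
                    tx = malloc(sizeof(*tx));
            if (tx != NULL)
                    tx->desc_size = (int)sizeof(*tx);
            return tx;
    }

    static void free_noop_tx(struct noop_tx *tx)
    {
            pthread_mutex_lock(&tx_lock);           /* push back, never free() */
            tx->next = idle_noop_txs;
            idle_noop_txs = tx;
            pthread_mutex_unlock(&tx_lock);
    }

ksocknal_free_tx() below is the other half of this scheme: noop descriptors go back on the list, everything else is really freed.
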
ksock_tx_t *
tx->tx_niov = 1;
tx->tx_nonblk = nonblk;
- socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
+ tx->tx_msg.ksm_csum = 0;
+ tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_msg.ksm_zc_cookies[1] = cookie;
return tx;
void
ksocknal_free_tx (ksock_tx_t *tx)
{
- cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+ atomic_dec(&ksocknal_data.ksnd_nactive_txs);
- if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
- /* it's a noop tx */
- cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+ if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+ /* it's a noop tx */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+ list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
- } else {
- LIBCFS_FREE(tx, tx->tx_desc_size);
- }
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ } else {
+ LIBCFS_FREE(tx, tx->tx_desc_size);
+ }
}
-int
+static int
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
- struct iovec *iov = tx->tx_iov;
+ struct kvec *iov = tx->tx_iov;
int nob;
int rc;
LASSERT (tx->tx_niov > 0);
if (nob < (int) iov->iov_len) {
- iov->iov_base = (void *)((char *)iov->iov_base + nob);
+ iov->iov_base += nob;
iov->iov_len -= nob;
return (rc);
}
return (rc);
}
-int
+static int
ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
lnet_kiov_t *kiov = tx->tx_kiov;
return (rc);
}
-int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+static int
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
- int rc;
- int bufnob;
+ int rc;
+ int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
- cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
- }
+ if (ksocknal_data.ksnd_stall_tx != 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+ }
- LASSERT (tx->tx_resid != 0);
+ LASSERT(tx->tx_resid != 0);
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
rc = ksocknal_send_kiov (conn, tx);
}
- bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
+ bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (rc > 0) /* sent something? */
conn->ksnc_tx_bufnob += rc; /* account it */
- if (bufnob < conn->ksnc_tx_bufnob) {
- /* allocated send buffer bytes < computed; infer
- * something got ACKed */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_tx_bufnob = bufnob;
- cfs_mb();
- }
+ if (bufnob < conn->ksnc_tx_bufnob) {
+ /* allocated send buffer bytes < computed; infer
+ * something got ACKed */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = bufnob;
+ smp_mb();
+ }
- if (rc <= 0) { /* Didn't write anything? */
+ if (rc <= 0) { /* Didn't write anything? */
if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
rc = -EAGAIN;
}
/* socket's wmem_queued now includes 'rc' bytes */
- cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
+ atomic_sub (rc, &conn->ksnc_tx_nob);
rc = 0;
} while (tx->tx_resid != 0);
return (rc);
}
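
The bufnob accounting in ksocknal_transmit() is the transmit-side liveness check: every byte handed to the socket is added to ksnc_tx_bufnob, and when sk_wmem_queued is later observed below that running total, some bytes must have left the send buffer (i.e. were ACKed), so the deadline is pushed out. A self-contained sketch of that inference, with hypothetical names:

    /* Returns 1 and refreshes *deadline if the peer made progress. */
    static int tx_progress(int sock_queued_now, int *tracked_queued,
                           long *deadline, long now, long timeout)
    {
            if (sock_queued_now < *tracked_queued) {
                    /* the kernel drained some of what we accounted:
                     * the peer ACKed data, so the connection is alive */
                    *tracked_queued = sock_queued_now;
                    *deadline = now + timeout;
                    return 1;
            }
            return 0;
    }
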
-int
+static int
ksocknal_recv_iov (ksock_conn_t *conn)
{
- struct iovec *iov = conn->ksnc_rx_iov;
+ struct kvec *iov = conn->ksnc_rx_iov;
int nob;
int rc;
LASSERT (conn->ksnc_rx_niov > 0);
- /* Never touch conn->ksnc_rx_iov or change connection
+ /* Never touch conn->ksnc_rx_iov or change connection
* status inside ksocknal_lib_recv_iov */
rc = ksocknal_lib_recv_iov(conn);
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- cfs_mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ smp_mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
do {
LASSERT (conn->ksnc_rx_niov > 0);
if (nob < (int)iov->iov_len) {
iov->iov_len -= nob;
- iov->iov_base = (void *)((char *)iov->iov_base + nob);
+ iov->iov_base += nob;
return (-EAGAIN);
}
return (rc);
}
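
Both send and receive paths share the partial-I/O idiom visible above: after a short transfer of nob bytes, fully-consumed iovec entries are dropped and the first surviving one is advanced in place. A standalone sketch of that idiom (iov_advance is a hypothetical helper, not part of the driver):

    #include <stddef.h>
    #include <sys/uio.h>

    /* Consume 'nob' bytes from the front of (*iovp)[0..niov-1];
     * advances *iovp past drained entries and returns how many
     * entries still carry data. */
    static int iov_advance(struct iovec **iovp, int niov, size_t nob)
    {
            struct iovec *iov = *iovp;

            while (nob > 0 && niov > 0) {
                    if (nob < iov->iov_len) {
                            /* partially consumed: shift in place */
                            iov->iov_base = (char *)iov->iov_base + nob;
                            iov->iov_len -= nob;
                            break;
                    }
                    nob -= iov->iov_len;    /* entry fully consumed */
                    iov++;
                    niov--;
            }
            *iovp = iov;
            return niov;
    }
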
-int
+static int
ksocknal_recv_kiov (ksock_conn_t *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int rc;
LASSERT (conn->ksnc_rx_nkiov > 0);
- /* Never touch conn->ksnc_rx_kiov or change connection
+ /* Never touch conn->ksnc_rx_kiov or change connection
* status inside ksocknal_lib_recv_iov */
rc = ksocknal_lib_recv_kiov(conn);
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- cfs_mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ smp_mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
do {
LASSERT (conn->ksnc_rx_nkiov > 0);
return 1;
}
-int
+static int
ksocknal_receive (ksock_conn_t *conn)
{
/* Return 1 on success, 0 on EOF, < 0 on error.
int rc;
ENTRY;
- if (ksocknal_data.ksnd_stall_rx != 0) {
- cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
- }
+ if (ksocknal_data.ksnd_stall_rx != 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+ }
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
- int rc = (tx->tx_resid == 0) ? 0 : -EIO;
+ int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
ENTRY;
LASSERT(ni != NULL || tx->tx_conn != NULL);
}
void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
ksock_tx_t *tx;
- while (!list_empty (txlist)) {
- tx = list_entry (txlist->next, ksock_tx_t, tx_list);
+ while (!list_empty(txlist)) {
+ tx = list_entry(txlist->next, ksock_tx_t, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
- CDEBUG (D_NETERROR, "Deleting packet type %d len %d %s->%s\n",
+ CNETERR("Deleting packet type %d len %d %s->%s\n",
le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
} else if (error) {
- CDEBUG (D_NETERROR, "Deleting noop packet\n");
+ CNETERR("Deleting noop packet\n");
}
- list_del (&tx->tx_list);
+ list_del(&tx->tx_list);
- LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
+ LASSERT (atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
}
}
ksocknal_tx_addref(tx);
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
/* ZC_REQ is going to be pinned to the peer */
tx->tx_deadline =
if (peer->ksnp_zc_next_cookie == 0)
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
- list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+ list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
}
static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
- ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+ ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
- LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT (tx->tx_zc_capable);
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT(tx->tx_zc_capable);
- tx->tx_zc_checked = 0;
+ tx->tx_zc_checked = 0;
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
- /* Not waiting for an ACK */
- cfs_spin_unlock(&peer->ksnp_lock);
- return;
- }
+ if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ /* Not waiting for an ACK */
+ spin_unlock(&peer->ksnp_lock);
+ return;
+ }
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- list_del(&tx->tx_zc_list);
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ list_del(&tx->tx_zc_list);
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
- ksocknal_tx_decref(tx);
+ ksocknal_tx_decref(tx);
}
-int
+static int
ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, cfs_atomic_read(&libcfs_kmemory));
+ counter, conn, atomic_read(&libcfs_kmemory));
/* Queue on ksnd_enomem_conns for retry after a timeout */
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
LASSERT (conn->ksnc_tx_scheduled);
- list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
- SOCKNAL_ENOMEM_RETRY),
- ksocknal_data.ksnd_reaper_waketime))
- cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
-
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
- return (rc);
- }
+ list_add_tail(&conn->ksnc_tx_list,
+ &ksocknal_data.ksnd_enomem_conns);
+ if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+ SOCKNAL_ENOMEM_RETRY),
+ ksocknal_data.ksnd_reaper_waketime))
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+ return (rc);
+ }
/* Actual error */
LASSERT (rc < 0);
if (!conn->ksnc_closing) {
switch (rc) {
case -ECONNRESET:
- LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
+ LCONSOLE_WARN("Host %pI4h reset our connection "
"while we were sending data; it may have "
"rebooted.\n",
- HIPQUAD(conn->ksnc_ipaddr));
+ &conn->ksnc_ipaddr);
break;
default:
LCONSOLE_WARN("There was an unexpected network error "
- "while writing to %u.%u.%u.%u: %d.\n",
- HIPQUAD(conn->ksnc_ipaddr), rc);
+ "while writing to %pI4h: %d.\n",
+ &conn->ksnc_ipaddr, rc);
break;
}
- CDEBUG(D_NET, "[%p] Error %d on write to %s"
- " ip %d.%d.%d.%d:%d\n", conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
+ CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
+ conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port);
}
if (tx->tx_zc_checked)
return (rc);
}
-void
+static void
ksocknal_launch_connection_locked (ksock_route_t *route)
{
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- list_add_tail (&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
- cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
+ list_add_tail(&route->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+}
+
+void
+ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+{
+ ksock_route_t *route;
+
+ /* called holding write lock on ksnd_global_lock */
+ for (;;) {
+ /* launch any/all connections that need it */
+ route = ksocknal_find_connectable_route_locked(peer);
+ if (route == NULL)
+ return;
+
+ ksocknal_launch_connection_locked(route);
+ }
}
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
- struct list_head *tmp;
+ struct list_head *tmp;
ksock_conn_t *conn;
ksock_conn_t *typed = NULL;
ksock_conn_t *fallback = NULL;
int tnob = 0;
int fnob = 0;
- list_for_each (tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = cfs_atomic_read(&c->ksnc_tx_nob) +
- libcfs_sock_wmem_queued(c->ksnc_sock);
+ list_for_each(tmp, &peer->ksnp_conns) {
+ ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+ int nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
int rc;
LASSERT (!c->ksnc_closing);
{
conn->ksnc_proto->pro_pack(tx);
- cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
tx->tx_conn = conn;
}
ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
{
ksock_sched_t *sched = conn->ksnc_scheduler;
- ksock_msg_t *msg = &tx->tx_msg;
+ struct ksock_msg *msg = &tx->tx_msg;
ksock_tx_t *ztx = NULL;
int bufnob = 0;
* ksnc_sock... */
LASSERT(!conn->ksnc_closing);
- CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
+ CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port);
ksocknal_tx_prep(conn, tx);
/* Ensure the frags we've been given EXACTLY match the number of
* bytes we want to send. Many TCP/IP stacks disregard any total
- * size parameters passed to them and just look at the frags.
+ * size parameters passed to them and just look at the frags.
*
* We always expect at least 1 mapped fragment containing the
* complete ksocknal message header. */
KSOCK_MSG_NOOP,
tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
- /*
- * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
- * but they're used inside spinlocks a lot.
- */
- bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
- cfs_spin_lock_bh (&sched->kss_lock);
-
- if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
- /* First packet starts the timeout */
- conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_tx_bufnob = 0;
- cfs_mb(); /* order with adding to tx_queue */
- }
-
- if (msg->ksm_type == KSOCK_MSG_NOOP) {
- /* The packet is noop ZC ACK, try to piggyback the ack_cookie
- * on a normal packet so I don't need to send it */
+ bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
+ spin_lock_bh(&sched->kss_lock);
+
+ if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ /* First packet starts the timeout */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = 0;
+ smp_mb(); /* order with adding to tx_queue */
+ }
+
+ if (msg->ksm_type == KSOCK_MSG_NOOP) {
+ /* The packet is noop ZC ACK, try to piggyback the ack_cookie
+ * on a normal packet so I don't need to send it */
LASSERT (msg->ksm_zc_cookies[1] != 0);
LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
}
if (ztx != NULL) {
- cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
- list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
- }
-
- if (conn->ksnc_tx_ready && /* able to send */
- !conn->ksnc_tx_scheduled) { /* not scheduled to send */
- /* +1 ref for scheduler */
- ksocknal_conn_addref(conn);
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- cfs_waitq_signal (&sched->kss_waitq);
+ atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ if (conn->ksnc_tx_ready && /* able to send */
+ !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+ /* +1 ref for scheduler */
+ ksocknal_conn_addref(conn);
+ list_add_tail(&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ wake_up(&sched->kss_waitq);
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
}
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
- struct list_head *tmp;
- ksock_route_t *route;
+ cfs_time_t now = cfs_time_current();
+ struct list_head *tmp;
+ ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
continue;
- /* too soon to retry this guy? */
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
- cfs_time_aftereq (cfs_time_current(),
- route->ksnr_timeout)))
+ cfs_time_aftereq(now, route->ksnr_timeout))) {
+ CDEBUG(D_NET,
+ "Too soon to retry route %pI4h "
+ "(cnted %d, interval %ld, %ld secs later)\n",
+ &route->ksnr_ipaddr,
+ route->ksnr_connected,
+ route->ksnr_retry_interval,
+ cfs_duration_sec(route->ksnr_timeout - now));
continue;
+ }
return (route);
}
ksock_route_t *
ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
{
- struct list_head *tmp;
+ struct list_head *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
{
ksock_peer_t *peer;
ksock_conn_t *conn;
- ksock_route_t *route;
- cfs_rwlock_t *g_lock;
+ rwlock_t *g_lock;
int retry;
int rc;
g_lock = &ksocknal_data.ksnd_global_lock;
for (retry = 0;; retry = 1) {
- cfs_read_lock (g_lock);
+ read_lock(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
if (ksocknal_find_connectable_route_locked(peer) == NULL) {
* connecting and I do have an actual
* connection... */
ksocknal_queue_tx_locked (tx, conn);
- cfs_read_unlock (g_lock);
+ read_unlock(g_lock);
return (0);
}
}
}
/* I'll need a write lock... */
- cfs_read_unlock (g_lock);
+ read_unlock(g_lock);
- cfs_write_lock_bh (g_lock);
+ write_lock_bh(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL)
break;
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
CERROR("Refusing to create a connection to "
}
}
- for (;;) {
- /* launch any/all connections that need it */
- route = ksocknal_find_connectable_route_locked (peer);
- if (route == NULL)
- break;
-
- ksocknal_launch_connection_locked (route);
- }
+ ksocknal_launch_all_connections_locked(peer);
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
if (conn != NULL) {
/* Connection exists; queue message on it */
ksocknal_queue_tx_locked (tx, conn);
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
return (0);
}
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
/* Queue the message until a connection is established */
- list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
- cfs_write_unlock_bh (g_lock);
+ list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
+ write_unlock_bh(g_lock);
return 0;
}
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
/* NB Routes may be ignored if connections to them failed recently */
- CDEBUG(D_NETERROR, "No usable routes to %s\n", libcfs_id2str(id));
+ CNETERR("No usable routes to %s\n", libcfs_id2str(id));
return (-EHOSTUNREACH);
}
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
- struct iovec *payload_iov = lntmsg->msg_iov;
+ struct kvec *payload_iov = lntmsg->msg_iov;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!cfs_in_interrupt ());
-
- if (payload_iov != NULL)
- desc_size = offsetof(ksock_tx_t,
- tx_frags.virt.iov[1 + payload_niov]);
- else
- desc_size = offsetof(ksock_tx_t,
- tx_frags.paged.kiov[payload_niov]);
-
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT (!in_interrupt ());
+
+ if (payload_iov != NULL)
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.virt.iov[1 + payload_niov]);
+ else
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.paged.kiov[payload_niov]);
+
+ if (lntmsg->msg_vmflush)
+ mpflag = cfs_memory_pressure_get_and_set();
tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
if (tx == NULL) {
CERROR("Can't allocate tx desc type %d size %d\n",
type, desc_size);
+ if (lntmsg->msg_vmflush)
+ cfs_memory_pressure_restore(mpflag);
return (-ENOMEM);
}
tx->tx_zc_capable = 1;
}
- socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
+ tx->tx_msg.ksm_csum = 0;
+ tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ tx->tx_msg.ksm_zc_cookies[1] = 0;
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
+ if (!mpflag)
+ cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
}
int
-ksocknal_thread_start (int (*fn)(void *arg), void *arg)
+ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_kernel_thread (fn, arg, 0);
+ struct task_struct *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
- return (0);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+ ksocknal_data.ksnd_nthreads++;
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ return 0;
}
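
The thread_start conversion swaps the raw-pid convention of cfs_kernel_thread() for kthread_run(), whose return value is a struct task_struct pointer carrying any error via ERR_PTR encoding. A minimal sketch of the checking idiom against the stock kthread API:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static int start_worker(int (*fn)(void *), void *arg, const char *name)
    {
            struct task_struct *task;

            task = kthread_run(fn, arg, "%s", name);
            if (IS_ERR(task))               /* never NULL: error is encoded */
                    return PTR_ERR(task);   /* e.g. -ENOMEM */
            return 0;
    }
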
void
ksocknal_thread_fini (void)
{
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads--;
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
ksocknal_lib_eager_ack(conn);
}
- if (nob_to_skip == 0) { /* right at next packet boundary now */
- conn->ksnc_rx_started = 0;
- cfs_mb(); /* racing with timeout thread */
+ if (nob_to_skip == 0) { /* right at next packet boundary now */
+ conn->ksnc_rx_started = 0;
+ smp_mb(); /* racing with timeout thread */
- switch (conn->ksnc_proto->pro_version) {
- case KSOCK_PROTO_V2:
- case KSOCK_PROTO_V3:
+ switch (conn->ksnc_proto->pro_version) {
+ case KSOCK_PROTO_V2:
+ case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
- conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
- conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
- conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
+ conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
+ conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
+ conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
break;
case KSOCK_PROTO_V1:
conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
break;
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
skipped = 0;
niov = 0;
nob_to_skip -= nob;
} while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
- niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+ niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
conn->ksnc_rx_niov = niov;
conn->ksnc_rx_kiov = NULL;
return (0);
}
-int
+static int
ksocknal_process_receive (ksock_conn_t *conn)
{
lnet_hdr_t *lhdr;
lnet_process_id_t *id;
int rc;
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
- /* NB: sched lock NOT held */
- /* SOCKNAL_RX_LNET_HEADER is here for backward compatability */
+ /* NB: sched lock NOT held */
+ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
if (conn->ksnc_rx_nob_wanted != 0) {
rc = ksocknal_receive(conn);
- if (rc <= 0) {
- LASSERT (rc != -EAGAIN);
+ if (rc <= 0) {
+ lnet_process_id_t ksnp_id = conn->ksnc_peer->ksnp_id;
- if (rc == 0)
- CDEBUG (D_NET, "[%p] EOF from %s"
- " ip %d.%d.%d.%d:%d\n", conn,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
- else if (!conn->ksnc_closing)
- CERROR ("[%p] Error %d on read from %s"
- " ip %d.%d.%d.%d:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ LASSERT(rc != -EAGAIN);
+
+ if (rc == 0)
+ CDEBUG(D_NET, "[%p] EOF from %s "
+ "ip %pI4h:%d\n", conn,
+ libcfs_id2str(ksnp_id),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
+ else if (!conn->ksnc_closing)
+ CERROR("[%p] Error %d on read from %s "
+ "ip %pI4h:%d\n", conn, rc,
+ libcfs_id2str(ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
conn->ksnc_msg.ksm_zc_cookies[1]);
if (rc != 0) {
- CERROR("%s: Unknown ZC-ACK cookie: "LPU64", "LPU64"\n",
+ CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
ksocknal_new_packet(conn, 0);
}
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
- conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
+ conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
+ conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
+ conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
id = &conn->ksnc_peer->ksnp_id;
rc = conn->ksnc_proto->pro_handle_zcreq(conn,
- conn->ksnc_msg.ksm_zc_cookies[0],
- le64_to_cpu(lhdr->src_nid) != id->nid);
+ conn->ksnc_msg.ksm_zc_cookies[0],
+ *ksocknal_tunables.ksnd_nonblk_zcack ||
+ le64_to_cpu(lhdr->src_nid) != id->nid);
}
lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
int
ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
ksock_conn_t *conn = (ksock_conn_t *)private;
LASSERT (conn->ksnc_rx_scheduled);
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_PARSE_WAIT:
- list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- cfs_waitq_signal (&sched->kss_waitq);
- LASSERT (conn->ksnc_rx_ready);
- break;
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_PARSE_WAIT:
+ list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ wake_up(&sched->kss_waitq);
+ LASSERT(conn->ksnc_rx_ready);
+ break;
case SOCKNAL_RX_PARSE:
/* scheduler hasn't noticed I'm parsing yet */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
- cfs_spin_unlock_bh (&sched->kss_lock);
- ksocknal_conn_decref(conn);
- return (0);
+ spin_unlock_bh(&sched->kss_lock);
+ ksocknal_conn_decref(conn);
+ return 0;
}
static inline int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
- int rc;
+ int rc;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- rc = (!ksocknal_data.ksnd_shuttingdown &&
- list_empty(&sched->kss_rx_conns) &&
- list_empty(&sched->kss_tx_conns));
+ rc = (!ksocknal_data.ksnd_shuttingdown &&
+ list_empty(&sched->kss_rx_conns) &&
+ list_empty(&sched->kss_tx_conns));
- cfs_spin_unlock_bh (&sched->kss_lock);
- return (rc);
+ spin_unlock_bh(&sched->kss_lock);
+ return rc;
}
-int ksocknal_scheduler (void *arg)
+int ksocknal_scheduler(void *arg)
{
- ksock_sched_t *sched = (ksock_sched_t *)arg;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
- int nloops = 0;
- int id = (int)(sched - ksocknal_data.ksnd_schedulers);
- char name[16];
+ struct ksock_sched_info *info;
+ ksock_sched_t *sched;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+ int rc;
+ int nloops = 0;
+ long id = (long)arg;
- snprintf (name, sizeof (name),"socknal_sd%02d", id);
- cfs_daemonize (name);
- cfs_block_allsigs ();
+ info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
+ sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
- if (ksocknal_lib_bind_thread_to_cpu(id))
- CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
+ cfs_block_allsigs();
- cfs_spin_lock_bh (&sched->kss_lock);
+ rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
+ if (rc != 0) {
+ CWARN("Can't set CPU partition affinity to %d: %d\n",
+ info->ksi_cpt, rc);
+ }
+
+ spin_lock_bh(&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
int did_something = 0;
/* Ensure I progress everything semi-fairly */
- if (!list_empty (&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
- list_del(&conn->ksnc_rx_list);
+ if (!list_empty(&sched->kss_rx_conns)) {
+ conn = list_entry(sched->kss_rx_conns.next,
+ ksock_conn_t, ksnc_rx_list);
+ list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
* data_ready can set it any time after we release
* kss_lock. */
conn->ksnc_rx_ready = 0;
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- rc = ksocknal_process_receive(conn);
+ rc = ksocknal_process_receive(conn);
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
/* I'm the only one that can clear this flag */
LASSERT(conn->ksnc_rx_scheduled);
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
- list_add_tail (&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
} else {
conn->ksnc_rx_scheduled = 0;
/* drop my ref */
did_something = 1;
}
- if (!list_empty (&sched->kss_tx_conns)) {
- CFS_LIST_HEAD (zlist);
+ if (!list_empty(&sched->kss_tx_conns)) {
+ struct list_head zlist = LIST_HEAD_INIT(zlist);
- if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist, &sched->kss_zombie_noop_txs);
- list_del_init(&sched->kss_zombie_noop_txs);
+ if (!list_empty(&sched->kss_zombie_noop_txs)) {
+ list_add(&zlist,
+ &sched->kss_zombie_noop_txs);
+ list_del_init(&sched->kss_zombie_noop_txs);
}
- conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ conn = list_entry(sched->kss_tx_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
- LASSERT(!list_empty(&conn->ksnc_tx_queue));
+ LASSERT(!list_empty(&conn->ksnc_tx_queue));
- tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ tx = list_entry(conn->ksnc_tx_queue.next,
+ ksock_tx_t, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
/* dequeue now so empty list => more to send */
- list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
/* Clear tx_ready in case send isn't complete. Do
* it BEFORE we call process_transmit, since
* write_space can set it any time after we release
* kss_lock. */
conn->ksnc_tx_ready = 0;
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- if (!list_empty(&zlist)) {
- /* free zombie noop txs, it's fast because
+ if (!list_empty(&zlist)) {
+ /* free zombie noop txs, it's fast because
* noop txs are just put in freelist */
ksocknal_txlist_done(NULL, &zlist, 0);
}
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
- cfs_spin_lock_bh (&sched->kss_lock);
- list_add (&tx->tx_list, &conn->ksnc_tx_queue);
- } else {
- /* Complete send; tx -ref */
- ksocknal_tx_decref (tx);
-
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
+ list_add(&tx->tx_list,
+ &conn->ksnc_tx_queue);
+ } else {
+ /* Complete send; tx -ref */
+ ksocknal_tx_decref(tx);
+
+ spin_lock_bh(&sched->kss_lock);
/* assume space for more */
conn->ksnc_tx_ready = 1;
}
/* Do nothing; after a short timeout, this
* conn will be reposted on kss_tx_conns. */
} else if (conn->ksnc_tx_ready &&
- !list_empty (&conn->ksnc_tx_queue)) {
+ !list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ list_add_tail(&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
} else {
conn->ksnc_tx_scheduled = 0;
/* drop my ref */
}
if (!did_something || /* nothing to do */
++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
nloops = 0;
if (!did_something) { /* wait for something to do */
- cfs_wait_event_interruptible_exclusive(
- sched->kss_waitq,
- !ksocknal_sched_cansleep(sched), rc);
- LASSERT (rc == 0);
- } else {
- our_cond_resched();
- }
-
- cfs_spin_lock_bh (&sched->kss_lock);
- }
- }
-
- cfs_spin_unlock_bh (&sched->kss_lock);
- ksocknal_thread_fini ();
- return (0);
+ rc = wait_event_interruptible_exclusive(
+ sched->kss_waitq,
+ !ksocknal_sched_cansleep(sched));
+ LASSERT (rc == 0);
+ } else {
+ cond_resched();
+ }
+
+ spin_lock_bh(&sched->kss_lock);
+ }
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+ ksocknal_thread_fini();
+ return 0;
}
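
The scheduler sleeps through wait_event_interruptible_exclusive(), which re-evaluates ksocknal_sched_cansleep() around every wakeup so a wakeup racing with newly queued work is never lost; the "exclusive" variant wakes one scheduler per event instead of the whole herd. A runnable userspace analogue of that predicate-under-lock pattern, using a pthread condvar (all names hypothetical):

    #include <pthread.h>

    static pthread_mutex_t kss_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  kss_wait = PTHREAD_COND_INITIALIZER;
    static int rx_pending, tx_pending, shuttingdown;

    /* Equivalent of ksocknal_sched_cansleep(): sample state under lock. */
    static int cansleep(void)
    {
            return !shuttingdown && !rx_pending && !tx_pending;
    }

    /* Sleep only while the predicate holds; cond_wait re-takes the lock
     * around every re-check, so a wakeup racing with new work is safe. */
    static void sched_sleep(void)
    {
            pthread_mutex_lock(&kss_lock);
            while (cansleep())
                    pthread_cond_wait(&kss_wait, &kss_lock);
            pthread_mutex_unlock(&kss_lock);
    }

    /* Producer side, mirroring ksocknal_read_callback(): queue then wake. */
    static void post_rx(void)
    {
            pthread_mutex_lock(&kss_lock);
            rx_pending++;
            pthread_cond_signal(&kss_wait); /* one exclusive waiter */
            pthread_mutex_unlock(&kss_lock);
    }
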
/*
*/
void ksocknal_read_callback (ksock_conn_t *conn)
{
- ksock_sched_t *sched;
- ENTRY;
+ ksock_sched_t *sched;
+ ENTRY;
- sched = conn->ksnc_scheduler;
+ sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- conn->ksnc_rx_ready = 1;
+ conn->ksnc_rx_ready = 1;
- if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ if (!conn->ksnc_rx_scheduled) { /* not being progressed */
+ list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ conn->ksnc_rx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
- cfs_spin_unlock_bh (&sched->kss_lock);
+ wake_up (&sched->kss_waitq);
+ }
+ spin_unlock_bh(&sched->kss_lock);
- EXIT;
+ EXIT;
}
/*
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
{
- ksock_sched_t *sched;
- ENTRY;
+ ksock_sched_t *sched;
+ ENTRY;
- sched = conn->ksnc_scheduler;
+ sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- conn->ksnc_tx_ready = 1;
+ conn->ksnc_tx_ready = 1;
- if (!conn->ksnc_tx_scheduled && // not being progressed
- !list_empty(&conn->ksnc_tx_queue)){//packets to send
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ if (!conn->ksnc_tx_scheduled && /* not being progressed */
+ !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up(&sched->kss_waitq);
+ }
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- EXIT;
+ EXIT;
}
-ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+static ksock_proto_t *
+ksocknal_parse_proto_version (struct ksock_hello_msg *hello)
{
__u32 version = 0;
lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
CLASSERT (sizeof (lnet_magicversion_t) ==
- offsetof (ksock_hello_msg_t, kshm_src_nid));
+ offsetof (struct ksock_hello_msg, kshm_src_nid));
if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
int
ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
return conn->ksnc_proto->pro_send_hello(conn, hello);
}
-int
+static int
ksocknal_invert_type(int type)
{
switch (type)
}
int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
- __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ struct ksock_hello_msg *hello, lnet_process_id_t *peerid,
+ __u64 *incarnation)
{
/* Return < 0 fatal error
* 0 success
* EALREADY lost connection race
* EPROTO protocol version mismatch
*/
- cfs_socket_t *sock = conn->ksnc_sock;
+ struct socket *sock = conn->ksnc_sock;
int active = (conn->ksnc_proto != NULL);
int timeout;
int proto_match;
ksock_proto_t *proto;
lnet_process_id_t recv_id;
- /* socket type set on active connections - not set on passive */
- LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
+ /* socket type set on active connections - not set on passive */
+ LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
- timeout = active ? *ksocknal_tunables.ksnd_timeout :
- lnet_acceptor_timeout();
+ timeout = active ? *ksocknal_tunables.ksnd_timeout :
+ lnet_acceptor_timeout();
- rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_magic,
+ sizeof(hello->kshm_magic), timeout);
if (rc != 0) {
- CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
return rc;
}
hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
CERROR ("Bad magic(1) %#08x (%#08x expected) from "
- "%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC,
- HIPQUAD(conn->ksnc_ipaddr));
+ "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
return -EPROTO;
}
- rc = libcfs_sock_read(sock, &hello->kshm_version,
- sizeof(hello->kshm_version), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_version,
+ sizeof(hello->kshm_version), timeout);
if (rc != 0) {
- CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
- LASSERT (rc < 0);
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0);
return rc;
}
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
}
- CERROR ("Unknown protocol version (%d.x expected)"
- " from %u.%u.%u.%u\n",
- conn->ksnc_proto->pro_version,
- HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
+ conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);
return -EPROTO;
}
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
if (rc != 0) {
- CERROR("Error %d reading or checking hello from from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Error %d reading or checking hello from from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
return rc;
}
if (hello->kshm_src_nid == LNET_NID_ANY) {
CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
- "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
+ "from %pI4h\n", &conn->ksnc_ipaddr);
return -EPROTO;
}
if (!active) {
*peerid = recv_id;
- /* peer determines type */
- conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
- if (conn->ksnc_type == SOCKLND_CONN_NONE) {
- CERROR ("Unexpected type %d from %s ip %u.%u.%u.%u\n",
- hello->kshm_ctype, libcfs_id2str(*peerid),
- HIPQUAD(conn->ksnc_ipaddr));
- return -EPROTO;
- }
-
- return 0;
- }
+ /* peer determines type */
+ conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
+ if (conn->ksnc_type == SOCKLND_CONN_NONE) {
+ CERROR("Unexpected type %d from %s ip %pI4h\n",
+ hello->kshm_ctype, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
+ return 0;
+ }
if (peerid->pid != recv_id.pid ||
peerid->nid != recv_id.nid) {
LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
- " %u.%u.%u.%u, but they claimed they were "
+ " %pI4h, but they claimed they were "
"%s; please check your Lustre "
"configuration.\n",
libcfs_id2str(*peerid),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
libcfs_id2str(recv_id));
return -EPROTO;
}
return proto_match ? EALREADY : EPROTO;
}
- if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
- CERROR ("Mismatched types: me %d, %s ip %u.%u.%u.%u %d\n",
- conn->ksnc_type, libcfs_id2str(*peerid),
- HIPQUAD(conn->ksnc_ipaddr),
- hello->kshm_ctype);
- return -EPROTO;
- }
-
- return 0;
+ if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
+ CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
+ conn->ksnc_type, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr,
+ hello->kshm_ctype);
+ return -EPROTO;
+ }
+ return 0;
}
-void
+static int
ksocknal_connect (ksock_route_t *route)
{
- CFS_LIST_HEAD (zombies);
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
ksock_peer_t *peer = route->ksnr_peer;
int type;
int wanted;
- cfs_socket_t *sock;
+ struct socket *sock;
cfs_time_t deadline;
int retry_later = 0;
int rc = 0;
deadline = cfs_time_add(cfs_time_current(),
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
LASSERT (route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting);
type = SOCKLND_CONN_BULK_OUT;
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (cfs_time_aftereq(cfs_time_current(), deadline)) {
rc = -ETIMEDOUT;
CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
libcfs_nid2str(peer->ksnp_id.nid));
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
}
route->ksnr_scheduled = 0;
if (retry_later) {
/* re-queue for attention; this frees me up to handle
* the peer's incoming connection request */
+
+ if (rc == EALREADY ||
+ (rc == 0 && peer->ksnp_accepting > 0)) {
+ /* We want to introduce a delay before next
+ * attempt to connect if we lost conn race,
+ * but the race is resolved quickly usually,
+ * so min_reconnectms should be good heuristic */
+ route->ksnr_retry_interval =
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+ route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_retry_interval);
+ }
+
ksocknal_launch_connection_locked(route);
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
- return;
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ return retry_later;
failed:
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
- if (!list_empty(&peer->ksnp_tx_queue) &&
+ if (!list_empty(&peer->ksnp_tx_queue) &&
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
ksock_conn_t *conn;
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x */
- if (!list_empty (&peer->ksnp_conns)) {
- conn = list_entry(peer->ksnp_conns.next, ksock_conn_t, ksnc_list);
+ if (!list_empty(&peer->ksnp_conns)) {
+ conn = list_entry(peer->ksnp_conns.next,
+ ksock_conn_t, ksnc_list);
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
}
/* take all the blocked packets while I've got the lock and
* complete below... */
- list_splice_init(&peer->ksnp_tx_queue, &zombies);
+ list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
-#if 0 /* irrelevent with only eager routes */
- if (!route->ksnr_deleted) {
- /* make this route least-favourite for re-selection */
- list_del(&route->ksnr_list);
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
- }
-#endif
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_peer_failed(peer);
ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
+ return 0;
}
-static inline int
-ksocknal_connd_connect_route_locked(void)
+/*
+ * check whether we need to create more connds.
+ * It will try to create new thread if it's necessary, @timeout can
+ * be updated if failed to create, so caller wouldn't keep try while
+ * running out of resource.
+ */
+static int
+ksocknal_connd_check_start(long sec, long *timeout)
{
- /* Only handle an outgoing connection request if there is someone left
- * to handle incoming connections */
- return !list_empty(&ksocknal_data.ksnd_connd_routes) &&
- ((ksocknal_data.ksnd_connd_connecting + 1) <
- *ksocknal_tunables.ksnd_nconnds);
+ char name[16];
+ int rc;
+ int total = ksocknal_data.ksnd_connd_starting +
+ ksocknal_data.ksnd_connd_running;
+
+ if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
+ /* still in initializing */
+ return 0;
+ }
+
+ if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
+ total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
+ /* can't create more connd, or still have enough
+ * threads to handle more connecting */
+ return 0;
+ }
+
+ if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
+ /* no pending connecting request */
+ return 0;
+ }
+
+ if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
+ /* may run out of resource, retry later */
+ *timeout = cfs_time_seconds(1);
+ return 0;
+ }
+
+ if (ksocknal_data.ksnd_connd_starting > 0) {
+ /* serialize starting to avoid flood */
+ return 0;
+ }
+
+ ksocknal_data.ksnd_connd_starting_stamp = sec;
+ ksocknal_data.ksnd_connd_starting++;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ /* NB: total is the next id */
+ snprintf(name, sizeof(name), "socknal_cd%02d", total);
+ rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
+
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ if (rc == 0)
+ return 1;
+
+ /* we tried ... */
+ LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+ ksocknal_data.ksnd_connd_starting--;
+ ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
+
+ return 1;
}
-static inline int
-ksocknal_connd_ready(void)
+/*
+ * check whether current thread can exit, it will return 1 if there are too
+ * many threads and no creating in past 120 seconds.
+ * Also, this function may update @timeout to make caller come back
+ * again to recheck these conditions.
+ */
+static int
+ksocknal_connd_check_stop(long sec, long *timeout)
{
- int rc;
+ int val;
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
+ /* still in initializing */
+ return 0;
+ }
+
+ if (ksocknal_data.ksnd_connd_starting > 0) {
+ /* in progress of starting new thread */
+ return 0;
+ }
- rc = ksocknal_data.ksnd_shuttingdown ||
- !list_empty(&ksocknal_data.ksnd_connd_connreqs) ||
- ksocknal_connd_connect_route_locked();
+ if (ksocknal_data.ksnd_connd_running <=
+ *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
+ return 0;
+ }
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ /* created thread in past 120 seconds? */
+ val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
+ SOCKNAL_CONND_TIMEOUT - sec);
- return rc;
+ *timeout = (val > 0) ? cfs_time_seconds(val) :
+ cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
+ if (val > 0)
+ return 0;
+
+ /* no creating in past 120 seconds */
+
+ return ksocknal_data.ksnd_connd_running >
+ ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
+}
+
+/* Go through connd_routes queue looking for a route that we can process
+ * right now, @timeout_p can be updated if we need to come back later */
+static ksock_route_t *
+ksocknal_connd_get_route_locked(signed long *timeout_p)
+{
+ ksock_route_t *route;
+ cfs_time_t now;
+
+ now = cfs_time_current();
+
+ /* connd_routes can contain both pending and ordinary routes */
+ list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
+
+ if (route->ksnr_retry_interval == 0 ||
+ cfs_time_aftereq(now, route->ksnr_timeout))
+ return route;
+
+ if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+ (int)*timeout_p > (int)(route->ksnr_timeout - now))
+ *timeout_p = (int)(route->ksnr_timeout - now);
+ }
+
+ return NULL;
}
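
Note how ksocknal_connd_get_route_locked() doubles as a timer: routes still inside their retry backoff are skipped, but the scan lowers *timeout_p to the nearest ksnr_timeout so the daemon sleeps exactly until the next route becomes actionable. A standalone sketch of that min-deadline scan (hypothetical helper over a plain array):

    /* Pick the first ready item, else lower *timeout to the nearest
     * deadline so the caller sleeps no longer than necessary. */
    static int next_ready(const long *deadline, int n, long now, long *timeout)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (deadline[i] <= now)
                            return i;               /* can run right now */
                    if (*timeout > deadline[i] - now)
                            *timeout = deadline[i] - now;
            }
            return -1;                              /* sleep for *timeout */
    }
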
int
ksocknal_connd (void *arg)
{
- long id = (long)(long_ptr_t)arg;
- char name[16];
- ksock_connreq_t *cr;
- ksock_route_t *route;
- int rc = 0;
+ spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
+ ksock_connreq_t *cr;
+ wait_queue_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
- snprintf (name, sizeof (name), "socknal_cd%02ld", id);
- cfs_daemonize (name);
- cfs_block_allsigs ();
+ cfs_block_allsigs();
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ init_waitqueue_entry(&wait, current);
- while (!ksocknal_data.ksnd_shuttingdown) {
+ spin_lock_bh(connd_lock);
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
- /* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
- ksock_connreq_t, ksncr_list);
+ LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+ ksocknal_data.ksnd_connd_starting--;
+ ksocknal_data.ksnd_connd_running++;
- list_del(&cr->ksncr_list);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ while (!ksocknal_data.ksnd_shuttingdown) {
+ ksock_route_t *route = NULL;
+ long sec = cfs_time_current_sec();
+ long timeout = MAX_SCHEDULE_TIMEOUT;
+ int dropped_lock = 0;
- ksocknal_create_conn(cr->ksncr_ni, NULL,
- cr->ksncr_sock, SOCKLND_CONN_NONE);
- lnet_ni_decref(cr->ksncr_ni);
- LIBCFS_FREE(cr, sizeof(*cr));
+ if (ksocknal_connd_check_stop(sec, &timeout)) {
+ /* wakeup another one to check stop */
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
+ break;
+ }
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ if (ksocknal_connd_check_start(sec, &timeout)) {
+ /* created new thread */
+ dropped_lock = 1;
}
- if (ksocknal_connd_connect_route_locked()) {
- /* Connection request */
- route = list_entry (ksocknal_data.ksnd_connd_routes.next,
- ksock_route_t, ksnr_connd_list);
+ if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ /* Connection accepted by the listener */
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+ ksock_connreq_t, ksncr_list);
- list_del (&route->ksnr_connd_list);
- ksocknal_data.ksnd_connd_connecting++;
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ list_del(&cr->ksncr_list);
+ spin_unlock_bh(connd_lock);
+ dropped_lock = 1;
- ksocknal_connect (route);
- ksocknal_route_decref(route);
+ ksocknal_create_conn(cr->ksncr_ni, NULL,
+ cr->ksncr_sock, SOCKLND_CONN_NONE);
+ lnet_ni_decref(cr->ksncr_ni);
+ LIBCFS_FREE(cr, sizeof(*cr));
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_connecting--;
+ spin_lock_bh(connd_lock);
}
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
-
- cfs_wait_event_interruptible_exclusive(
- ksocknal_data.ksnd_connd_waitq,
- ksocknal_connd_ready(), rc);
-
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
- }
+ /* Only handle an outgoing connection request if there
+ * is a thread left to handle incoming connections and
+ * create new connd */
+ if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
+ ksocknal_data.ksnd_connd_running) {
+ route = ksocknal_connd_get_route_locked(&timeout);
+ }
+ if (route != NULL) {
+ list_del(&route->ksnr_connd_list);
+ ksocknal_data.ksnd_connd_connecting++;
+ spin_unlock_bh(connd_lock);
+ dropped_lock = 1;
+
+ if (ksocknal_connect(route)) {
+ /* consecutive retry */
+ if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
+ CWARN("massive consecutive "
+ "re-connecting to %pI4h\n",
+ &route->ksnr_ipaddr);
+ cons_retry = 0;
+ }
+ } else {
+ cons_retry = 0;
+ }
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ ksocknal_route_decref(route);
- ksocknal_thread_fini ();
- return (0);
+ spin_lock_bh(connd_lock);
+ ksocknal_data.ksnd_connd_connecting--;
+ }
+
+ if (dropped_lock) {
+ if (++nloops < SOCKNAL_RESCHED)
+ continue;
+ spin_unlock_bh(connd_lock);
+ nloops = 0;
+ cond_resched();
+ spin_lock_bh(connd_lock);
+ continue;
+ }
+
+ /* Nothing to do for 'timeout' */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ spin_unlock_bh(connd_lock);
+
+ nloops = 0;
+ schedule_timeout(timeout);
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
+ spin_lock_bh(connd_lock);
+ }
+ ksocknal_data.ksnd_connd_running--;
+ spin_unlock_bh(connd_lock);
+
+ ksocknal_thread_fini();
+ return 0;
}
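
The open-coded wait at the bottom of ksocknal_connd() follows the manual wait-queue protocol. An annotated restatement of those lines, showing why the ordering makes a wakeup arriving between the unlock and schedule_timeout() non-lossy:

    set_current_state(TASK_INTERRUPTIBLE);  /* 1: mark ourselves sleeping */
    add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                                            /* 2: wake_up() can now see us */
    spin_unlock_bh(connd_lock);             /* 3: producers may queue work */

    schedule_timeout(timeout);              /* 4: a wake_up() issued after
                                             * step 1 has already set us
                                             * TASK_RUNNING, so this returns
                                             * immediately instead of losing
                                             * the event */

    set_current_state(TASK_RUNNING);
    remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
    spin_lock_bh(connd_lock);
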
-ksock_conn_t *
+static ksock_conn_t *
ksocknal_find_timed_out_conn (ksock_peer_t *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
ksock_conn_t *conn;
- struct list_head *ctmp;
+ struct list_head *ctmp;
- list_for_each (ctmp, &peer->ksnp_conns) {
+ list_for_each(ctmp, &peer->ksnp_conns) {
int error;
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
- /* SOCK_ERROR will reset error code of socket in
- * some platform (like Darwin8.x) */
- error = libcfs_sock_error(conn->ksnc_sock);
+ error = conn->ksnc_sock->sk->sk_err;
if (error != 0) {
ksocknal_conn_addref(conn);
switch (error) {
case ECONNRESET:
- CDEBUG(D_NETERROR, "A connection with %s "
- "(%u.%u.%u.%u:%d) was reset; "
- "it may have rebooted.\n",
- libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
+ CNETERR("A connection with %s "
+ "(%pI4h:%d) was reset; "
+ "it may have rebooted.\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
break;
case ETIMEDOUT:
- CDEBUG(D_NETERROR, "A connection with %s "
- "(%u.%u.%u.%u:%d) timed out; the "
- "network or node may be down.\n",
- libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
+ CNETERR("A connection with %s "
+ "(%pI4h:%d) timed out; the "
+ "network or node may be down.\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
break;
default:
- CDEBUG(D_NETERROR, "An unexpected network error %d "
- "occurred with %s "
- "(%u.%u.%u.%u:%d\n", error,
- libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
+ CNETERR("An unexpected network error %d "
+ "occurred with %s "
+ "(%pI4h:%d\n", error,
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
break;
}
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
- CDEBUG(D_NETERROR, "Timeout receiving from %s "
- "(%u.%u.%u.%u:%d), state %d wanted %d left %d\n",
- libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port,
- conn->ksnc_rx_state,
- conn->ksnc_rx_nob_wanted,
- conn->ksnc_rx_nob_left);
+ CNETERR("Timeout receiving from %s (%pI4h:%d), "
+ "state %d wanted %d left %d\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port,
+ conn->ksnc_rx_state,
+ conn->ksnc_rx_nob_wanted,
+ conn->ksnc_rx_nob_left);
return (conn);
}
- if ((!list_empty(&conn->ksnc_tx_queue) ||
- libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
+ if ((!list_empty(&conn->ksnc_tx_queue) ||
+ conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
- CDEBUG(D_NETERROR, "Timeout sending data to %s "
- "(%u.%u.%u.%u:%d) the network or that "
- "node may be down.\n",
- libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
- conn->ksnc_port);
+ CNETERR("Timeout sending data to %s (%pI4h:%d) "
+ "the network or that node may be down.\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port);
return (conn);
}
}
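
The CNETERR() messages above rely on the kernel's %pI4h printk extension, which replaces the old HIPQUAD() macro: it takes a pointer to a host-byte-order IPv4 address and renders it as a dotted quad. A minimal sketch, with hypothetical names:

/* Sketch only: "addr" and "port" are illustrative, not socklnd state. */
static void example_log_peer(__u32 addr, int port)
{
	/* note: %pI4h wants the *address* of the value, not the value */
	CDEBUG(D_NET, "peer at %pI4h:%d\n", &addr, port);
}

Passing the value itself rather than its address is the common mistake here; printk dereferences the pointer it is given.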
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
ksock_tx_t *tx;
- CFS_LIST_HEAD (stale_txs);
-
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
- while (!list_empty (&peer->ksnp_tx_queue)) {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ while (!list_empty(&peer->ksnp_tx_queue)) {
+ tx = list_entry(peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
-
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &stale_txs);
+
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &stale_txs);
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
}
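
ksocknal_flush_stale_txs() above follows the usual drain-then-complete pattern: expired entries are detached onto a private list under the write lock, and only finalized once the lock is dropped. A minimal sketch under assumed types; example_item_t and example_finalize_list() are hypothetical:

/* Sketch only: the item type and completion helper are hypothetical. */
typedef struct {
	struct list_head ei_list;
	cfs_time_t	 ei_deadline;
} example_item_t;

void example_finalize_list(struct list_head *done);	/* runs unlocked */

static void example_flush_expired(struct list_head *queue, rwlock_t *lock)
{
	struct list_head stale = LIST_HEAD_INIT(stale);
	example_item_t *item;

	write_lock_bh(lock);
	while (!list_empty(queue)) {
		item = list_entry(queue->next, example_item_t, ei_list);
		/* queue is deadline-ordered: stop at the first live entry */
		if (!cfs_time_aftereq(cfs_time_current(), item->ei_deadline))
			break;
		list_del(&item->ei_list);
		list_add_tail(&item->ei_list, &stale);
	}
	write_unlock_bh(lock);

	example_finalize_list(&stale);	/* no locks held here */
}

Completing the txs outside the lock matters because the completion path calls back into LNet and must not run under ksnd_global_lock.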
-int
+static int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+__must_hold(&ksocknal_data.ksnd_global_lock)
{
ksock_sched_t *sched;
ksock_conn_t *conn;
ksock_tx_t *tx;
- if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ /* last_alive will be updated by create_conn */
+ if (list_empty(&peer->ksnp_conns))
return 0;
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
if (conn != NULL) {
sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
- if (!list_empty(&conn->ksnc_tx_queue)) {
- spin_unlock_bh(&sched->kss_lock);
- /* there is an queued ACK, don't need keepalive */
- return 0;
- }
+ spin_lock_bh(&sched->kss_lock);
+ if (!list_empty(&conn->ksnc_tx_queue)) {
+ spin_unlock_bh(&sched->kss_lock);
+ /* there is a queued ACK, don't need keepalive */
+ return 0;
+ }
- spin_unlock_bh(&sched->kss_lock);
- }
+ spin_unlock_bh(&sched->kss_lock);
+ }
- read_unlock(&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
- /* cookie = 1 is reserved for keepalive PING */
- tx = ksocknal_alloc_tx_noop(1, 1);
- if (tx == NULL) {
- read_lock(&ksocknal_data.ksnd_global_lock);
- return -ENOMEM;
- }
+ /* cookie = 1 is reserved for keepalive PING */
+ tx = ksocknal_alloc_tx_noop(1, 1);
+ if (tx == NULL) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ return -ENOMEM;
+ }
- if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
- read_lock(&ksocknal_data.ksnd_global_lock);
- return 1;
- }
+ if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ return 1;
+ }
- ksocknal_free_tx(tx);
- read_lock(&ksocknal_data.ksnd_global_lock);
+ ksocknal_free_tx(tx);
+ read_lock(&ksocknal_data.ksnd_global_lock);
- return -EIO;
+ return -EIO;
}
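
The __must_hold() annotation added above tells sparse that ksocknal_send_keepalive_locked() is entered and exited with ksnd_global_lock held, even though it drops and retakes the lock around the allocation and launch. A minimal sketch of the same shape on a hypothetical lock:

/* Sketch only: example_lock and the allocation are illustrative. */
static DEFINE_RWLOCK(example_lock);

static int example_alloc_locked(void)
__must_hold(&example_lock)
{
	void *buf;

	read_unlock(&example_lock);	/* drop: allocation may sleep */
	buf = kmalloc(64, GFP_NOFS);
	read_lock(&example_lock);	/* retake: restore caller's state */

	if (buf == NULL)
		return -ENOMEM;
	kfree(buf);
	return 0;
}

Without the annotation, sparse would flag the unlock/lock pair inside the body as a context imbalance.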
-void
+static void
ksocknal_check_peer_timeouts (int idx)
{
- struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- struct list_head *ptmp;
+ struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
ksock_conn_t *conn;
+ ksock_tx_t *tx;
again:
/* NB. We expect to have a look at all the peers and not find any
* connections to time out, so we just use a shared lock while we
* take a look... */
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ list_for_each_entry(peer, peers, ksnp_list) {
+ ksock_tx_t *tx_stale;
+ cfs_time_t deadline = 0;
+ int resid = 0;
+ int n = 0;
- list_for_each (ptmp, peers) {
- peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
if (ksocknal_send_keepalive_locked(peer) != 0) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
goto again;
}
conn = ksocknal_find_timed_out_conn (peer);
if (conn != NULL) {
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
/* we can't process stale txs right here because we're
* holding only shared lock */
- if (!list_empty (&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ if (!list_empty(&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx =
+ list_entry(peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
ksocknal_peer_addref(peer);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
-
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
ksocknal_flush_stale_txs(peer);
ksocknal_peer_decref(peer);
goto again;
}
}
- }
- /* print out warnings about stale ZC_REQs */
- cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
- ksock_tx_t *tx;
- int n = 0;
-
- cfs_list_for_each_entry_typed(tx, &peer->ksnp_zc_req_list,
- ksock_tx_t, tx_zc_list) {
+ if (list_empty(&peer->ksnp_zc_req_list))
+ continue;
+
+ tx_stale = NULL;
+ spin_lock(&peer->ksnp_lock);
+ list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
+ /* ignore the TX if connection is being closed */
+ if (tx->tx_conn->ksnc_closing)
+ continue;
n++;
+ if (tx_stale == NULL)
+ tx_stale = tx;
}
- if (n != 0) {
- tx = list_entry (peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
- CWARN("Stale ZC_REQs for peer %s detected: %d; the "
- "oldest (%p) timed out %ld secs ago\n",
- libcfs_nid2str(peer->ksnp_id.nid), n, tx,
- cfs_duration_sec(cfs_time_current() -
- tx->tx_deadline));
- }
+ if (tx_stale == NULL) {
+ spin_unlock(&peer->ksnp_lock);
+ continue;
+ }
+
+ deadline = tx_stale->tx_deadline;
+ resid = tx_stale->tx_resid;
+ conn = tx_stale->tx_conn;
+ ksocknal_conn_addref(conn);
+
+ spin_unlock(&peer->ksnp_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ CERROR("Total %d stale ZC_REQs for peer %s detected; the "
+ "oldest(%p) timed out %ld secs ago, "
+ "resid: %d, wmem: %d\n",
+ n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
+ cfs_duration_sec(cfs_time_current() - deadline),
+ resid, conn->ksnc_sock->sk->sk_wmem_queued);
+
+ ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_conn_decref(conn);
+ goto again;
}
-
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
}
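
The stale ZC_REQ path above pins the connection with a refcount while ksnp_lock is still held, and only then drops both locks to log and close it; the private reference keeps the conn valid once nothing else holds it. A minimal sketch of that pin-then-unlock pattern, with hypothetical types and helpers:

/* Sketch only: all example_* names are placeholders. */
typedef struct example_conn example_conn_t;
typedef struct {
	spinlock_t ep_lock;
} example_peer_t;

example_conn_t *example_pick_stale(example_peer_t *peer);	/* or NULL */
void example_conn_addref(example_conn_t *conn);
void example_conn_decref(example_conn_t *conn);
void example_close_conn(example_conn_t *conn, int error);

static void example_report_stale(example_peer_t *peer)
{
	example_conn_t *conn;

	spin_lock(&peer->ep_lock);
	conn = example_pick_stale(peer);
	if (conn != NULL)
		example_conn_addref(conn);	/* pin before unlock */
	spin_unlock(&peer->ep_lock);

	if (conn == NULL)
		return;

	/* safe to log and close here: we hold our own reference */
	example_close_conn(conn, -ETIMEDOUT);
	example_conn_decref(conn);
}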
-int
-ksocknal_reaper (void *arg)
+int ksocknal_reaper(void *arg)
{
- cfs_waitlink_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- struct list_head enomem_conns;
+ wait_queue_t wait;
+ ksock_conn_t *conn;
+ ksock_sched_t *sched;
+ struct list_head enomem_conns;
int nenomem_conns;
cfs_duration_t timeout;
int i;
int peer_index = 0;
cfs_time_t deadline = cfs_time_current();
- cfs_daemonize ("socknal_reaper");
cfs_block_allsigs ();
- CFS_INIT_LIST_HEAD(&enomem_conns);
- cfs_waitlink_init (&wait);
+ INIT_LIST_HEAD(&enomem_conns);
+ init_waitqueue_entry(&wait, current);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+                   ksock_conn_t, ksnc_list);
+ list_del(&conn->ksnc_list);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- ksocknal_terminate_conn (conn);
- ksocknal_conn_decref(conn);
+ ksocknal_terminate_conn(conn);
+ ksocknal_conn_decref(conn);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+                   ksock_conn_t, ksnc_list);
+ list_del(&conn->ksnc_list);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- ksocknal_destroy_conn (conn);
+ ksocknal_destroy_conn(conn);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
- list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
- list_del_init(&ksocknal_data.ksnd_enomem_conns);
+ if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
+ list_add(&enomem_conns,
+ &ksocknal_data.ksnd_enomem_conns);
+ list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty (&enomem_conns)) {
- conn = list_entry (enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ while (!list_empty(&enomem_conns)) {
+ conn = list_entry(enomem_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- LASSERT (conn->ksnc_tx_scheduled);
- conn->ksnc_tx_ready = 1;
- list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
- cfs_waitq_signal (&sched->kss_waitq);
+ LASSERT(conn->ksnc_tx_scheduled);
+ conn->ksnc_tx_ready = 1;
+ list_add_tail(&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ wake_up(&sched->kss_waitq);
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
nenomem_conns++;
}
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
- cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
- if (!ksocknal_data.ksnd_shuttingdown &&
- list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- list_empty (&ksocknal_data.ksnd_zombie_conns))
- cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ if (!ksocknal_data.ksnd_shuttingdown &&
+ list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+ list_empty(&ksocknal_data.ksnd_zombie_conns))
+ schedule_timeout(timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
- }
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+ }
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- ksocknal_thread_fini ();
- return (0);
+ ksocknal_thread_fini();
+ return 0;
}
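
A side note on the reaper's ENOMEM handover above: the list_add() of the local head followed by list_del_init() of the global head moves the whole list in O(1), and list_splice_init() expresses the same operation in one call. A minimal sketch with placeholder names:

/* Sketch only: "global", "local" and "lock" are placeholders. */
static void example_grab_pending(struct list_head *global,
				 struct list_head *local, spinlock_t *lock)
{
	spin_lock_bh(lock);
	/* move every entry onto "local"; "global" is left empty */
	list_splice_init(global, local);
	spin_unlock_bh(lock);
}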