* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include "socknal.h"
+#include "socklnd.h"
-/*
- * LIB functions follow
- *
- */
-int
-ksocknal_read(nal_cb_t *nal, void *private, void *dst_addr,
- user_ptr src_addr, size_t len)
-{
- CDEBUG(D_NET, LPX64": reading %ld bytes from %p -> %p\n",
- nal->ni.nid, (long)len, src_addr, dst_addr);
-
- memcpy( dst_addr, src_addr, len );
- return 0;
-}
-
-int
-ksocknal_write(nal_cb_t *nal, void *private, user_ptr dst_addr,
- void *src_addr, size_t len)
-{
- CDEBUG(D_NET, LPX64": writing %ld bytes from %p -> %p\n",
- nal->ni.nid, (long)len, src_addr, dst_addr);
-
- memcpy( dst_addr, src_addr, len );
- return 0;
-}
-
-int
-ksocknal_callback (nal_cb_t * nal, void *private, lib_eq_t *eq,
- ptl_event_t *ev)
+ksock_tx_t *
+ksocknal_alloc_tx (int size)
{
- CDEBUG(D_NET, LPX64": callback eq %p ev %p\n",
- nal->ni.nid, eq, ev);
+ ksock_tx_t *tx = NULL;
- if (eq->event_callback != NULL)
- eq->event_callback(ev);
+ if (size == KSOCK_NOOP_TX_SIZE) {
+ /* search for a noop tx in the free list */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- return 0;
-}
-
-void *
-ksocknal_malloc(nal_cb_t *nal, size_t len)
-{
- void *buf;
-
- PORTAL_ALLOC(buf, len);
-
- if (buf != NULL)
- memset(buf, 0, len);
-
- return (buf);
-}
-
-void
-ksocknal_free(nal_cb_t *nal, void *buf, size_t len)
-{
- PORTAL_FREE(buf, len);
-}
-
-void
-ksocknal_printf(nal_cb_t *nal, const char *fmt, ...)
-{
- va_list ap;
- char msg[256];
+ if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+ ksock_tx_t, tx_list);
+ LASSERT(tx->tx_desc_size == size);
+ list_del(&tx->tx_list);
+ }
+
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ }
+
+ if (tx == NULL)
+ LIBCFS_ALLOC(tx, size);
- va_start (ap, fmt);
- vsnprintf (msg, sizeof (msg), fmt, ap); /* sprint safely */
- va_end (ap);
+ if (tx == NULL)
+ return NULL;
- msg[sizeof (msg) - 1] = 0; /* ensure terminated */
+ atomic_set(&tx->tx_refcount, 1);
+ tx->tx_desc_size = size;
+ atomic_inc(&ksocknal_data.ksnd_nactive_txs);
- CDEBUG (D_NET, "%s", msg);
+ return tx;
}
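The new alloc path keeps fixed-size noop descriptors on a spinlock-protected free list and only falls back to LIBCFS_ALLOC() on a miss, so zc-ack noops never hit the allocator on the hot path. A minimal userspace sketch of the same caching pattern (pthread mutex standing in for ksnd_tx_lock; names here are illustrative, not from this patch):

    #include <stdlib.h>
    #include <pthread.h>

    struct tx {
            struct tx *next;        /* freelist link */
            int        size;
    };

    static struct tx      *free_list;
    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take a cached descriptor off the freelist if possible, else allocate */
    static struct tx *tx_alloc(int size)
    {
            struct tx *t = NULL;

            pthread_mutex_lock(&free_lock);
            if (free_list != NULL) {
                    t = free_list;
                    free_list = t->next;
            }
            pthread_mutex_unlock(&free_lock);

            if (t == NULL)
                    t = malloc(sizeof(*t));
            if (t != NULL)
                    t->size = size;
            return t;
    }

    /* Return a descriptor to the freelist instead of freeing it */
    static void tx_free(struct tx *t)
    {
            pthread_mutex_lock(&free_lock);
            t->next = free_list;
            free_list = t;
            pthread_mutex_unlock(&free_lock);
    }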
void
-ksocknal_cli(nal_cb_t *nal, unsigned long *flags)
+ksocknal_free_tx (ksock_tx_t *tx)
{
- ksock_nal_data_t *data = nal->nal_data;
-
- spin_lock(&data->ksnd_nal_cb_lock);
-}
+ atomic_dec(&ksocknal_data.ksnd_nactive_txs);
-void
-ksocknal_sti(nal_cb_t *nal, unsigned long *flags)
-{
- ksock_nal_data_t *data;
- data = nal->nal_data;
+ if (tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+ /* it's a noop tx */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- spin_unlock(&data->ksnd_nal_cb_lock);
-}
+ list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
-int
-ksocknal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
-{
- /* I would guess that if ksocknal_get_peer (nid) == NULL,
- and we're not routing, then 'nid' is very distant :) */
- if ( nal->ni.nid == nid ) {
- *dist = 0;
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
} else {
- *dist = 1;
- }
-
- return 0;
-}
-
-ksock_ltx_t *
-ksocknal_get_ltx (int may_block)
-{
- unsigned long flags;
- ksock_ltx_t *ltx = NULL;
-
- for (;;) {
- spin_lock_irqsave (&ksocknal_data.ksnd_idle_ltx_lock, flags);
-
- if (!list_empty (&ksocknal_data.ksnd_idle_ltx_list)) {
- ltx = list_entry(ksocknal_data.ksnd_idle_ltx_list.next,
- ksock_ltx_t, ltx_tx.tx_list);
- list_del (&ltx->ltx_tx.tx_list);
- ksocknal_data.ksnd_active_ltxs++;
- break;
- }
-
- if (!may_block) {
- if (!list_empty(&ksocknal_data.ksnd_idle_nblk_ltx_list)) {
- ltx = list_entry(ksocknal_data.ksnd_idle_nblk_ltx_list.next,
- ksock_ltx_t, ltx_tx.tx_list);
- list_del (&ltx->ltx_tx.tx_list);
- ksocknal_data.ksnd_active_ltxs++;
- }
- break;
- }
-
- spin_unlock_irqrestore(&ksocknal_data.ksnd_idle_ltx_lock,
- flags);
-
- wait_event (ksocknal_data.ksnd_idle_ltx_waitq,
- !list_empty (&ksocknal_data.ksnd_idle_ltx_list));
+ LIBCFS_FREE(tx, tx->tx_desc_size);
}
-
- spin_unlock_irqrestore (&ksocknal_data.ksnd_idle_ltx_lock, flags);
-
- return (ltx);
}
void
-ksocknal_put_ltx (ksock_ltx_t *ltx)
-{
- unsigned long flags;
-
- spin_lock_irqsave (&ksocknal_data.ksnd_idle_ltx_lock, flags);
-
- ksocknal_data.ksnd_active_ltxs--;
- list_add_tail (&ltx->ltx_tx.tx_list, ltx->ltx_idle);
-
- /* normal tx desc => wakeup anyone blocking for one */
- if (ltx->ltx_idle == &ksocknal_data.ksnd_idle_ltx_list &&
- waitqueue_active (&ksocknal_data.ksnd_idle_ltx_waitq))
- wake_up (&ksocknal_data.ksnd_idle_ltx_waitq);
-
- spin_unlock_irqrestore (&ksocknal_data.ksnd_idle_ltx_lock, flags);
-}
-
-#if SOCKNAL_ZC
-struct page *
-ksocknal_kvaddr_to_page (unsigned long vaddr)
+ksocknal_init_msg(ksock_msg_t *msg, int type)
{
- struct page *page;
-
- if (vaddr >= VMALLOC_START &&
- vaddr < VMALLOC_END)
- page = vmalloc_to_page ((void *)vaddr);
-#if CONFIG_HIGHMEM
- else if (vaddr >= PKMAP_BASE &&
- vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
- page = vmalloc_to_page ((void *)vaddr);
- /* in 2.4 ^ just walks the page tables */
-#endif
- else
- page = virt_to_page (vaddr);
-
- if (page == NULL ||
- !VALID_PAGE (page))
- return (NULL);
-
- return (page);
+ msg->ksm_type = type;
+ msg->ksm_csum = 0;
+ msg->ksm_zc_req_cookie = 0;
+ msg->ksm_zc_ack_cookie = 0;
}
-#endif
int
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
-{
- struct socket *sock = conn->ksnc_sock;
+{
struct iovec *iov = tx->tx_iov;
- int fragsize = iov->iov_len;
- unsigned long vaddr = (unsigned long)iov->iov_base;
- int more = (!list_empty (&conn->ksnc_tx_queue)) |
- (tx->tx_niov > 1) |
- (tx->tx_nkiov > 1);
-#if SOCKNAL_ZC
- int offset = vaddr & (PAGE_SIZE - 1);
- int zcsize = MIN (fragsize, PAGE_SIZE - offset);
- struct page *page;
-#endif
- int rc;
+ int nob;
+ int rc;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone, so we only send 1 frag at a time. */
- LASSERT (fragsize <= tx->tx_resid);
LASSERT (tx->tx_niov > 0);
-
-#if SOCKNAL_ZC
- if (zcsize >= ksocknal_data.ksnd_zc_min_frag &&
- (sock->sk->route_caps & NETIF_F_SG) &&
- (sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) &&
- (page = ksocknal_kvaddr_to_page (vaddr)) != NULL) {
-
- CDEBUG(D_NET, "vaddr %p, page %p->%p + offset %x for %d\n",
- (void *)vaddr, page, page_address(page), offset, zcsize);
-
- if (fragsize > zcsize) {
- more = 1;
- fragsize = zcsize;
- }
- rc = tcp_sendpage_zccd(sock, page, offset, zcsize,
- more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT,
- &tx->tx_zccd);
- } else
-#endif
- {
- /* NB don't pass tx's iov; sendmsg may or may not update it */
- struct iovec fragiov = { .iov_base = (void *)vaddr,
- .iov_len = fragsize};
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &fragiov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT
- };
- mm_segment_t oldmm = get_fs();
-
- set_fs (KERNEL_DS);
- rc = sock_sendmsg(sock, &msg, fragsize);
- set_fs (oldmm);
- }
+ /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
+ rc = ksocknal_lib_send_iov(conn, tx);
- if (rc <= 0)
+ if (rc <= 0) /* sent nothing? */
return (rc);
- tx->tx_resid -= rc;
+ nob = rc;
+ LASSERT (nob <= tx->tx_resid);
+ tx->tx_resid -= nob;
- if (rc < iov->iov_len) {
- /* didn't send whole iov entry... */
- iov->iov_base = (void *)(vaddr + rc);
- iov->iov_len -= rc;
- /* ...but did we send everything we tried to send? */
- return ((rc == fragsize) ? 1 : -EAGAIN);
- }
+ /* "consume" iov */
+ do {
+ LASSERT (tx->tx_niov > 0);
- tx->tx_iov++;
- tx->tx_niov--;
- return (1);
+ if (nob < iov->iov_len) {
+ iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
+ iov->iov_len -= nob;
+ return (rc);
+ }
+
+ nob -= iov->iov_len;
+ tx->tx_iov = ++iov;
+ tx->tx_niov--;
+ } while (nob != 0);
+
+ return (rc);
}
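With the platform send moved behind ksocknal_lib_send_iov(), the generic code now owns the iov bookkeeping: it advances tx_iov by however many bytes actually went out, trimming a partially-sent fragment in place. The same "consume" arithmetic in isolation, as a standalone sketch:

    #include <stddef.h>
    #include <sys/uio.h>

    /* Advance 'iov' by 'nob' sent bytes.  Whole fragments are consumed;
     * a partially-sent fragment has its base and length trimmed in place.
     * Returns the number of fragments fully consumed. */
    static int iov_consume(struct iovec *iov, int niov, size_t nob)
    {
            int consumed = 0;

            while (nob != 0 && consumed < niov) {
                    if (nob < iov->iov_len) {
                            iov->iov_base = (char *)iov->iov_base + nob;
                            iov->iov_len -= nob;
                            break;          /* partial fragment remains */
                    }
                    nob -= iov->iov_len;
                    iov++;
                    consumed++;
            }
            return consumed;
    }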
int
ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
-{
- struct socket *sock = conn->ksnc_sock;
- ptl_kiov_t *kiov = tx->tx_kiov;
- int fragsize = kiov->kiov_len;
- struct page *page = kiov->kiov_page;
- int offset = kiov->kiov_offset;
- int more = (!list_empty (&conn->ksnc_tx_queue)) |
- (tx->tx_nkiov > 1);
- int rc;
+{
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int nob;
+ int rc;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone, so we only send 1 frag at a time. */
- LASSERT (fragsize <= tx->tx_resid);
- LASSERT (offset + fragsize <= PAGE_SIZE);
- LASSERT (tx->tx_niov == 0);
+ LASSERT (tx->tx_niov == 0);
LASSERT (tx->tx_nkiov > 0);
-#if SOCKNAL_ZC
- if (fragsize >= ksocknal_data.ksnd_zc_min_frag &&
- (sock->sk->route_caps & NETIF_F_SG) &&
- (sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM))) {
+ /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
+ rc = ksocknal_lib_send_kiov(conn, tx);
- CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, fragsize);
+ if (rc <= 0) /* sent nothing? */
+ return (rc);
+
+ nob = rc;
+ LASSERT (nob <= tx->tx_resid);
+ tx->tx_resid -= nob;
- rc = tcp_sendpage_zccd(sock, page, offset, fragsize,
- more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT,
- &tx->tx_zccd);
- } else
-#endif
- {
- char *addr = ((char *)kmap (page)) + offset;
- struct iovec fragiov = {.iov_base = addr,
- .iov_len = fragsize};
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &fragiov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT
- };
- mm_segment_t oldmm = get_fs();
-
- set_fs (KERNEL_DS);
- rc = sock_sendmsg(sock, &msg, fragsize);
- set_fs (oldmm);
+ /* "consume" kiov */
+ do {
+ LASSERT(tx->tx_nkiov > 0);
- kunmap (page);
- }
+ if (nob < kiov->kiov_len) {
+ kiov->kiov_offset += nob;
+ kiov->kiov_len -= nob;
+ return rc;
+ }
- if (rc <= 0)
- return (rc);
+ nob -= kiov->kiov_len;
+ tx->tx_kiov = ++kiov;
+ tx->tx_nkiov--;
+ } while (nob != 0);
- tx->tx_resid -= rc;
-
- if (rc < fragsize) {
- /* didn't send whole frag */
- kiov->kiov_offset = offset + rc;
- kiov->kiov_len = fragsize - rc;
- return (-EAGAIN);
- }
-
- /* everything went */
- LASSERT (rc == fragsize);
- tx->tx_kiov++;
- tx->tx_nkiov--;
- return (1);
+ return (rc);
}
int
-ksocknal_sendmsg (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
- /* Return 0 on success, < 0 on error.
- * caller checks tx_resid to determine progress/completion */
int rc;
- ENTRY;
+ int bufnob;
if (ksocknal_data.ksnd_stall_tx != 0) {
- set_current_state (TASK_UNINTERRUPTIBLE);
- schedule_timeout (ksocknal_data.ksnd_stall_tx * HZ);
+ cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
}
- rc = ksocknal_getconnsock (conn);
- if (rc != 0)
- return (rc);
-
- for (;;) {
- LASSERT (tx->tx_resid != 0);
+ LASSERT (tx->tx_resid != 0);
- if (conn->ksnc_closing) {
- rc = -ESHUTDOWN;
- break;
- }
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT (conn->ksnc_closing);
+ return (-ESHUTDOWN);
+ }
- if (tx->tx_niov != 0)
+ do {
+ if (ksocknal_data.ksnd_enomem_tx > 0) {
+ /* testing... */
+ ksocknal_data.ksnd_enomem_tx--;
+ rc = -EAGAIN;
+ } else if (tx->tx_niov != 0) {
rc = ksocknal_send_iov (conn, tx);
- else
+ } else {
rc = ksocknal_send_kiov (conn, tx);
+ }
- if (rc <= 0) { /* error or socket full? */
- /* NB: rc == 0 and rc == -EAGAIN both mean try
- * again later (linux stack returns -EAGAIN for
- * this, but Adaptech TOE returns 0) */
- if (rc == -EAGAIN)
- rc = 0;
- break;
+ bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
+ if (rc > 0) /* sent something? */
+ conn->ksnc_tx_bufnob += rc; /* account it */
+
+ if (bufnob < conn->ksnc_tx_bufnob) {
+ /* allocated send buffer bytes < computed; infer
+ * something got ACKed */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = bufnob;
+ mb();
}
- /* Consider the connection alive since we managed to chuck
- * more data into it. Really, we'd like to consider it
- * alive only when the peer ACKs something, but
- * write_space() only gets called back while SOCK_NOSPACE
- * is set. Instead, we presume peer death has occurred if
- * the socket doesn't drain within a timout */
- conn->ksnc_tx_deadline = jiffies +
- ksocknal_data.ksnd_io_timeout * HZ;
- conn->ksnc_peer->ksnp_last_alive = jiffies;
-
- if (tx->tx_resid == 0) { /* sent everything */
- rc = 0;
+ if (rc <= 0) { /* Didn't write anything? */
+ ksock_sched_t *sched;
+
+ if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+ rc = -EAGAIN;
+
+ if (rc != -EAGAIN)
+ break;
+
+ /* Check if EAGAIN is due to memory pressure */
+
+ sched = conn->ksnc_scheduler;
+ spin_lock_bh (&sched->kss_lock);
+
+ if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
+ !conn->ksnc_tx_ready) {
+ /* SOCK_NOSPACE is set when the socket fills
+ * and cleared in the write_space callback
+ * (which also sets ksnc_tx_ready). If
+ * SOCK_NOSPACE and ksnc_tx_ready are BOTH
+ * zero, I didn't fill the socket and
+ * write_space won't reschedule me, so I
+ * return -ENOMEM to get my caller to retry
+ * after a timeout */
+ rc = -ENOMEM;
+ }
+
+ spin_unlock_bh (&sched->kss_lock);
break;
}
- }
- ksocknal_putconnsock (conn);
- RETURN (rc);
-}
+ /* socket's wmem_queued now includes 'rc' bytes */
+ atomic_sub (rc, &conn->ksnc_tx_nob);
+ rc = 0;
-void
-ksocknal_eager_ack (ksock_conn_t *conn)
-{
- int opt = 1;
- mm_segment_t oldmm = get_fs();
- struct socket *sock = conn->ksnc_sock;
-
- /* Remind the socket to ACK eagerly. If I don't, the socket might
- * think I'm about to send something it could piggy-back the ACK
- * on, introducing delay in completing zero-copy sends in my
- * peer. */
-
- set_fs(KERNEL_DS);
- sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
- (char *)&opt, sizeof (opt));
- set_fs(oldmm);
+ } while (tx->tx_resid != 0);
+
+ ksocknal_connsock_decref(conn);
+ return (rc);
}
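ksocknal_transmit() infers peer liveness without seeing ACKs directly: ksnc_tx_bufnob tracks the bytes we believe are queued, and if SOCK_WMEM_QUEUED() reports fewer, the stack must have released acked data, so the deadline is pushed out. The rc == 0 normalisation and the SOCK_NOSPACE test then separate "socket full" (-EAGAIN) from memory pressure (-ENOMEM). A condensed sketch of that decision, with illustrative names:

    #include <errno.h>

    /* Condensed per-send bookkeeping: 'queued' is what the socket reports
     * as buffered, '*expected' is our running count of bytes handed over. */
    static int account_send(int rc, int queued, int *expected,
                            int sock_nospace, int tx_ready)
    {
            if (rc > 0)
                    *expected += rc;        /* bytes just given to the stack */

            if (queued < *expected)         /* stack freed data: peer ACKed */
                    *expected = queued;     /* ...so reset the tx deadline  */

            if (rc <= 0) {
                    if (rc == 0)
                            rc = -EAGAIN;   /* some stacks return 0 here */
                    if (rc == -EAGAIN && !sock_nospace && !tx_ready)
                            rc = -ENOMEM;   /* EAGAIN but socket not full */
            }
            return rc;
    }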
int
ksocknal_recv_iov (ksock_conn_t *conn)
-{
+{
struct iovec *iov = conn->ksnc_rx_iov;
- int fragsize = iov->iov_len;
- unsigned long vaddr = (unsigned long)iov->iov_base;
- struct iovec fragiov = { .iov_base = (void *)vaddr,
- .iov_len = fragsize};
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &fragiov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = 0
- };
- mm_segment_t oldmm = get_fs();
- int rc;
-
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone, so we only receive 1 frag at a time. */
+ int nob;
+ int rc;
+
LASSERT (conn->ksnc_rx_niov > 0);
- LASSERT (fragsize <= conn->ksnc_rx_nob_wanted);
+
+ /* Never touch conn->ksnc_rx_iov or change connection
+ * status inside ksocknal_lib_recv_iov */
+ rc = ksocknal_lib_recv_iov(conn);
+
+ if (rc <= 0)
+ return (rc);
+
+ /* received something... */
+ nob = rc;
+
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
- set_fs (KERNEL_DS);
- rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT);
- /* NB this is just a boolean............................^ */
- set_fs (oldmm);
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
- if (rc <= 0)
- return (rc);
+ do {
+ LASSERT (conn->ksnc_rx_niov > 0);
- /* received something... */
- conn->ksnc_peer->ksnp_last_alive = jiffies;
- conn->ksnc_rx_deadline = jiffies +
- ksocknal_data.ksnd_io_timeout * HZ;
- mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
+ if (nob < iov->iov_len) {
+ iov->iov_len -= nob;
+ iov->iov_base = (void *)(((unsigned long)iov->iov_base) + nob);
+ return (-EAGAIN);
+ }
- conn->ksnc_rx_nob_wanted -= rc;
- conn->ksnc_rx_nob_left -= rc;
-
- if (rc < fragsize) {
- iov->iov_base = (void *)(vaddr + rc);
- iov->iov_len = fragsize - rc;
- return (-EAGAIN);
- }
+ nob -= iov->iov_len;
+ conn->ksnc_rx_iov = ++iov;
+ conn->ksnc_rx_niov--;
+ } while (nob != 0);
- conn->ksnc_rx_iov++;
- conn->ksnc_rx_niov--;
- return (1);
+ return (rc);
}
int
ksocknal_recv_kiov (ksock_conn_t *conn)
{
- ptl_kiov_t *kiov = conn->ksnc_rx_kiov;
- struct page *page = kiov->kiov_page;
- int offset = kiov->kiov_offset;
- int fragsize = kiov->kiov_len;
- unsigned long vaddr = ((unsigned long)kmap (page)) + offset;
- struct iovec fragiov = { .iov_base = (void *)vaddr,
- .iov_len = fragsize};
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &fragiov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = 0
- };
- mm_segment_t oldmm = get_fs();
- int rc;
-
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone, so we only receive 1 frag at a time. */
- LASSERT (fragsize <= conn->ksnc_rx_nob_wanted);
+ lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ int nob;
+ int rc;
LASSERT (conn->ksnc_rx_nkiov > 0);
- LASSERT (offset + fragsize <= PAGE_SIZE);
-
- set_fs (KERNEL_DS);
- rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT);
- /* NB this is just a boolean............................^ */
- set_fs (oldmm);
- kunmap (page);
+ /* Never touch conn->ksnc_rx_kiov or change connection
+ * status inside ksocknal_lib_recv_kiov */
+ rc = ksocknal_lib_recv_kiov(conn);
- if (rc <= 0)
- return (rc);
+ if (rc <= 0)
+ return (rc);
- /* received something... */
- conn->ksnc_peer->ksnp_last_alive = jiffies;
- conn->ksnc_rx_deadline = jiffies +
- ksocknal_data.ksnd_io_timeout * HZ;
- mb(); /* order with setting rx_started */
+ /* received something... */
+ nob = rc;
+
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= rc;
- conn->ksnc_rx_nob_left -= rc;
-
- if (rc < fragsize) {
- kiov->kiov_offset = offset + rc;
- kiov->kiov_len = fragsize - rc;
- return (-EAGAIN);
- }
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
+
+ do {
+ LASSERT (conn->ksnc_rx_nkiov > 0);
+
+ if (nob < kiov->kiov_len) {
+ kiov->kiov_offset += nob;
+ kiov->kiov_len -= nob;
+ return -EAGAIN;
+ }
+
+ nob -= kiov->kiov_len;
+ conn->ksnc_rx_kiov = ++kiov;
+ conn->ksnc_rx_nkiov--;
+ } while (nob != 0);
- conn->ksnc_rx_kiov++;
- conn->ksnc_rx_nkiov--;
- return (1);
+ return 1;
}
int
-ksocknal_recvmsg (ksock_conn_t *conn)
+ksocknal_receive (ksock_conn_t *conn)
{
/* Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
ENTRY;
if (ksocknal_data.ksnd_stall_rx != 0) {
- set_current_state (TASK_UNINTERRUPTIBLE);
- schedule_timeout (ksocknal_data.ksnd_stall_rx * HZ);
+ cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
}
- rc = ksocknal_getconnsock (conn);
- if (rc != 0)
- return (rc);
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT (conn->ksnc_closing);
+ return (-ESHUTDOWN);
+ }
for (;;) {
- if (conn->ksnc_closing) {
- rc = -ESHUTDOWN;
- break;
- }
-
if (conn->ksnc_rx_niov != 0)
rc = ksocknal_recv_iov (conn);
else
/* Completed a fragment */
if (conn->ksnc_rx_nob_wanted == 0) {
- /* Completed a message segment (header or payload) */
- if (ksocknal_data.ksnd_eager_ack &&
- (conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
- conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) {
- /* Remind the socket to ack eagerly... */
- ksocknal_eager_ack(conn);
- }
rc = 1;
break;
}
}
- ksocknal_putconnsock (conn);
+ ksocknal_connsock_decref(conn);
RETURN (rc);
}
-#if SOCKNAL_ZC
void
-ksocknal_zc_callback (zccd_t *zcd)
+ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
{
- ksock_tx_t *tx = KSOCK_ZCCD_2_TX(zcd);
- ksock_sched_t *sched = tx->tx_conn->ksnc_scheduler;
- unsigned long flags;
+ lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
+ int rc = (tx->tx_resid == 0) ? 0 : -EIO;
ENTRY;
- /* Schedule tx for cleanup (can't do it now due to lock conflicts) */
+ LASSERT(ni != NULL || tx->tx_conn != NULL);
+
+ if (tx->tx_conn != NULL)
+ ksocknal_conn_decref(tx->tx_conn);
- spin_lock_irqsave (&sched->kss_lock, flags);
+ if (ni == NULL && tx->tx_conn != NULL)
+ ni = tx->tx_conn->ksnc_peer->ksnp_ni;
- list_add_tail (&tx->tx_list, &sched->kss_zctxdone_list);
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ ksocknal_free_tx (tx);
+ if (lnetmsg != NULL) /* KSOCK_MSG_NOOPs go without lnetmsg */
+ lnet_finalize (ni, lnetmsg, rc);
- spin_unlock_irqrestore (&sched->kss_lock, flags);
EXIT;
}
-#endif
void
-ksocknal_tx_done (ksock_tx_t *tx, int asynch)
+ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
{
- ksock_ltx_t *ltx;
- ENTRY;
+ ksock_tx_t *tx;
+
+ while (!list_empty (txlist)) {
+ tx = list_entry (txlist->next, ksock_tx_t, tx_list);
+
+ if (error && tx->tx_lnetmsg != NULL) {
+ CDEBUG (D_NETERROR, "Deleting packet type %d len %d %s->%s\n",
+ le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
+ le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
+ libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
+ libcfs_nid2str(le64_to_cpu (tx->tx_lnetmsg->msg_hdr.dest_nid)));
+ } else if (error) {
+ CDEBUG (D_NETERROR, "Deleting noop packet\n");
+ }
- if (tx->tx_conn != NULL) {
- /* This tx got queued on a conn; do the accounting... */
- atomic_sub (tx->tx_nob, &tx->tx_conn->ksnc_tx_nob);
-#if SOCKNAL_ZC
- /* zero copy completion isn't always from
- * process_transmit() so it needs to keep a ref on
- * tx_conn... */
- if (asynch)
- ksocknal_put_conn (tx->tx_conn);
-#else
- LASSERT (!asynch);
-#endif
+ list_del (&tx->tx_list);
+
+ LASSERT (atomic_read(&tx->tx_refcount) == 1);
+ ksocknal_tx_done (ni, tx);
}
+}
- if (tx->tx_isfwd) { /* was a forwarded packet? */
- kpr_fwd_done (&ksocknal_data.ksnd_router,
- KSOCK_TX_2_KPR_FWD_DESC (tx), 0);
- EXIT;
+static void
+ksocknal_check_zc_req(ksock_tx_t *tx)
+{
+ ksock_conn_t *conn = tx->tx_conn;
+ ksock_peer_t *peer = conn->ksnc_peer;
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int nkiov = tx->tx_nkiov;
+
+ /* Set tx_msg.ksm_zc_req_cookie to a unique non-zero cookie and add tx
+ * to ksnp_zc_req_list if some fragment of this message should be sent
+ * zero-copy. Our peer will send an ACK containing this cookie when
+ * she has received this message to tell us we can signal completion.
+ * tx_msg.ksm_zc_req_cookie remains non-zero while tx is on
+ * ksnp_zc_req_list. */
+
+ if (conn->ksnc_proto != &ksocknal_protocol_v2x ||
+ !conn->ksnc_zc_capable)
return;
+
+ while (nkiov > 0) {
+ if (kiov->kiov_len >= *ksocknal_tunables.ksnd_zc_min_frag)
+ break;
+ --nkiov;
+ ++kiov;
}
- /* local send */
- ltx = KSOCK_TX_2_KSOCK_LTX (tx);
+ if (nkiov == 0)
+ return;
+
+ /* assign cookie and queue tx to pending list, it will be released when
+ * a matching ack is received. See ksocknal_handle_zc_ack() */
- lib_finalize (&ksocknal_lib, ltx->ltx_private, ltx->ltx_cookie);
+ ksocknal_tx_addref(tx);
- ksocknal_put_ltx (ltx);
- EXIT;
+ spin_lock(&peer->ksnp_lock);
+
+ LASSERT (tx->tx_msg.ksm_zc_req_cookie == 0);
+ tx->tx_msg.ksm_zc_req_cookie = peer->ksnp_zc_next_cookie++;
+ list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+
+ spin_unlock(&peer->ksnp_lock);
}
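The zero-copy handshake above reduces to: stamp the message with a unique non-zero cookie, park the tx on the peer's ksnp_zc_req_list, and complete it only when a matching ack cookie arrives. A minimal single-threaded sketch of the cookie matching (helper names hypothetical):

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_PENDING 64

    struct pending {
            uint64_t cookie;        /* 0 == slot free */
            void    *tx;
    };

    static struct pending zc_list[MAX_PENDING];
    static uint64_t       zc_next_cookie = 1;

    /* Register a tx awaiting a zero-copy ACK; returns its cookie, 0 if full */
    static uint64_t zc_req(void *tx)
    {
            for (int i = 0; i < MAX_PENDING; i++) {
                    if (zc_list[i].cookie == 0) {
                            zc_list[i].cookie = zc_next_cookie++;
                            zc_list[i].tx = tx;
                            return zc_list[i].cookie;
                    }
            }
            return 0;       /* fall back to a plain (non-zc) send */
    }

    /* Peer ACKed 'cookie': find and release the matching tx */
    static void *zc_ack(uint64_t cookie)
    {
            for (int i = 0; i < MAX_PENDING; i++) {
                    if (zc_list[i].cookie == cookie) {
                            zc_list[i].cookie = 0;
                            return zc_list[i].tx;
                    }
            }
            return NULL;    /* unknown cookie: protocol error */
    }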
-void
-ksocknal_tx_launched (ksock_tx_t *tx)
+static void
+ksocknal_unzc_req(ksock_tx_t *tx)
{
-#if SOCKNAL_ZC
- if (atomic_read (&tx->tx_zccd.zccd_count) != 1) {
- ksock_conn_t *conn = tx->tx_conn;
-
- /* zccd skbufs are still in-flight. First take a ref on
- * conn, so it hangs about for ksocknal_tx_done... */
- atomic_inc (&conn->ksnc_refcount);
+ ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+
+ spin_lock(&peer->ksnp_lock);
- /* ...then drop the initial ref on zccd, so the zero copy
- * callback can occur */
- zccd_put (&tx->tx_zccd);
+ if (tx->tx_msg.ksm_zc_req_cookie == 0) {
+ /* Not waiting for an ACK */
+ spin_unlock(&peer->ksnp_lock);
return;
}
-#endif
- /* Any zero-copy-ness (if any) has completed; I can complete the
- * transmit now, avoiding an extra schedule */
- ksocknal_tx_done (tx, 0);
+
+ tx->tx_msg.ksm_zc_req_cookie = 0;
+ list_del(&tx->tx_zc_list);
+
+ spin_unlock(&peer->ksnp_lock);
+
+ ksocknal_tx_decref(tx);
}
-void
-ksocknal_process_transmit (ksock_sched_t *sched, unsigned long *irq_flags)
+int
+ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
- ksock_conn_t *conn;
- ksock_tx_t *tx;
int rc;
-
- LASSERT (!list_empty (&sched->kss_tx_conns));
- conn = list_entry(sched->kss_tx_conns.next, ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
-
- LASSERT (conn->ksnc_tx_scheduled);
- LASSERT (conn->ksnc_tx_ready);
- LASSERT (!list_empty (&conn->ksnc_tx_queue));
- tx = list_entry (conn->ksnc_tx_queue.next, ksock_tx_t, tx_list);
- /* assume transmit will complete now, so dequeue while I've got lock */
- list_del (&tx->tx_list);
- spin_unlock_irqrestore (&sched->kss_lock, *irq_flags);
+ if (!tx->tx_checked_zc) {
+ tx->tx_checked_zc = 1;
+ ksocknal_check_zc_req(tx);
+ }
+
+ rc = ksocknal_transmit (conn, tx);
- LASSERT (tx->tx_resid > 0);
+ CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
- conn->ksnc_tx_ready = 0;/* write_space may race with me and set ready */
- mb(); /* => clear BEFORE trying to write */
+ if (tx->tx_resid == 0) {
+ /* Sent everything OK */
+ LASSERT (rc == 0);
- rc = ksocknal_sendmsg (conn, tx);
+ return (0);
+ }
- CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
+ if (rc == -EAGAIN)
+ return (rc);
- if (rc != 0) {
- if (ksocknal_close_conn_unlocked (conn, rc)) {
- /* I'm the first to close */
- CERROR ("[%p] Error %d on write to "LPX64" ip %08x:%d\n",
- conn, rc, conn->ksnc_peer->ksnp_nid,
- conn->ksnc_ipaddr, conn->ksnc_port);
- }
- ksocknal_tx_launched (tx);
- spin_lock_irqsave (&sched->kss_lock, *irq_flags);
+ if (rc == -ENOMEM) {
+ static int counter;
+
+ counter++; /* exponential backoff warnings */
+ if ((counter & (-counter)) == counter)
+ CWARN("%u ENOMEM tx %p (%u allocated)\n",
+ counter, conn, atomic_read(&libcfs_kmemory));
+
+ /* Queue on ksnd_enomem_conns for retry after a timeout */
+ spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+
+ /* enomem list takes over scheduler's ref... */
+ LASSERT (conn->ksnc_tx_scheduled);
+ list_add_tail(&conn->ksnc_tx_list,
+ &ksocknal_data.ksnd_enomem_conns);
+ if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+ SOCKNAL_ENOMEM_RETRY),
+ ksocknal_data.ksnd_reaper_waketime))
+ cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+
+ spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ return (rc);
+ }
- } else if (tx->tx_resid == 0) {
- /* everything went; assume more can go, and avoid
- * write_space locking */
- conn->ksnc_tx_ready = 1;
+ /* Actual error */
+ LASSERT (rc < 0);
- ksocknal_tx_launched (tx);
- spin_lock_irqsave (&sched->kss_lock, *irq_flags);
- } else {
- spin_lock_irqsave (&sched->kss_lock, *irq_flags);
+ if (!conn->ksnc_closing) {
+ switch (rc) {
+ case -ECONNRESET:
+ LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
+ "while we were sending data; it may have "
+ "rebooted.\n",
+ HIPQUAD(conn->ksnc_ipaddr));
+ break;
+ default:
+ LCONSOLE_WARN("There was an unexpected network error "
+ "while writing to %u.%u.%u.%u: %d.\n",
+ HIPQUAD(conn->ksnc_ipaddr), rc);
+ break;
+ }
+ CDEBUG(D_NET, "[%p] Error %d on write to %s"
+ " ip %d.%d.%d.%d:%d\n", conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+ }
+
+ ksocknal_unzc_req(tx);
- /* back onto HEAD of tx_queue */
- list_add (&tx->tx_list, &conn->ksnc_tx_queue);
- }
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings (conn,
+ (conn->ksnc_closing) ? 0 : rc);
- /* no space to write, or nothing to write? */
- if (!conn->ksnc_tx_ready ||
- list_empty (&conn->ksnc_tx_queue)) {
- /* mark not scheduled */
- conn->ksnc_tx_scheduled = 0;
- /* drop scheduler's ref */
- ksocknal_put_conn (conn);
- } else {
- /* stay scheduled */
- list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns);
- }
+ return (rc);
}
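The (counter & (-counter)) == counter test in the ENOMEM path is a power-of-two check: negation isolates the lowest set bit, so the expression holds exactly when one bit is set. The warning therefore fires on occurrence 1, 2, 4, 8, ... — exponential backoff with no state beyond the counter itself. For example:

    #include <stdio.h>

    /* True iff n is a power of two: n & -n keeps only the lowest set bit */
    static int should_warn(unsigned n)
    {
            return n != 0 && (n & -n) == n;
    }

    int main(void)
    {
            for (unsigned n = 1; n <= 20; n++)
                    if (should_warn(n))
                            printf("warning #%u\n", n); /* 1 2 4 8 16 */
            return 0;
    }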
void
-ksocknal_launch_autoconnect_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked (ksock_route_t *route)
{
- unsigned long flags;
/* called holding write lock on ksnd_global_lock */
- LASSERT (route->ksnr_conn == NULL);
- LASSERT (!route->ksnr_deleted && !route->ksnr_connecting);
+ LASSERT (!route->ksnr_scheduled);
+ LASSERT (!route->ksnr_connecting);
+ LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
- route->ksnr_connecting = 1;
- atomic_inc (&route->ksnr_refcount); /* extra ref for asynchd */
+ route->ksnr_scheduled = 1; /* scheduling conn for connd */
+ ksocknal_route_addref(route); /* extra ref for connd */
- spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
- list_add_tail (&route->ksnr_connect_list,
- &ksocknal_data.ksnd_autoconnectd_routes);
-
- if (waitqueue_active (&ksocknal_data.ksnd_autoconnectd_waitq))
- wake_up (&ksocknal_data.ksnd_autoconnectd_waitq);
+ list_add_tail (&route->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
+ cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
- spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
}
-ksock_peer_t *
-ksocknal_find_target_peer_locked (ksock_tx_t *tx, ptl_nid_t nid)
+ksock_conn_t *
+ksocknal_find_conn_locked (int payload_nob, ksock_peer_t *peer)
{
- ptl_nid_t target_nid;
- int rc;
- ksock_peer_t *peer = ksocknal_find_peer_locked (nid);
-
- if (peer != NULL)
- return (peer);
-
- if (tx->tx_isfwd) {
- CERROR ("Can't send packet to "LPX64
- ": routed target is not a peer\n", nid);
- return (NULL);
+ struct list_head *tmp;
+ ksock_conn_t *typed = NULL;
+ int tnob = 0;
+ ksock_conn_t *fallback = NULL;
+ int fnob = 0;
+ ksock_conn_t *conn;
+
+ list_for_each (tmp, &peer->ksnp_conns) {
+ ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+ int hdr_nob = 0;
+#if SOCKNAL_ROUND_ROBIN
+ const int nob = 0;
+#else
+ int nob = atomic_read(&c->ksnc_tx_nob) +
+ SOCK_WMEM_QUEUED(c->ksnc_sock);
+#endif
+ LASSERT (!c->ksnc_closing);
+ LASSERT(c->ksnc_proto != NULL);
+
+ if (fallback == NULL || nob < fnob) {
+ fallback = c;
+ fnob = nob;
+ }
+
+ if (!*ksocknal_tunables.ksnd_typed_conns)
+ continue;
+
+ if (payload_nob == 0) {
+ /* noop packet */
+ hdr_nob = offsetof(ksock_msg_t, ksm_u);
+ } else {
+ /* lnet packet */
+ hdr_nob = (c->ksnc_proto == &ksocknal_protocol_v2x)?
+ offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload):
+ sizeof(lnet_hdr_t);
+ }
+
+ switch (c->ksnc_type) {
+ default:
+ CERROR("ksnc_type bad: %u\n", c->ksnc_type);
+ LBUG();
+ case SOCKLND_CONN_ANY:
+ break;
+ case SOCKLND_CONN_BULK_IN:
+ continue;
+ case SOCKLND_CONN_BULK_OUT:
+ if ((hdr_nob + payload_nob) < *ksocknal_tunables.ksnd_min_bulk)
+ continue;
+ break;
+ case SOCKLND_CONN_CONTROL:
+ if ((hdr_nob + payload_nob) >= *ksocknal_tunables.ksnd_min_bulk)
+ continue;
+ break;
+ }
+
+ if (typed == NULL || nob < tnob) {
+ typed = c;
+ tnob = nob;
+ }
}
-
- rc = kpr_lookup (&ksocknal_data.ksnd_router, nid, tx->tx_nob,
- &target_nid);
- if (rc != 0) {
- CERROR ("Can't route to "LPX64": router error %d\n", nid, rc);
- return (NULL);
+
+ /* prefer the typed selection */
+ conn = (typed != NULL) ? typed : fallback;
+
+#if SOCKNAL_ROUND_ROBIN
+ if (conn != NULL) {
+ /* round-robin all else being equal */
+ list_del (&conn->ksnc_list);
+ list_add_tail (&conn->ksnc_list, &peer->ksnp_conns);
}
+#endif
+ return conn;
+}
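ksocknal_find_conn_locked() now load-balances across a peer's connections: it tracks the least-loaded connection whose type suits the message size (bulk vs. control, split at ksnd_min_bulk) plus a least-loaded fallback of any type, preferring the typed match. The core selection, stripped of the locking and the SOCKNAL_ROUND_ROBIN variant (a sketch, not the patch's types):

    #include <stddef.h>

    struct conn {
            struct conn *next;
            int          queued;    /* bytes already in flight */
            int          is_bulk;   /* connection "type" */
    };

    /* Prefer the least-loaded connection whose type suits the payload;
     * fall back to the least-loaded connection of any type. */
    static struct conn *pick_conn(struct conn *list, int nob, int min_bulk)
    {
            struct conn *typed = NULL, *fallback = NULL;
            int want_bulk = (nob >= min_bulk);

            for (struct conn *c = list; c != NULL; c = c->next) {
                    if (fallback == NULL || c->queued < fallback->queued)
                            fallback = c;
                    if (c->is_bulk != want_bulk)
                            continue;       /* wrong type for this size */
                    if (typed == NULL || c->queued < typed->queued)
                            typed = c;
            }
            return typed != NULL ? typed : fallback;
    }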
+
+void
+ksocknal_next_mono_tx(ksock_conn_t *conn)
+{
+ ksock_tx_t *tx = conn->ksnc_tx_mono;
- peer = ksocknal_find_peer_locked (target_nid);
- if (peer != NULL)
- return (peer);
+ /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x);
+ LASSERT(!list_empty(&conn->ksnc_tx_queue));
+ LASSERT(tx != NULL);
- CERROR ("Can't send packet to "LPX64": no peer entry\n", target_nid);
- return (NULL);
+ if (tx->tx_list.next == &conn->ksnc_tx_queue) {
+ /* no more packets queued */
+ conn->ksnc_tx_mono = NULL;
+ } else {
+ conn->ksnc_tx_mono = list_entry(tx->tx_list.next, ksock_tx_t, tx_list);
+ LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == tx->tx_msg.ksm_type);
+ }
}
-ksock_conn_t *
-ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer)
+int
+ksocknal_piggyback_zcack(ksock_conn_t *conn, __u64 cookie)
{
- struct list_head *tmp;
- ksock_conn_t *conn = NULL;
+ ksock_tx_t *tx = conn->ksnc_tx_mono;
- /* Find the conn with the shortest tx queue */
- list_for_each (tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = list_entry (tmp, ksock_conn_t, ksnc_list);
+ /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
- LASSERT (!c->ksnc_closing);
-
- if (conn == NULL ||
- atomic_read (&conn->ksnc_tx_nob) >
- atomic_read (&c->ksnc_tx_nob))
- conn = c;
+ if (tx == NULL)
+ return 0;
+
+ if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
+ /* tx is noop zc-ack, can't piggyback zc-ack cookie */
+ return 0;
}
- return (conn);
+ LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
+ LASSERT(tx->tx_msg.ksm_zc_ack_cookie == 0);
+
+ /* piggyback the zc-ack cookie */
+ tx->tx_msg.ksm_zc_ack_cookie = cookie;
+ ksocknal_next_mono_tx(conn);
+
+ return 1;
}
void
ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
{
- unsigned long flags;
ksock_sched_t *sched = conn->ksnc_scheduler;
+ ksock_msg_t *msg = &tx->tx_msg;
+ ksock_tx_t *ztx;
+ int bufnob = 0;
- /* called holding global lock (read or irq-write) */
+ /* called holding global lock (read or irq-write) and caller may
+ * not have dropped this lock between finding conn and calling me,
+ * so we don't need the {get,put}connsock dance to deref
+ * ksnc_sock... */
+ LASSERT(!conn->ksnc_closing);
- CDEBUG (D_NET, "Sending to "LPX64" on port %d\n",
- conn->ksnc_peer->ksnp_nid, conn->ksnc_port);
+ CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
- atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
- tx->tx_resid = tx->tx_nob;
- tx->tx_conn = conn;
+ tx->tx_checked_zc = 0;
+ conn->ksnc_proto->pro_pack(tx);
-#if SOCKNAL_ZC
- zccd_init (&tx->tx_zccd, ksocknal_zc_callback);
- /* NB this sets 1 ref on zccd, so the callback can only occur after
- * I've released this ref. */
-#endif
+ /* Ensure the frags we've been given EXACTLY match the number of
+ * bytes we want to send. Many TCP/IP stacks disregard any total
+ * size parameters passed to them and just look at the frags.
+ *
+ * We always expect at least 1 mapped fragment containing the
+ * complete ksocknal message header. */
+ LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+ lnet_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob);
+ LASSERT (tx->tx_niov >= 1);
+ LASSERT (tx->tx_resid == tx->tx_nob);
+
+ CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+ tx, (tx->tx_lnetmsg != NULL)? tx->tx_lnetmsg->msg_hdr.type:
+ KSOCK_MSG_NOOP,
+ tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
+
+ atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ tx->tx_conn = conn;
+ ksocknal_conn_addref(conn); /* +1 ref for tx */
+
+ /*
+ * NB Darwin: SOCK_WMEM_QUEUED()->sock_getsockopt() will take
+ * a blockable lock (the socket lock), so SOCK_WMEM_QUEUED can't
+ * be called under a spinlock.
+ */
+ bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
+ spin_lock_bh (&sched->kss_lock);
+
+ if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ /* First packet starts the timeout */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_tx_bufnob = 0;
+ mb(); /* order with adding to tx_queue */
+ }
+
+ ztx = NULL;
+
+ if (msg->ksm_type == KSOCK_MSG_NOOP) {
+ /* The packet is noop ZC ACK, try to piggyback the ack_cookie
+ * on a normal packet so I don't need to send it */
+ LASSERT(msg->ksm_zc_req_cookie == 0);
+ LASSERT(msg->ksm_zc_ack_cookie != 0);
+
+ if (conn->ksnc_tx_mono != NULL) {
+ if (ksocknal_piggyback_zcack(conn, msg->ksm_zc_ack_cookie)) {
+ /* zc-ack cookie is piggybacked */
+ atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
+ ztx = tx; /* Put to freelist later */
+ } else {
+ /* no packet can piggyback zc-ack cookie */
+ list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
+ }
+ } else {
+ /* It's the first mono-packet */
+ conn->ksnc_tx_mono = tx;
+ list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
+ }
- spin_lock_irqsave (&sched->kss_lock, flags);
+ } else {
+ /* It's a normal packet - can it piggyback a noop zc-ack that
+ * has been queued already? */
+ LASSERT(msg->ksm_zc_ack_cookie == 0);
+
+ if (conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x packet */
+ conn->ksnc_tx_mono != NULL) {
+ if (conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
+ /* There is a noop zc-ack can be piggybacked */
+ ztx = conn->ksnc_tx_mono;
+
+ msg->ksm_zc_ack_cookie = ztx->tx_msg.ksm_zc_ack_cookie;
+ ksocknal_next_mono_tx(conn);
+
+ /* use tx to replace the noop zc-ack packet, ztx will
+ * be put to freelist later */
+ list_add(&tx->tx_list, &ztx->tx_list);
+ list_del(&ztx->tx_list);
+
+ atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ } else {
+ /* no noop zc-ack packet, just enqueue it */
+ LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_LNET);
+ list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
+ }
- list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
+ } else if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
+ /* it's the first mono-packet, enqueue it */
+ conn->ksnc_tx_mono = tx;
+ list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
+ } else {
+ /* V1.x packet, just enqueue it */
+ list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
+ }
+ }
+
+ if (ztx != NULL)
+ list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
if (conn->ksnc_tx_ready && /* able to send */
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
- atomic_inc (&conn->ksnc_refcount);
+ ksocknal_conn_addref(conn);
list_add_tail (&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ cfs_waitq_signal (&sched->kss_waitq);
}
- spin_unlock_irqrestore (&sched->kss_lock, flags);
+ spin_unlock_bh (&sched->kss_lock);
}
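The queueing logic avoids standalone noop packets where it can: a noop carrying only a zc-ack cookie is folded into an already-queued LNET message, and a new LNET message conversely steals the cookie from a queued noop and takes its place in the queue, retiring the noop to the zombie list. A simplified sketch of the merge decision (one queued message, illustrative types):

    #include <stddef.h>
    #include <stdint.h>

    struct msg {
            int      is_noop;       /* carries only a zc-ack cookie */
            uint64_t ack_cookie;
    };

    /* Merge a new message 'tx' with the queued message 'q' (NULL if the
     * queue is empty).  Returns what should be enqueued: NULL means tx
     * was absorbed into q; returning tx when q is a noop means tx takes
     * q's place and q is retired. */
    static struct msg *merge_tx(struct msg *q, struct msg *tx)
    {
            if (q == NULL || q->is_noop == tx->is_noop)
                    return tx;                      /* nothing to fold */

            if (tx->is_noop) {
                    q->ack_cookie = tx->ack_cookie; /* piggyback; drop tx */
                    return NULL;
            }

            tx->ack_cookie = q->ack_cookie;         /* steal the noop's ack */
            return tx;                              /* caller unlinks q */
    }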
ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer, int eager_only)
+ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
struct list_head *tmp;
ksock_route_t *route;
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry (tmp, ksock_route_t, ksnr_list);
+
+ LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+
+ if (route->ksnr_scheduled) /* connections being established */
+ continue;
+
+ /* all route types connected ? */
+ if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
+ continue;
+
+ /* too soon to retry this guy? */
+ if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+ cfs_time_aftereq (cfs_time_current(),
+ route->ksnr_timeout)))
+ continue;
- if (route->ksnr_conn == NULL && /* not connected */
- !route->ksnr_connecting && /* not connecting */
- (!eager_only || route->ksnr_eager) && /* wants to connect */
- time_after_eq (jiffies, route->ksnr_timeout)) /* OK to retry */
- return (route);
+ return (route);
}
return (NULL);
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry (tmp, ksock_route_t, ksnr_list);
+
+ LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
- if (route->ksnr_connecting)
+ if (route->ksnr_scheduled)
return (route);
}
}
int
-ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid)
+ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
- unsigned long flags;
ksock_peer_t *peer;
ksock_conn_t *conn;
ksock_route_t *route;
rwlock_t *g_lock;
-
- /* Ensure the frags we've been given EXACTLY match the number of
- * bytes we want to send. Many TCP/IP stacks disregard any total
- * size parameters passed to them and just look at the frags.
- *
- * We always expect at least 1 mapped fragment containing the
- * complete portals header. */
- LASSERT (lib_iov_nob (tx->tx_niov, tx->tx_iov) +
- lib_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob);
- LASSERT (tx->tx_niov >= 1);
- LASSERT (tx->tx_iov[0].iov_len >= sizeof (ptl_hdr_t));
-
- CDEBUG (D_NET, "packet %p type %d, nob %d niov %d nkiov %d\n",
- tx, ((ptl_hdr_t *)tx->tx_iov[0].iov_base)->type,
- tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
-
- tx->tx_conn = NULL; /* only set when assigned a conn */
+ int retry;
+ int rc;
+
+ LASSERT (tx->tx_conn == NULL);
+ LASSERT (tx->tx_lnetmsg != NULL);
g_lock = &ksocknal_data.ksnd_global_lock;
- read_lock (g_lock);
- peer = ksocknal_find_target_peer_locked (tx, nid);
- if (peer == NULL) {
+ for (retry = 0;; retry = 1) {
+#if !SOCKNAL_ROUND_ROBIN
+ read_lock (g_lock);
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL) {
+ if (ksocknal_find_connectable_route_locked(peer) == NULL) {
+ conn = ksocknal_find_conn_locked (tx->tx_lnetmsg->msg_len, peer);
+ if (conn != NULL) {
+ /* I've got no routes that need to be
+ * connecting and I do have an actual
+ * connection... */
+ ksocknal_queue_tx_locked (tx, conn);
+ read_unlock (g_lock);
+ return (0);
+ }
+ }
+ }
+
+ /* I'll need a write lock... */
read_unlock (g_lock);
- return (PTL_FAIL);
- }
+#endif
+ write_lock_bh (g_lock);
- if (ksocknal_find_connectable_route_locked(peer, 1) == NULL) {
- conn = ksocknal_find_conn_locked (tx, peer);
- if (conn != NULL) {
- /* I've got no unconnected autoconnect routes that
- * need to be connected, and I do have an actual
- * connection... */
- ksocknal_queue_tx_locked (tx, conn);
- read_unlock (g_lock);
- return (PTL_OK);
- }
- }
-
- /* Making one or more connections; I'll need a write lock... */
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL)
+ break;
+
+ write_unlock_bh (g_lock);
- atomic_inc (&peer->ksnp_refcount); /* +1 ref for me while I unlock */
- read_unlock (g_lock);
- write_lock_irqsave (g_lock, flags);
-
- if (peer->ksnp_closing) { /* peer deleted as I blocked! */
- write_unlock_irqrestore (g_lock, flags);
- ksocknal_put_peer (peer);
- return (PTL_FAIL);
+ if ((id.pid & LNET_PID_USERFLAG) != 0) {
+ CERROR("Refusing to create a connection to "
+ "userspace process %s\n", libcfs_id2str(id));
+ return -EHOSTUNREACH;
+ }
+
+ if (retry) {
+ CERROR("Can't find peer %s\n", libcfs_id2str(id));
+ return -EHOSTUNREACH;
+ }
+
+ rc = ksocknal_add_peer(ni, id,
+ LNET_NIDADDR(id.nid),
+ lnet_acceptor_port());
+ if (rc != 0) {
+ CERROR("Can't add peer %s: %d\n",
+ libcfs_id2str(id), rc);
+ return rc;
+ }
}
- ksocknal_put_peer (peer); /* drop ref I got above */
-
for (;;) {
- /* launch all eager autoconnections */
- route = ksocknal_find_connectable_route_locked (peer, 1);
+ /* launch any/all connections that need it */
+ route = ksocknal_find_connectable_route_locked (peer);
if (route == NULL)
break;
- ksocknal_launch_autoconnect_locked (route);
+ ksocknal_launch_connection_locked (route);
}
- conn = ksocknal_find_conn_locked (tx, peer);
+ conn = ksocknal_find_conn_locked (tx->tx_lnetmsg->msg_len, peer);
if (conn != NULL) {
/* Connection exists; queue message on it */
ksocknal_queue_tx_locked (tx, conn);
- write_unlock_irqrestore (g_lock, flags);
- return (PTL_OK);
+ write_unlock_bh (g_lock);
+ return (0);
}
- if (ksocknal_find_connecting_route_locked (peer) == NULL) {
- /* no autoconnect routes actually connecting now. Scrape
- * the barrel for non-eager autoconnects */
- route = ksocknal_find_connectable_route_locked (peer, 0);
- if (route != NULL) {
- ksocknal_launch_autoconnect_locked (route);
- } else {
- write_unlock_irqrestore (g_lock, flags);
- return (PTL_FAIL);
- }
+ if (peer->ksnp_accepting > 0 ||
+ ksocknal_find_connecting_route_locked (peer) != NULL) {
+ /* Queue the message until a connection is established */
+ list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+ write_unlock_bh (g_lock);
+ return 0;
}
-
- /* At least 1 connection is being established; queue the message... */
- list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
-
- write_unlock_irqrestore (g_lock, flags);
- return (PTL_OK);
-}
-
-ksock_ltx_t *
-ksocknal_setup_hdr (nal_cb_t *nal, void *private, lib_msg_t *cookie,
- ptl_hdr_t *hdr, int type)
-{
- ksock_ltx_t *ltx;
-
- /* I may not block for a transmit descriptor if I might block the
- * receiver, or an interrupt handler. */
- ltx = ksocknal_get_ltx (!(type == PTL_MSG_ACK ||
- type == PTL_MSG_REPLY ||
- in_interrupt ()));
- if (ltx == NULL) {
- CERROR ("Can't allocate tx desc\n");
- return (NULL);
- }
-
- /* Init local send packet (storage for hdr, finalize() args) */
- ltx->ltx_hdr = *hdr;
- ltx->ltx_private = private;
- ltx->ltx_cookie = cookie;
- /* Init common ltx_tx */
- ltx->ltx_tx.tx_isfwd = 0;
- ltx->ltx_tx.tx_nob = sizeof (*hdr);
-
- /* We always have 1 mapped frag for the header */
- ltx->ltx_tx.tx_niov = 1;
- ltx->ltx_tx.tx_iov = &ltx->ltx_iov_space.hdr;
- ltx->ltx_tx.tx_iov[0].iov_base = &ltx->ltx_hdr;
- ltx->ltx_tx.tx_iov[0].iov_len = sizeof (ltx->ltx_hdr);
-
- ltx->ltx_tx.tx_kiov = NULL;
- ltx->ltx_tx.tx_nkiov = 0;
+ write_unlock_bh (g_lock);
- return (ltx);
+ /* NB Routes may be ignored if connections to them failed recently */
+ CDEBUG(D_NETERROR, "No usable routes to %s\n", libcfs_id2str(id));
+ return (-EHOSTUNREACH);
}
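Note the retry structure: the first pass looks the peer up (optimistically under the read lock), and if it is missing the code adds a peer at the acceptor port and tries exactly once more before failing with -EHOSTUNREACH. The shape of that loop, as a sketch with hypothetical find_peer()/add_peer() helpers:

    #include <errno.h>
    #include <stddef.h>

    struct peer;                                    /* opaque here */
    extern struct peer *find_peer(long nid);        /* hypothetical */
    extern int          add_peer(long nid);         /* hypothetical */

    /* One-retry lookup: if the peer doesn't exist yet, create it and
     * look again; a second miss fails with -EHOSTUNREACH. */
    static int lookup_with_retry(long nid, struct peer **peerp)
    {
            for (int retry = 0; ; retry++) {
                    *peerp = find_peer(nid);
                    if (*peerp != NULL)
                            return 0;
                    if (retry || add_peer(nid) != 0)
                            return -EHOSTUNREACH;
            }
    }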
int
-ksocknal_send (nal_cb_t *nal, void *private, lib_msg_t *cookie,
- ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
- unsigned int payload_niov, struct iovec *payload_iov,
- size_t payload_len)
+ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- ksock_ltx_t *ltx;
- int rc;
+ int type = lntmsg->msg_type;
+ lnet_process_id_t target = lntmsg->msg_target;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct iovec *payload_iov = lntmsg->msg_iov;
+ lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ ksock_tx_t *tx;
+ int desc_size;
+ int rc;
/* NB 'private' is different depending on what we're sending.
- * Just ignore it until we can rely on it
- */
-
- CDEBUG(D_NET,
- "sending "LPSZ" bytes in %d mapped frags to nid: "LPX64
- " pid %d\n", payload_len, payload_niov, nid, pid);
-
- ltx = ksocknal_setup_hdr (nal, private, cookie, hdr, type);
- if (ltx == NULL)
- return (PTL_FAIL);
+ * Just ignore it... */
- /* append the payload_iovs to the one pointing at the header */
- LASSERT (ltx->ltx_tx.tx_niov == 1 && ltx->ltx_tx.tx_nkiov == 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
+ payload_nob, payload_niov, libcfs_id2str(target));
- memcpy (ltx->ltx_tx.tx_iov + 1, payload_iov,
- payload_niov * sizeof (*payload_iov));
- ltx->ltx_tx.tx_niov = 1 + payload_niov;
- ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_len;
-
- rc = ksocknal_launch_packet (&ltx->ltx_tx, nid);
- if (rc != PTL_OK)
- ksocknal_put_ltx (ltx);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT (!in_interrupt ());
- return (rc);
-}
-
-int
-ksocknal_send_pages (nal_cb_t *nal, void *private, lib_msg_t *cookie,
- ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
- unsigned int payload_niov, ptl_kiov_t *payload_iov, size_t payload_len)
-{
- ksock_ltx_t *ltx;
- int rc;
-
- /* NB 'private' is different depending on what we're sending.
- * Just ignore it until we can rely on it */
-
- CDEBUG(D_NET,
- "sending "LPSZ" bytes in %d mapped frags to nid: "LPX64" pid %d\n",
- payload_len, payload_niov, nid, pid);
-
- ltx = ksocknal_setup_hdr (nal, private, cookie, hdr, type);
- if (ltx == NULL)
- return (PTL_FAIL);
-
- LASSERT (ltx->ltx_tx.tx_niov == 1 && ltx->ltx_tx.tx_nkiov == 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ if (payload_iov != NULL)
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.virt.iov[1 + payload_niov]);
+ else
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.paged.kiov[payload_niov]);
- ltx->ltx_tx.tx_kiov = ltx->ltx_iov_space.payload.kiov;
- memcpy (ltx->ltx_tx.tx_kiov, payload_iov,
- payload_niov * sizeof (*payload_iov));
- ltx->ltx_tx.tx_nkiov = payload_niov;
- ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_len;
-
- rc = ksocknal_launch_packet (&ltx->ltx_tx, nid);
- if (rc != PTL_OK)
- ksocknal_put_ltx (ltx);
-
- return (rc);
-}
+ tx = ksocknal_alloc_tx(desc_size);
+ if (tx == NULL) {
+ CERROR("Can't allocate tx desc type %d size %d\n",
+ type, desc_size);
+ return (-ENOMEM);
+ }
+
+ tx->tx_conn = NULL; /* set when assigned a conn */
+ tx->tx_lnetmsg = lntmsg;
+
+ if (payload_iov != NULL) {
+ tx->tx_kiov = NULL;
+ tx->tx_nkiov = 0;
+ tx->tx_iov = tx->tx_frags.virt.iov;
+ tx->tx_niov = 1 +
+ lnet_extract_iov(payload_niov, &tx->tx_iov[1],
+ payload_niov, payload_iov,
+ payload_offset, payload_nob);
+ } else {
+ tx->tx_niov = 1;
+ tx->tx_iov = &tx->tx_frags.paged.iov;
+ tx->tx_kiov = tx->tx_frags.paged.kiov;
+ tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+ }
-void
-ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
-{
- ptl_nid_t nid = fwd->kprfd_gateway_nid;
- ksock_tx_t *tx = (ksock_tx_t *)&fwd->kprfd_scratch;
- int rc;
+ ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
+
+ /* The first fragment will be set later in pro_pack */
+ rc = ksocknal_launch_packet(ni, tx, target);
+ if (rc == 0)
+ return (0);
- CDEBUG (D_NET, "Forwarding [%p] -> "LPX64" ("LPX64"))\n", fwd,
- fwd->kprfd_gateway_nid, fwd->kprfd_target_nid);
-
- /* I'm the gateway; must be the last hop */
- if (nid == ksocknal_lib.ni.nid)
- nid = fwd->kprfd_target_nid;
-
- tx->tx_isfwd = 1; /* This is a forwarding packet */
- tx->tx_nob = fwd->kprfd_nob;
- tx->tx_niov = fwd->kprfd_niov;
- tx->tx_iov = fwd->kprfd_iov;
- tx->tx_nkiov = 0;
- tx->tx_kiov = NULL;
- tx->tx_hdr = (ptl_hdr_t *)fwd->kprfd_iov[0].iov_base;
-
- rc = ksocknal_launch_packet (tx, nid);
- if (rc != 0) {
- /* FIXME, could pass a better completion error */
- kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, -EHOSTUNREACH);
- }
+ ksocknal_free_tx(tx);
+ return (-EIO);
}
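desc_size deserves a note: the tx descriptor ends in a union of variable-length fragment arrays, so offsetof() against one-past-the-last element wanted yields an exact allocation — with one extra iov slot reserved for the message header in the mapped case. The idiom in isolation (GNU-style zero-length arrays, as the kernel code itself uses; struct names here are invented):

    #include <stddef.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    struct page_frag { void *page; unsigned off, len; };

    struct tx_desc {
            int nfrags;
            union {
                    struct iovec     iov[0];   /* hdr slot + mapped frags */
                    struct page_frag kiov[0];  /* paged frags */
            } frags;
    };

    /* Size the descriptor exactly: header iov plus 'niov' payload frags,
     * mirroring the offsetof() arithmetic in ksocknal_send() above. */
    static struct tx_desc *tx_desc_alloc_iov(int niov)
    {
            return malloc(offsetof(struct tx_desc, frags.iov[1 + niov]));
    }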
int
ksocknal_thread_start (int (*fn)(void *arg), void *arg)
{
- long pid = kernel_thread (fn, arg, 0);
+ long pid = cfs_kernel_thread (fn, arg, 0);
if (pid < 0)
return ((int)pid);
- atomic_inc (&ksocknal_data.ksnd_nthreads);
+ write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ ksocknal_data.ksnd_nthreads++;
+ write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (0);
}
void
ksocknal_thread_fini (void)
{
- atomic_dec (&ksocknal_data.ksnd_nthreads);
-}
-
-void
-ksocknal_fmb_callback (void *arg, int error)
-{
- ksock_fmb_t *fmb = (ksock_fmb_t *)arg;
- ksock_fmb_pool_t *fmp = fmb->fmb_pool;
- ptl_hdr_t *hdr = (ptl_hdr_t *) page_address(fmb->fmb_pages[0]);
- ksock_conn_t *conn = NULL;
- ksock_sched_t *sched;
- unsigned long flags;
-
- if (error != 0)
- CERROR("Failed to route packet from "LPX64" to "LPX64": %d\n",
- NTOH__u64(hdr->src_nid), NTOH__u64(hdr->dest_nid),
- error);
- else
- CDEBUG (D_NET, "routed packet from "LPX64" to "LPX64": OK\n",
- NTOH__u64 (hdr->src_nid), NTOH__u64 (hdr->dest_nid));
-
- /* drop peer ref taken on init */
- ksocknal_put_peer (fmb->fmb_peer);
-
- spin_lock_irqsave (&fmp->fmp_lock, flags);
-
- list_add (&fmb->fmb_list, &fmp->fmp_idle_fmbs);
-
- if (!list_empty (&fmp->fmp_blocked_conns)) {
- conn = list_entry (fmb->fmb_pool->fmp_blocked_conns.next,
- ksock_conn_t, ksnc_rx_list);
- list_del (&conn->ksnc_rx_list);
- }
-
- spin_unlock_irqrestore (&fmp->fmp_lock, flags);
-
- if (conn == NULL)
- return;
-
- CDEBUG (D_NET, "Scheduling conn %p\n", conn);
- LASSERT (conn->ksnc_rx_scheduled);
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP);
-
- conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB;
-
- sched = conn->ksnc_scheduler;
-
- spin_lock_irqsave (&sched->kss_lock, flags);
-
- list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns);
-
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
-
- spin_unlock_irqrestore (&sched->kss_lock, flags);
-}
-
-ksock_fmb_t *
-ksocknal_get_idle_fmb (ksock_conn_t *conn)
-{
- int payload_nob = conn->ksnc_rx_nob_left;
- int packet_nob = sizeof (ptl_hdr_t) + payload_nob;
- unsigned long flags;
- ksock_fmb_pool_t *pool;
- ksock_fmb_t *fmb;
-
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
- LASSERT (ksocknal_data.ksnd_fmbs != NULL);
-
- if (packet_nob <= SOCKNAL_SMALL_FWD_PAGES * PAGE_SIZE)
- pool = &ksocknal_data.ksnd_small_fmp;
- else
- pool = &ksocknal_data.ksnd_large_fmp;
-
- spin_lock_irqsave (&pool->fmp_lock, flags);
-
- if (!list_empty (&pool->fmp_idle_fmbs)) {
- fmb = list_entry(pool->fmp_idle_fmbs.next,
- ksock_fmb_t, fmb_list);
- list_del (&fmb->fmb_list);
- spin_unlock_irqrestore (&pool->fmp_lock, flags);
-
- return (fmb);
- }
-
- /* deschedule until fmb free */
-
- conn->ksnc_rx_state = SOCKNAL_RX_FMB_SLEEP;
-
- list_add_tail (&conn->ksnc_rx_list,
- &pool->fmp_blocked_conns);
-
- spin_unlock_irqrestore (&pool->fmp_lock, flags);
- return (NULL);
+ write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ ksocknal_data.ksnd_nthreads--;
+ write_unlock_bh (&ksocknal_data.ksnd_global_lock);
}
int
-ksocknal_init_fmb (ksock_conn_t *conn, ksock_fmb_t *fmb)
-{
- int payload_nob = conn->ksnc_rx_nob_left;
- int packet_nob = sizeof (ptl_hdr_t) + payload_nob;
- ptl_nid_t dest_nid = NTOH__u64 (conn->ksnc_hdr.dest_nid);
- int niov; /* at least the header */
- int nob;
-
- LASSERT (conn->ksnc_rx_scheduled);
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
- LASSERT (conn->ksnc_rx_nob_wanted == conn->ksnc_rx_nob_left);
- LASSERT (payload_nob >= 0);
- LASSERT (packet_nob <= fmb->fmb_npages * PAGE_SIZE);
- LASSERT (sizeof (ptl_hdr_t) < PAGE_SIZE);
-
- /* Got a forwarding buffer; copy the header we just read into the
- * forwarding buffer. If there's payload, start reading reading it
- * into the buffer, otherwise the forwarding buffer can be kicked
- * off immediately.
- *
- * NB fmb->fmb_iov spans the WHOLE packet.
- * conn->ksnc_rx_iov spans just the payload.
- */
- fmb->fmb_iov[0].iov_base = page_address (fmb->fmb_pages[0]);
-
- /* copy header */
- memcpy (fmb->fmb_iov[0].iov_base, &conn->ksnc_hdr, sizeof (ptl_hdr_t));
-
- /* Take a ref on the conn's peer to prevent module unload before
- * forwarding completes. NB we ref peer and not conn since because
- * all refs on conn after it has been closed must remove themselves
- * in finite time */
- fmb->fmb_peer = conn->ksnc_peer;
- atomic_inc (&conn->ksnc_peer->ksnp_refcount);
-
- if (payload_nob == 0) { /* got complete packet already */
- CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d fwd_start (immediate)\n",
- conn, NTOH__u64 (conn->ksnc_hdr.src_nid),
- dest_nid, packet_nob);
-
- fmb->fmb_iov[0].iov_len = sizeof (ptl_hdr_t);
-
- kpr_fwd_init (&fmb->fmb_fwd, dest_nid,
- packet_nob, 1, fmb->fmb_iov,
- ksocknal_fmb_callback, fmb);
-
- /* forward it now */
- kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);
-
- ksocknal_new_packet (conn, 0); /* on to next packet */
- return (1);
- }
-
- niov = 1;
- if (packet_nob <= PAGE_SIZE) { /* whole packet fits in first page */
- fmb->fmb_iov[0].iov_len = packet_nob;
- } else {
- fmb->fmb_iov[0].iov_len = PAGE_SIZE;
- nob = packet_nob - PAGE_SIZE;
-
- do {
- LASSERT (niov < fmb->fmb_npages);
- fmb->fmb_iov[niov].iov_base =
- page_address (fmb->fmb_pages[niov]);
- fmb->fmb_iov[niov].iov_len = MIN (PAGE_SIZE, nob);
- nob -= PAGE_SIZE;
- niov++;
- } while (nob > 0);
- }
-
- kpr_fwd_init (&fmb->fmb_fwd, dest_nid,
- packet_nob, niov, fmb->fmb_iov,
- ksocknal_fmb_callback, fmb);
-
- conn->ksnc_cookie = fmb; /* stash fmb for later */
- conn->ksnc_rx_state = SOCKNAL_RX_BODY_FWD; /* read in the payload */
-
- /* payload is desc's iov-ed buffer, but skipping the hdr */
- LASSERT (niov <= sizeof (conn->ksnc_rx_iov_space) /
- sizeof (struct iovec));
-
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base =
- (void *)(((unsigned long)fmb->fmb_iov[0].iov_base) +
- sizeof (ptl_hdr_t));
- conn->ksnc_rx_iov[0].iov_len =
- fmb->fmb_iov[0].iov_len - sizeof (ptl_hdr_t);
-
- if (niov > 1)
- memcpy(&conn->ksnc_rx_iov[1], &fmb->fmb_iov[1],
- (niov - 1) * sizeof (struct iovec));
-
- conn->ksnc_rx_niov = niov;
-
- CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d reading body\n", conn,
- NTOH__u64 (conn->ksnc_hdr.src_nid), dest_nid, payload_nob);
- return (0);
-}
-
-void
-ksocknal_fwd_parse (ksock_conn_t *conn)
+ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
{
- ksock_peer_t *peer;
- ptl_nid_t dest_nid = NTOH__u64 (conn->ksnc_hdr.dest_nid);
- int body_len = NTOH__u32 (PTL_HDR_LENGTH(&conn->ksnc_hdr));
-
- CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn,
- NTOH__u64 (conn->ksnc_hdr.src_nid),
- dest_nid, conn->ksnc_rx_nob_left);
-
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER);
- LASSERT (conn->ksnc_rx_scheduled);
-
- if (body_len < 0) { /* length corrupt (overflow) */
- CERROR("dropping packet from "LPX64" for "LPX64": packet "
- "size %d illegal\n", NTOH__u64 (conn->ksnc_hdr.src_nid),
- dest_nid, body_len);
-
- ksocknal_new_packet (conn, 0); /* on to new packet */
- ksocknal_close_conn_unlocked (conn, -EINVAL); /* give up on conn */
- return;
- }
+ static char ksocknal_slop_buffer[4096];
- if (ksocknal_data.ksnd_fmbs == NULL) { /* not forwarding */
- CERROR("dropping packet from "LPX64" for "LPX64": not "
- "forwarding\n", conn->ksnc_hdr.src_nid,
- conn->ksnc_hdr.dest_nid);
- /* on to new packet (skip this one's body) */
- ksocknal_new_packet (conn, body_len);
- return;
- }
+ int nob;
+ unsigned int niov;
+ int skipped;
- if (body_len > SOCKNAL_MAX_FWD_PAYLOAD) { /* too big to forward */
- CERROR ("dropping packet from "LPX64" for "LPX64
- ": packet size %d too big\n", conn->ksnc_hdr.src_nid,
- conn->ksnc_hdr.dest_nid, body_len);
- /* on to new packet (skip this one's body) */
- ksocknal_new_packet (conn, body_len);
- return;
- }
-
- /* should have gone direct */
- peer = ksocknal_get_peer (conn->ksnc_hdr.dest_nid);
- if (peer != NULL) {
- CERROR ("dropping packet from "LPX64" for "LPX64
- ": target is a peer\n", conn->ksnc_hdr.src_nid,
- conn->ksnc_hdr.dest_nid);
- ksocknal_put_peer (peer); /* drop ref from get above */
+ LASSERT(conn->ksnc_proto != NULL);
- /* on to next packet (skip this one's body) */
- ksocknal_new_packet (conn, body_len);
- return;
+ if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
+ /* Remind the socket to ack eagerly... */
+ ksocknal_lib_eager_ack(conn);
}
- conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB; /* Getting FMB now */
- conn->ksnc_rx_nob_left = body_len; /* stash packet size */
- conn->ksnc_rx_nob_wanted = body_len; /* (no slop) */
-}
-
-int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
-{
- static char ksocknal_slop_buffer[4096];
-
- int nob;
- int niov;
- int skipped;
-
if (nob_to_skip == 0) { /* right at next packet boundary now */
conn->ksnc_rx_started = 0;
mb (); /* racing with timeout thread */
- conn->ksnc_rx_state = SOCKNAL_RX_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof (ptl_hdr_t);
- conn->ksnc_rx_nob_left = sizeof (ptl_hdr_t);
+ switch (conn->ksnc_proto->pro_version) {
+ case KSOCK_PROTO_V2:
+ conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
+
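+                        /* NB the split below assumes a BULK_IN conn never
+                         * receives NOOPs (zc-acks travel the other way), so
+                         * the whole lnet header may be read speculatively;
+                         * other conn types may get a NOOP, so only the
+                         * common ksm_* prologue is safe to read up front */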
+ if (conn->ksnc_type == SOCKLND_CONN_BULK_IN) {
+                        /* always expect a whole lnet_hdr_t: avoids an extra read, for better performance */
+ conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload);
+ conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload);
+ conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload);
+
+ } else {
+                        /* can't tell yet whether it's a NOOP or a real message */
+ conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
+ conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
+ conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
+ }
+ break;
+
+ case KSOCK_PROTO_V1:
+ /* Receiving bare lnet_hdr_t */
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
+ conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
+ conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
+
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+ conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+ break;
- conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_hdr;
- conn->ksnc_rx_iov[0].iov_len = sizeof (ptl_hdr_t);
+ default:
+ LBUG ();
+ }
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_csum = ~0;
return (1);
}
- /* Set up to skip as much a possible now. If there's more left
+ /* Set up to skip as much as possible now. If there's more left
* (ran out of iov entries) we'll get called again */
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
return (0);
}
-void
-ksocknal_process_receive (ksock_sched_t *sched, unsigned long *irq_flags)
+/* (Sink) handle incoming ZC request from sender */
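+/* A sketch of the flow implemented below: first try to piggyback the ACK
+ * cookie on a message already queued to this peer; only if that fails,
+ * allocate a dedicated KSOCK_MSG_NOOP tx to carry the cookie back */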
+static int
+ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie)
{
- ksock_conn_t *conn;
- ksock_fmb_t *fmb;
- int rc;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+ ksock_sched_t *sched;
+ int rc;
+
+ read_lock (&ksocknal_data.ksnd_global_lock);
+
+ conn = ksocknal_find_conn_locked (0, peer);
+ if (conn == NULL) {
+ read_unlock (&ksocknal_data.ksnd_global_lock);
+ CERROR("Can't find connection to send zcack.\n");
+ return -ECONNRESET;
+ }
- /* NB: sched->ksnc_lock lock held */
+ sched = conn->ksnc_scheduler;
- LASSERT (!list_empty (&sched->kss_rx_conns));
- conn = list_entry(sched->kss_rx_conns.next, ksock_conn_t, ksnc_rx_list);
- list_del (&conn->ksnc_rx_list);
+ spin_lock_bh (&sched->kss_lock);
+ rc = ksocknal_piggyback_zcack(conn, cookie);
+ spin_unlock_bh (&sched->kss_lock);
- spin_unlock_irqrestore (&sched->kss_lock, *irq_flags);
+ read_unlock (&ksocknal_data.ksnd_global_lock);
+ if (rc) {
+                /* ACK cookie was piggybacked on a queued message */
+ return 0;
+ }
- CDEBUG(D_NET, "sched %p conn %p\n", sched, conn);
- LASSERT (atomic_read (&conn->ksnc_refcount) > 0);
- LASSERT (conn->ksnc_rx_scheduled);
- LASSERT (conn->ksnc_rx_ready);
+ tx = ksocknal_alloc_tx(KSOCK_NOOP_TX_SIZE);
+ if (tx == NULL) {
+ CERROR("Can't allocate noop tx desc\n");
+ return -ENOMEM;
+ }
- /* doesn't need a forwarding buffer */
- if (conn->ksnc_rx_state != SOCKNAL_RX_GET_FMB)
- goto try_read;
+ tx->tx_conn = NULL;
+ tx->tx_lnetmsg = NULL;
+ tx->tx_kiov = NULL;
+ tx->tx_nkiov = 0;
+ tx->tx_iov = tx->tx_frags.virt.iov;
+ tx->tx_niov = 1;
- get_fmb:
- fmb = ksocknal_get_idle_fmb (conn);
- if (fmb == NULL) { /* conn descheduled waiting for idle fmb */
- spin_lock_irqsave (&sched->kss_lock, *irq_flags);
- return;
+ ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
+ tx->tx_msg.ksm_zc_ack_cookie = cookie; /* incoming cookie */
+
+ read_lock (&ksocknal_data.ksnd_global_lock);
+
+ conn = ksocknal_find_conn_locked (0, peer);
+ if (conn == NULL) {
+ read_unlock (&ksocknal_data.ksnd_global_lock);
+ ksocknal_free_tx(tx);
+ CERROR("Can't find connection to send zcack.\n");
+ return -ECONNRESET;
}
+ ksocknal_queue_tx_locked(tx, conn);
+
+ read_unlock (&ksocknal_data.ksnd_global_lock);
- if (ksocknal_init_fmb (conn, fmb)) /* packet forwarded ? */
- goto out; /* come back later for next packet */
+ return 0;
+}
- try_read:
- /* NB: sched lock NOT held */
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER ||
- conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
- conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD ||
- conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+/* (Sender) handle ZC_ACK from sink */
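+/* The sink echoed back our zc_req_cookie: find the matching tx parked on
+ * ksnp_zc_req_list and drop the reference held until the zero-copy
+ * payload was acknowledged */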
+static int
+ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie)
+{
+ ksock_tx_t *tx;
+ struct list_head *ctmp;
- LASSERT (conn->ksnc_rx_nob_wanted > 0);
+ spin_lock(&peer->ksnp_lock);
- conn->ksnc_rx_ready = 0;/* data ready may race with me and set ready */
- mb(); /* => clear BEFORE trying to read */
+ list_for_each(ctmp, &peer->ksnp_zc_req_list) {
+ tx = list_entry (ctmp, ksock_tx_t, tx_zc_list);
+ if (tx->tx_msg.ksm_zc_req_cookie != cookie)
+ continue;
- rc = ksocknal_recvmsg(conn);
+ tx->tx_msg.ksm_zc_req_cookie = 0;
+ list_del(&tx->tx_zc_list);
- if (rc <= 0) {
- if (ksocknal_close_conn_unlocked (conn, rc)) {
- /* I'm the first to close */
- if (rc < 0)
- CERROR ("[%p] Error %d on read from "LPX64" ip %08x:%d\n",
- conn, rc, conn->ksnc_peer->ksnp_nid,
- conn->ksnc_ipaddr, conn->ksnc_port);
- else
- CERROR ("[%p] EOF from "LPX64" ip %08x:%d\n",
- conn, conn->ksnc_peer->ksnp_nid,
- conn->ksnc_ipaddr, conn->ksnc_port);
- }
- goto out;
+ spin_unlock(&peer->ksnp_lock);
+
+ ksocknal_tx_decref(tx);
+ return 0;
}
+ spin_unlock(&peer->ksnp_lock);
+ return -EPROTO;
+}
+
+int
+ksocknal_process_receive (ksock_conn_t *conn)
+{
+ int rc;
- if (conn->ksnc_rx_nob_wanted != 0) /* short read */
- goto out; /* try again later */
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
- /* got all I wanted, assume there's more - prevent data_ready locking */
- conn->ksnc_rx_ready = 1;
+ /* NB: sched lock NOT held */
+        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
+ LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+ again:
+ if (conn->ksnc_rx_nob_wanted != 0) {
+ rc = ksocknal_receive(conn);
+
+ if (rc <= 0) {
+ LASSERT (rc != -EAGAIN);
+ if (rc == 0)
+ CDEBUG (D_NET, "[%p] EOF from %s"
+ " ip %d.%d.%d.%d:%d\n", conn,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+ else if (!conn->ksnc_closing)
+ CERROR ("[%p] Error %d on read from %s"
+ " ip %d.%d.%d.%d:%d\n",
+ conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings (conn,
+ (conn->ksnc_closing) ? 0 : rc);
+ return (rc == 0 ? -ESHUTDOWN : rc);
+ }
+
+ if (conn->ksnc_rx_nob_wanted != 0) {
+                        /* short read: descheduled until
+                         * ksocknal_read_callback() sees more data arrive */
+                        return (-EAGAIN);
+ }
+ }
switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_HEADER:
- if (conn->ksnc_hdr.type != HTON__u32(PTL_MSG_HELLO) &&
- NTOH__u64(conn->ksnc_hdr.dest_nid) != ksocknal_lib.ni.nid) {
- /* This packet isn't for me */
- ksocknal_fwd_parse (conn);
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_HEADER: /* skipped (zero payload) */
- goto out; /* => come back later */
- case SOCKNAL_RX_SLOP: /* skipping packet's body */
- goto try_read; /* => go read it */
- case SOCKNAL_RX_GET_FMB: /* forwarding */
- goto get_fmb; /* => go get a fwd msg buffer */
- default:
- LBUG ();
+ case SOCKNAL_RX_KSM_HEADER:
+ if (conn->ksnc_flip) {
+ __swab32s(&conn->ksnc_msg.ksm_type);
+ __swab32s(&conn->ksnc_msg.ksm_csum);
+ __swab64s(&conn->ksnc_msg.ksm_zc_req_cookie);
+ __swab64s(&conn->ksnc_msg.ksm_zc_ack_cookie);
+ }
+
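+                /* NB a NOOP carries no payload, so its checksum can be
+                 * verified right here; KSOCK_MSG_LNET checksums are checked
+                 * after the payload arrives (SOCKNAL_RX_LNET_PAYLOAD below) */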
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
+ conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
+ /* NOOP Checksum error */
+ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (-EIO);
+ }
+
+ if (conn->ksnc_msg.ksm_zc_ack_cookie != 0) {
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x);
+
+ rc = ksocknal_handle_zc_ack(conn->ksnc_peer,
+ conn->ksnc_msg.ksm_zc_ack_cookie);
+ if (rc != 0) {
+ CERROR("%s: Unknown zero copy ACK cookie: "LPU64"\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_zc_ack_cookie);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (rc);
}
- /* Not Reached */
}
- /* sets wanted_len, iovs etc */
- lib_parse(&ksocknal_lib, &conn->ksnc_hdr, conn);
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
+ ksocknal_new_packet (conn, 0);
+                        return 0;       /* NOOP is done; nothing more to do */
+ }
+ LASSERT (conn->ksnc_msg.ksm_type == KSOCK_MSG_LNET);
+
+ if (conn->ksnc_type == SOCKLND_CONN_BULK_IN) {
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
+                        /* lnet_hdr_t has already been read (see ksocknal_new_packet()); fall through */
+ } else {
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
+ conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
+ conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
+
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+ conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
+
+ conn->ksnc_rx_niov = 1;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_nkiov = 0;
- if (conn->ksnc_rx_nob_wanted != 0) { /* need to get payload? */
- conn->ksnc_rx_state = SOCKNAL_RX_BODY;
- goto try_read; /* go read the payload */
+ goto again; /* read lnet header now */
}
- /* Fall through (completed packet for me) */
- case SOCKNAL_RX_BODY:
- /* payload all received */
- lib_finalize(&ksocknal_lib, NULL, conn->ksnc_cookie);
- /* Fall through */
+ case SOCKNAL_RX_LNET_HEADER:
+ /* unpack message header */
+ conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
- case SOCKNAL_RX_SLOP:
- /* starting new packet? */
- if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
- goto out; /* come back later */
- goto try_read; /* try to finish reading slop now */
+ if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
+ /* Userspace peer */
+ lnet_process_id_t *id = &conn->ksnc_peer->ksnp_id;
+ lnet_hdr_t *lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
+
+ /* Substitute process ID assigned at connection time */
+ lhdr->src_pid = cpu_to_le32(id->pid);
+ lhdr->src_nid = cpu_to_le64(id->nid);
+ }
+
+ conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
+ ksocknal_conn_addref(conn); /* ++ref while parsing */
+
+ rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
+ &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
+ conn->ksnc_peer->ksnp_id.nid, conn, 0);
+ if (rc < 0) {
+ /* I just received garbage: give up on this conn */
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings (conn, rc);
+ ksocknal_conn_decref(conn);
+ return (-EPROTO);
+ }
- case SOCKNAL_RX_BODY_FWD:
+ /* I'm racing with ksocknal_recv() */
+ LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+
+ if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
+ return 0;
+
+ /* ksocknal_recv() got called */
+ goto again;
+
+ case SOCKNAL_RX_LNET_PAYLOAD:
/* payload all received */
- CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d fwd_start (got body)\n",
- conn, NTOH__u64 (conn->ksnc_hdr.src_nid),
- NTOH__u64 (conn->ksnc_hdr.dest_nid),
- conn->ksnc_rx_nob_left);
+ rc = 0;
+
+ if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
+ conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
+ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
+ rc = -EIO;
+ }
+
+ lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
- /* forward the packet. NB ksocknal_init_fmb() put fmb into
- * conn->ksnc_cookie */
- fmb = (ksock_fmb_t *)conn->ksnc_cookie;
- kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);
+ if (rc == 0 && conn->ksnc_msg.ksm_zc_req_cookie != 0) {
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x);
+ rc = ksocknal_handle_zc_req(conn->ksnc_peer,
+ conn->ksnc_msg.ksm_zc_req_cookie);
+ }
- /* no slop in forwarded packets */
- LASSERT (conn->ksnc_rx_nob_left == 0);
+ if (rc != 0) {
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings (conn, rc);
+ return (-EPROTO);
+ }
+ /* Fall through */
- ksocknal_new_packet (conn, 0); /* on to next packet */
- goto out; /* (later) */
+ case SOCKNAL_RX_SLOP:
+ /* starting new packet? */
+ if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+ return 0; /* come back later */
+ goto again; /* try to finish reading slop now */
        default:
                break;
        }

        /* Not Reached */
        LBUG ();
-
- out:
- spin_lock_irqsave (&sched->kss_lock, *irq_flags);
-
- /* no data there to read? */
- if (!conn->ksnc_rx_ready) {
- /* let socket callback schedule again */
- conn->ksnc_rx_scheduled = 0;
- /* drop scheduler's ref */
- ksocknal_put_conn (conn);
- } else {
- /* stay scheduled */
- list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns);
- }
+ return (-EINVAL); /* keep gcc happy */
}
int
-ksocknal_recv (nal_cb_t *nal, void *private, lib_msg_t *msg,
- unsigned int niov, struct iovec *iov, size_t mlen, size_t rlen)
+ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- ksock_conn_t *conn = (ksock_conn_t *)private;
+ ksock_conn_t *conn = (ksock_conn_t *)private;
+ ksock_sched_t *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
- LASSERT (niov <= PTL_MD_MAX_IOV);
+ LASSERT (niov <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_niov = niov;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- memcpy (conn->ksnc_rx_iov, iov, niov * sizeof (*iov));
-
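+        /* LNet hands us either virtual fragments (iov) or page fragments
+         * (kiov), never both; mlen == 0 needs no buffer at all */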
+ if (mlen == 0 || iov != NULL) {
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
+ conn->ksnc_rx_niov =
+ lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
+ niov, iov, offset, mlen);
+ } else {
+ conn->ksnc_rx_niov = 0;
+ conn->ksnc_rx_iov = NULL;
+ conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
+ conn->ksnc_rx_nkiov =
+ lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
+ niov, kiov, offset, mlen);
+ }
+
LASSERT (mlen ==
- lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
- lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
+ lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
+ lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
- return (rlen);
-}
+ LASSERT (conn->ksnc_rx_scheduled);
-int
-ksocknal_recv_pages (nal_cb_t *nal, void *private, lib_msg_t *msg,
- unsigned int niov, ptl_kiov_t *kiov, size_t mlen, size_t rlen)
-{
- ksock_conn_t *conn = (ksock_conn_t *)private;
+ spin_lock_bh (&sched->kss_lock);
- LASSERT (mlen <= rlen);
- LASSERT (niov <= PTL_MD_MAX_IOV);
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_PARSE_WAIT:
+ list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ cfs_waitq_signal (&sched->kss_waitq);
+ LASSERT (conn->ksnc_rx_ready);
+ break;
+
+ case SOCKNAL_RX_PARSE:
+ /* scheduler hasn't noticed I'm parsing yet */
+ break;
+ }
+
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
- conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_wanted = mlen;
- conn->ksnc_rx_nob_left = rlen;
+ spin_unlock_bh (&sched->kss_lock);
+ ksocknal_conn_decref(conn);
+ return (0);
+}
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_nkiov = niov;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- memcpy (conn->ksnc_rx_kiov, kiov, niov * sizeof (*kiov));
+static inline int
+ksocknal_sched_cansleep(ksock_sched_t *sched)
+{
+ int rc;
- LASSERT (mlen ==
- lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
- lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
+ spin_lock_bh (&sched->kss_lock);
- return (rlen);
+ rc = (!ksocknal_data.ksnd_shuttingdown &&
+ list_empty(&sched->kss_rx_conns) &&
+ list_empty(&sched->kss_tx_conns));
+
+ spin_unlock_bh (&sched->kss_lock);
+ return (rc);
}
int ksocknal_scheduler (void *arg)
{
ksock_sched_t *sched = (ksock_sched_t *)arg;
- unsigned long flags;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
int rc;
int nloops = 0;
int id = sched - ksocknal_data.ksnd_schedulers;
char name[16];
- snprintf (name, sizeof (name),"ksocknald_%02d", id);
- kportal_daemonize (name);
- kportal_blockallsigs ();
+ snprintf (name, sizeof (name),"socknal_sd%02d", id);
+ cfs_daemonize (name);
+ cfs_block_allsigs ();
-#if (CONFIG_SMP && CPU_AFFINITY)
- if ((cpu_online_map & (1 << id)) != 0) {
-#if 0
- current->cpus_allowed = (1 << id);
-#else
- set_cpus_allowed (current, 1<<id);
-#endif
+#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
+ id = ksocknal_sched2cpu(id);
+ if (cpu_online(id)) {
+ cpumask_t m;
+ cpu_set(id, m);
+ set_cpus_allowed(current, m);
} else {
- CERROR ("Can't set CPU affinity for %s\n", name);
+ CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
}
#endif /* CONFIG_SMP && CPU_AFFINITY */
-
- spin_lock_irqsave (&sched->kss_lock, flags);
+
+ spin_lock_bh (&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
int did_something = 0;
/* Ensure I progress everything semi-fairly */
if (!list_empty (&sched->kss_rx_conns)) {
+ conn = list_entry(sched->kss_rx_conns.next,
+ ksock_conn_t, ksnc_rx_list);
+ list_del(&conn->ksnc_rx_list);
+
+ LASSERT(conn->ksnc_rx_scheduled);
+ LASSERT(conn->ksnc_rx_ready);
+
+ /* clear rx_ready in case receive isn't complete.
+                 * Do it BEFORE we call ksocknal_process_receive(), since
+ * data_ready can set it any time after we release
+ * kss_lock. */
+ conn->ksnc_rx_ready = 0;
+ spin_unlock_bh (&sched->kss_lock);
+
+ rc = ksocknal_process_receive(conn);
+
+ spin_lock_bh (&sched->kss_lock);
+
+ /* I'm the only one that can clear this flag */
+ LASSERT(conn->ksnc_rx_scheduled);
+
+ /* Did process_receive get everything it wanted? */
+ if (rc == 0)
+ conn->ksnc_rx_ready = 1;
+
+ if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
+ /* Conn blocked waiting for ksocknal_recv()
+ * I change its state (under lock) to signal
+ * it can be rescheduled */
+ conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
+ } else if (conn->ksnc_rx_ready) {
+ /* reschedule for rx */
+ list_add_tail (&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ } else {
+ conn->ksnc_rx_scheduled = 0;
+ /* drop my ref */
+ ksocknal_conn_decref(conn);
+ }
+
did_something = 1;
- /* drops & regains kss_lock */
- ksocknal_process_receive (sched, &flags);
}
if (!list_empty (&sched->kss_tx_conns)) {
- did_something = 1;
- /* drops and regains kss_lock */
- ksocknal_process_transmit (sched, &flags);
- }
-#if SOCKNAL_ZC
- if (!list_empty (&sched->kss_zctxdone_list)) {
- ksock_tx_t *tx =
- list_entry(sched->kss_zctxdone_list.next,
- ksock_tx_t, tx_list);
- did_something = 1;
+ CFS_LIST_HEAD (zlist);
- list_del (&tx->tx_list);
- spin_unlock_irqrestore (&sched->kss_lock, flags);
+ if (!list_empty(&sched->kss_zombie_noop_txs)) {
+ list_add(&zlist, &sched->kss_zombie_noop_txs);
+ list_del_init(&sched->kss_zombie_noop_txs);
+ }
- ksocknal_tx_done (tx, 1);
+ conn = list_entry(sched->kss_tx_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ list_del (&conn->ksnc_tx_list);
+
+ LASSERT(conn->ksnc_tx_scheduled);
+ LASSERT(conn->ksnc_tx_ready);
+ LASSERT(!list_empty(&conn->ksnc_tx_queue));
+
+ tx = list_entry(conn->ksnc_tx_queue.next,
+ ksock_tx_t, tx_list);
+
+ if (conn->ksnc_tx_mono == tx)
+ ksocknal_next_mono_tx(conn);
+
+ /* dequeue now so empty list => more to send */
+ list_del(&tx->tx_list);
+
+ /* Clear tx_ready in case send isn't complete. Do
+ * it BEFORE we call process_transmit, since
+ * write_space can set it any time after we release
+ * kss_lock. */
+ conn->ksnc_tx_ready = 0;
+ spin_unlock_bh (&sched->kss_lock);
+
+ if (!list_empty(&zlist)) {
+                                /* free zombie noop txs; this is fast because
+                                 * noop txs just go back on the freelist */
+ ksocknal_txlist_done(NULL, &zlist, 0);
+ }
- spin_lock_irqsave (&sched->kss_lock, flags);
+ rc = ksocknal_process_transmit(conn, tx);
+
+ if (rc == -ENOMEM || rc == -EAGAIN) {
+ /* Incomplete send: replace tx on HEAD of tx_queue */
+ spin_lock_bh (&sched->kss_lock);
+ list_add (&tx->tx_list, &conn->ksnc_tx_queue);
+ } else {
+ /* Complete send; tx -ref */
+ ksocknal_tx_decref (tx);
+
+ spin_lock_bh (&sched->kss_lock);
+ /* assume space for more */
+ conn->ksnc_tx_ready = 1;
+ }
+
+ if (rc == -ENOMEM) {
+ /* Do nothing; after a short timeout, this
+ * conn will be reposted on kss_tx_conns. */
+ } else if (conn->ksnc_tx_ready &&
+ !list_empty (&conn->ksnc_tx_queue)) {
+ /* reschedule for tx */
+ list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ } else {
+ conn->ksnc_tx_scheduled = 0;
+ /* drop my ref */
+ ksocknal_conn_decref(conn);
+ }
+
+ did_something = 1;
}
-#endif
if (!did_something || /* nothing to do */
++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
- spin_unlock_irqrestore (&sched->kss_lock, flags);
+ spin_unlock_bh (&sched->kss_lock);
nloops = 0;
if (!did_something) { /* wait for something to do */
-#if SOCKNAL_ZC
- rc = wait_event_interruptible (sched->kss_waitq,
- ksocknal_data.ksnd_shuttingdown ||
- !list_empty(&sched->kss_rx_conns) ||
- !list_empty(&sched->kss_tx_conns) ||
- !list_empty(&sched->kss_zctxdone_list));
-#else
- rc = wait_event_interruptible (sched->kss_waitq,
- ksocknal_data.ksnd_shuttingdown ||
- !list_empty(&sched->kss_rx_conns) ||
- !list_empty(&sched->kss_tx_conns));
-#endif
+ rc = wait_event_interruptible_exclusive(
+ sched->kss_waitq,
+ !ksocknal_sched_cansleep(sched));
LASSERT (rc == 0);
- } else
- our_cond_resched();
+ } else {
+ our_cond_resched();
+ }
+
+ spin_lock_bh (&sched->kss_lock);
+ }
+ }
+
+ spin_unlock_bh (&sched->kss_lock);
+ ksocknal_thread_fini ();
+ return (0);
+}
+
+/*
+ * Add connection to kss_rx_conns of scheduler
+ * and wakeup the scheduler.
+ */
+void ksocknal_read_callback (ksock_conn_t *conn)
+{
+ ksock_sched_t *sched;
+ ENTRY;
+
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh (&sched->kss_lock);
+
+ conn->ksnc_rx_ready = 1;
+
+ if (!conn->ksnc_rx_scheduled) { /* not being progressed */
+ list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ conn->ksnc_rx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
+
+ cfs_waitq_signal (&sched->kss_waitq);
+ }
+ spin_unlock_bh (&sched->kss_lock);
+
+ EXIT;
+}
+
+/*
+ * Add connection to kss_tx_conns of scheduler
+ * and wakeup the scheduler.
+ */
+void ksocknal_write_callback (ksock_conn_t *conn)
+{
+ ksock_sched_t *sched;
+ ENTRY;
+
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh (&sched->kss_lock);
+
+ conn->ksnc_tx_ready = 1;
+
+        if (!conn->ksnc_tx_scheduled && /* not being progressed */
+            !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
+ list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
+
+ cfs_waitq_signal (&sched->kss_waitq);
+ }
+
+ spin_unlock_bh (&sched->kss_lock);
+
+ EXIT;
+}
+
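+/* Sniff the protocol from the HELLO's leading magic: LNET_PROTO_MAGIC (in
+ * either endianness) means V2.x; the old TCP magic, which V1.x sends as the
+ * dest_nid of an lnet_hdr_t, means V1.x; anything else isn't ours */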
+ksock_protocol_t *
+ksocknal_compat_protocol (ksock_hello_msg_t *hello)
+{
+ if ((hello->kshm_magic == LNET_PROTO_MAGIC &&
+ hello->kshm_version == KSOCK_PROTO_V2) ||
+ (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC) &&
+ hello->kshm_version == __swab32(KSOCK_PROTO_V2)))
+ return &ksocknal_protocol_v2x;
+
+ if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
+ lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
+
+ CLASSERT (sizeof (lnet_magicversion_t) ==
+ offsetof (ksock_hello_msg_t, kshm_src_nid));
+
+ if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
+ hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
+ return &ksocknal_protocol_v1x;
+ }
+
+ return NULL;
+}
+
+static int
+ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
+{
+ cfs_socket_t *sock = conn->ksnc_sock;
+ lnet_hdr_t *hdr;
+ lnet_magicversion_t *hmv;
+ int rc;
+ int i;
+
+ CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
+
+ LIBCFS_ALLOC(hdr, sizeof(*hdr));
+ if (hdr == NULL) {
+ CERROR("Can't allocate lnet_hdr_t\n");
+ return -ENOMEM;
+ }
+
+ hmv = (lnet_magicversion_t *)&hdr->dest_nid;
+
+ /* Re-organize V2.x message header to V1.x (lnet_hdr_t)
+ * header and send out */
+ hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
+ hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
+ hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
+
+ if (the_lnet.ln_testprotocompat != 0) {
+ /* single-shot proto check */
+ LNET_LOCK();
+ if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ hmv->version_major++; /* just different! */
+ the_lnet.ln_testprotocompat &= ~1;
+ }
+ if ((the_lnet.ln_testprotocompat & 2) != 0) {
+ hmv->magic = LNET_PROTO_MAGIC;
+ the_lnet.ln_testprotocompat &= ~2;
+ }
+ LNET_UNLOCK();
+ }
+
+ hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
+ hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
+ hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
+ hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
+ hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
+ hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
+
+ rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
+
+ if (rc != 0) {
+ CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ goto out;
+ }
+
+ if (hello->kshm_nips == 0)
+ goto out;
+
+ for (i = 0; i < hello->kshm_nips; i++) {
+ hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
+ }
+
+ rc = libcfs_sock_write(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)"
+ " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips,
+ HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ }
+out:
+ LIBCFS_FREE(hdr, sizeof(*hdr));
+
+ return rc;
+}
+
+static int
+ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
+{
+ cfs_socket_t *sock = conn->ksnc_sock;
+ int rc;
+
+ hello->kshm_magic = LNET_PROTO_MAGIC;
+ hello->kshm_version = KSOCK_PROTO_V2;
+
+ if (the_lnet.ln_testprotocompat != 0) {
+ /* single-shot proto check */
+ LNET_LOCK();
+ if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ hello->kshm_version++; /* just different! */
+ the_lnet.ln_testprotocompat &= ~1;
+ }
+ LNET_UNLOCK();
+ }
+
+ rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
+ lnet_acceptor_timeout());
+
+ if (rc != 0) {
+ CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ return rc;
+ }
- spin_lock_irqsave (&sched->kss_lock, flags);
- }
+ if (hello->kshm_nips == 0)
+ return 0;
+
+ rc = libcfs_sock_write(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)"
+ " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips,
+ HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
}
- spin_unlock_irqrestore (&sched->kss_lock, flags);
- ksocknal_thread_fini ();
- return (0);
+ return rc;
}
-void
-ksocknal_data_ready (struct sock *sk, int n)
+static int
+ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
{
- unsigned long flags;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- ENTRY;
+ cfs_socket_t *sock = conn->ksnc_sock;
+ lnet_hdr_t *hdr;
+ int rc;
+ int i;
- /* interleave correctly with closing sockets... */
- read_lock (&ksocknal_data.ksnd_global_lock);
+ LIBCFS_ALLOC(hdr, sizeof(*hdr));
+ if (hdr == NULL) {
+ CERROR("Can't allocate lnet_hdr_t\n");
+ return -ENOMEM;
+ }
- conn = sk->sk_user_data;
- if (conn == NULL) { /* raced with ksocknal_close_sock */
- LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
- sk->sk_data_ready (sk, n);
+ rc = libcfs_sock_read(sock, &hdr->src_nid,
+ sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid),
+ timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
goto out;
}
- if (!conn->ksnc_rx_ready) { /* new news */
- /* Set ASAP in case of concurrent calls to me */
- conn->ksnc_rx_ready = 1;
+ /* ...and check we got what we expected */
+ if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
+ CERROR ("Expecting a HELLO hdr,"
+ " but got type %d from %u.%u.%u.%u\n",
+ le32_to_cpu (hdr->type),
+ HIPQUAD(conn->ksnc_ipaddr));
+ rc = -EPROTO;
+ goto out;
+ }
- sched = conn->ksnc_scheduler;
+ hello->kshm_src_nid = le64_to_cpu (hdr->src_nid);
+ hello->kshm_src_pid = le32_to_cpu (hdr->src_pid);
+ hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation);
+ hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type);
+ hello->kshm_nips = le32_to_cpu (hdr->payload_length) /
+ sizeof (__u32);
- spin_lock_irqsave (&sched->kss_lock, flags);
+ if (hello->kshm_nips > LNET_MAX_INTERFACES) {
+ CERROR("Bad nips %d from ip %u.%u.%u.%u\n",
+ hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr));
+ rc = -EPROTO;
+ goto out;
+ }
- /* Set again (process_receive may have cleared while I blocked for the lock) */
- conn->ksnc_rx_ready = 1;
+ if (hello->kshm_nips == 0)
+ goto out;
- if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- atomic_inc (&conn->ksnc_refcount);
+ rc = libcfs_sock_read(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32), timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
+ goto out;
+ }
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ for (i = 0; i < hello->kshm_nips; i++) {
+ hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
+
+ if (hello->kshm_ips[i] == 0) {
+ CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n",
+ i, HIPQUAD(conn->ksnc_ipaddr));
+ rc = -EPROTO;
+ break;
}
-
- spin_unlock_irqrestore (&sched->kss_lock, flags);
}
+out:
+ LIBCFS_FREE(hdr, sizeof(*hdr));
- out:
- read_unlock (&ksocknal_data.ksnd_global_lock);
-
- EXIT;
+ return rc;
}
-void
-ksocknal_write_space (struct sock *sk)
+static int
+ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
{
- unsigned long flags;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
+ cfs_socket_t *sock = conn->ksnc_sock;
+ int rc;
+ int i;
- /* interleave correctly with closing sockets... */
- read_lock (&ksocknal_data.ksnd_global_lock);
+ if (hello->kshm_magic == LNET_PROTO_MAGIC)
+ conn->ksnc_flip = 0;
+ else
+ conn->ksnc_flip = 1;
+
+ rc = libcfs_sock_read(sock, &hello->kshm_src_nid,
+ offsetof(ksock_hello_msg_t, kshm_ips) -
+ offsetof(ksock_hello_msg_t, kshm_src_nid),
+ timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
+ return rc;
+ }
- conn = sk->sk_user_data;
+ if (conn->ksnc_flip) {
+ __swab32s(&hello->kshm_src_pid);
+ __swab64s(&hello->kshm_src_nid);
+ __swab32s(&hello->kshm_dst_pid);
+ __swab64s(&hello->kshm_dst_nid);
+ __swab64s(&hello->kshm_src_incarnation);
+ __swab64s(&hello->kshm_dst_incarnation);
+ __swab32s(&hello->kshm_ctype);
+ __swab32s(&hello->kshm_nips);
+ }
- CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
- sk, tcp_wspace(sk), SOCKNAL_TX_LOW_WATER(sk), conn,
- (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
- " ready" : " blocked"),
- (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
- " scheduled" : " idle"),
- (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
- " empty" : " queued"));
+ if (hello->kshm_nips > LNET_MAX_INTERFACES) {
+ CERROR("Bad nips %d from ip %u.%u.%u.%u\n",
+ hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
+ }
- if (conn == NULL) { /* raced with ksocknal_close_sock */
- LASSERT (sk->sk_write_space != &ksocknal_write_space);
- sk->sk_write_space (sk);
+ if (hello->kshm_nips == 0)
+ return 0;
+
+ rc = libcfs_sock_read(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32), timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
+ return rc;
+ }
- read_unlock (&ksocknal_data.ksnd_global_lock);
- return;
+ for (i = 0; i < hello->kshm_nips; i++) {
+ if (conn->ksnc_flip)
+ __swab32s(&hello->kshm_ips[i]);
+
+ if (hello->kshm_ips[i] == 0) {
+ CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n",
+ i, HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
+ }
}
- if (tcp_wspace(sk) >= SOCKNAL_TX_LOW_WATER(sk)) { /* got enough space */
- clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
+ return 0;
+}
- if (!conn->ksnc_tx_ready) { /* new news */
- /* Set ASAP in case of concurrent calls to me */
- conn->ksnc_tx_ready = 1;
+static void
+ksocknal_pack_msg_v1(ksock_tx_t *tx)
+{
+ /* V1.x has no KSOCK_MSG_NOOP */
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT(tx->tx_lnetmsg != NULL);
- sched = conn->ksnc_scheduler;
+ tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr;
+ tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
- spin_lock_irqsave (&sched->kss_lock, flags);
+ tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+}
- /* Set again (process_transmit may have
- cleared while I blocked for the lock) */
- conn->ksnc_tx_ready = 1;
+static void
+ksocknal_pack_msg_v2(ksock_tx_t *tx)
+{
+ tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;
- if (!conn->ksnc_tx_scheduled && // not being progressed
- !list_empty(&conn->ksnc_tx_queue)){//packets to send
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- atomic_inc (&conn->ksnc_refcount);
+ if (tx->tx_lnetmsg != NULL) {
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
- }
+ tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
+ tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload);
+ tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload) +
+ tx->tx_lnetmsg->msg_len;
+ } else {
+ LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
- spin_unlock_irqrestore (&sched->kss_lock, flags);
- }
+ tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
}
+        /* Don't checksum yet: a zc-ack may still be piggybacked on this packet before it starts sending */
+}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+static void
+ksocknal_unpack_msg_v1(ksock_msg_t *msg)
+{
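+        /* V1.x sent a bare lnet_hdr_t: synthesize the V2 envelope fields so
+         * the receive path only ever deals with one message format */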
+ msg->ksm_type = KSOCK_MSG_LNET;
+ msg->ksm_csum = 0;
+ msg->ksm_zc_req_cookie = 0;
+ msg->ksm_zc_ack_cookie = 0;
}
-int
-ksocknal_sock_write (struct socket *sock, void *buffer, int nob)
+static void
+ksocknal_unpack_msg_v2(ksock_msg_t *msg)
{
- int rc;
- mm_segment_t oldmm = get_fs();
-
- while (nob > 0) {
- struct iovec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &iov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = 0
- };
-
- set_fs (KERNEL_DS);
- rc = sock_sendmsg (sock, &msg, iov.iov_len);
- set_fs (oldmm);
-
- if (rc < 0)
- return (rc);
+ return; /* Do nothing */
+}
- if (rc == 0) {
- CERROR ("Unexpected zero rc\n");
- return (-ECONNABORTED);
- }
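+/* Protocol dispatch tables; the initializer order is assumed to match the
+ * ksock_protocol_t declaration: pro_version, pro_send_hello, pro_recv_hello,
+ * pro_pack (tx side), pro_unpack (rx side) */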
+ksock_protocol_t ksocknal_protocol_v1x =
+{
+ KSOCK_PROTO_V1,
+ ksocknal_send_hello_v1,
+ ksocknal_recv_hello_v1,
+ ksocknal_pack_msg_v1,
+ ksocknal_unpack_msg_v1
+};
- buffer = ((char *)buffer) + rc;
- nob -= rc;
- }
-
- return (0);
-}
+ksock_protocol_t ksocknal_protocol_v2x =
+{
+ KSOCK_PROTO_V2,
+ ksocknal_send_hello_v2,
+ ksocknal_recv_hello_v2,
+ ksocknal_pack_msg_v2,
+ ksocknal_unpack_msg_v2
+};
int
-ksocknal_sock_read (struct socket *sock, void *buffer, int nob)
+ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
- int rc;
- mm_segment_t oldmm = get_fs();
-
- while (nob > 0) {
- struct iovec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_name = NULL,
- .msg_namelen = 0,
- .msg_iov = &iov,
- .msg_iovlen = 1,
- .msg_control = NULL,
- .msg_controllen = 0,
- .msg_flags = 0
- };
-
- set_fs (KERNEL_DS);
- rc = sock_recvmsg (sock, &msg, iov.iov_len, 0);
- set_fs (oldmm);
-
- if (rc < 0)
- return (rc);
+ /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
+ ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+ lnet_nid_t srcnid;
- if (rc == 0)
- return (-ECONNABORTED);
+ LASSERT (0 <= hello->kshm_nips && hello->kshm_nips <= LNET_MAX_INTERFACES);
- buffer = ((char *)buffer) + rc;
- nob -= rc;
+ /* No need for getconnsock/putconnsock */
+ LASSERT (!conn->ksnc_closing);
+ LASSERT (conn->ksnc_proto != NULL);
+
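+        /* presumably a straight copy of ni->ni_nid unless portals
+         * compatibility is configured, in which case the source NID is
+         * rewritten to what the (portals) peer expects */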
+ srcnid = lnet_ptlcompat_srcnid(ni->ni_nid, peer_nid);
+
+ hello->kshm_src_nid = srcnid;
+ hello->kshm_dst_nid = peer_nid;
+ hello->kshm_src_pid = the_lnet.ln_pid;
+
+ hello->kshm_src_incarnation = net->ksnn_incarnation;
+ hello->kshm_ctype = conn->ksnc_type;
+
+ return conn->ksnc_proto->pro_send_hello(conn, hello);
+}
+
+int
+ksocknal_invert_type(int type)
+{
+ switch (type)
+ {
+ case SOCKLND_CONN_ANY:
+ case SOCKLND_CONN_CONTROL:
+ return (type);
+ case SOCKLND_CONN_BULK_IN:
+ return SOCKLND_CONN_BULK_OUT;
+ case SOCKLND_CONN_BULK_OUT:
+ return SOCKLND_CONN_BULK_IN;
+ default:
+ return (SOCKLND_CONN_NONE);
}
-
- return (0);
}
int
-ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid)
+ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+ __u64 *incarnation)
{
- int rc;
- ptl_hdr_t hdr;
- ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid;
+ cfs_socket_t *sock = conn->ksnc_sock;
+ int active;
+ int timeout;
+ int match = 0;
+ int rc;
+ ksock_protocol_t *proto;
+ lnet_process_id_t recv_id;
+
+ active = (peerid->nid != LNET_NID_ANY);
+ timeout = active ? *ksocknal_tunables.ksnd_timeout :
+ lnet_acceptor_timeout();
+
+ rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
+ return rc;
+ }
+
+ if (hello->kshm_magic != LNET_PROTO_MAGIC &&
+ hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
+ hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+ /* Unexpected magic! */
+ if (active ||
+ the_lnet.ln_ptlcompat == 0) {
+ CERROR ("Bad magic(1) %#08x (%#08x expected) from "
+ "%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC,
+ HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
+ }
- LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));
+ /* When portals compatibility is set, I may be passed a new
+ * connection "blindly" by the acceptor, and I have to
+ * determine if my peer has sent an acceptor connection request
+ * or not. This isn't a 'hello', so I'll get the acceptor to
+ * look at it... */
+ rc = lnet_accept(ni, sock, hello->kshm_magic);
+ if (rc != 0)
+ return -EPROTO;
- memset (&hdr, 0, sizeof (hdr));
- hmv->magic = __cpu_to_le32 (PORTALS_PROTO_MAGIC);
- hmv->version_major = __cpu_to_le32 (PORTALS_PROTO_VERSION_MAJOR);
- hmv->version_minor = __cpu_to_le32 (PORTALS_PROTO_VERSION_MINOR);
-
- hdr.src_nid = __cpu_to_le64 (ksocknal_lib.ni.nid);
- hdr.type = __cpu_to_le32 (PTL_MSG_HELLO);
+ /* ...and if it's OK I'm back to looking for a 'hello'... */
+ rc = libcfs_sock_read(sock, &hello->kshm_magic,
+ sizeof (hello->kshm_magic), timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
+ return rc;
+ }
- /* Assume sufficient socket buffering for this message */
- rc = ksocknal_sock_write (sock, &hdr, sizeof (hdr));
- if (rc != 0) {
- CERROR ("Error %d sending HELLO to "LPX64"\n", rc, nid);
- return (rc);
+ /* Only need to check V1.x magic */
+ if (hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+ CERROR ("Bad magic(2) %#08x (%#08x expected) from "
+ "%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC,
+ HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
+ }
}
- rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv));
+ rc = libcfs_sock_read(sock, &hello->kshm_version,
+ sizeof(hello->kshm_version), timeout);
if (rc != 0) {
- CERROR ("Error %d reading HELLO from "LPX64"\n", rc, nid);
- return (rc);
- }
-
- if (hmv->magic != __le32_to_cpu (PORTALS_PROTO_MAGIC)) {
- CERROR ("Bad magic %#08x (%#08x expected) from "LPX64"\n",
- __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, nid);
- return (-EINVAL);
- }
+ CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0 && rc != -EALREADY);
+ return rc;
+ }
+
+ proto = ksocknal_compat_protocol(hello);
+ if (proto == NULL) {
+ if (!active) {
+ /* unknown protocol from peer, tell peer my protocol */
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+ hello->kshm_nips = 0;
+ ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
+ }
- if (hmv->version_major != __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) ||
- hmv->version_minor != __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) {
- CERROR ("Incompatible protocol version %d.%d (%d.%d expected)"
- " from "LPX64"\n",
- __le16_to_cpu (hmv->version_major),
- __le16_to_cpu (hmv->version_minor),
- PORTALS_PROTO_VERSION_MAJOR,
- PORTALS_PROTO_VERSION_MINOR,
- nid);
- return (-EINVAL);
+ CERROR ("Unknown protocol version (%d.x expected)"
+ " from %u.%u.%u.%u\n",
+ conn->ksnc_proto->pro_version,
+ HIPQUAD(conn->ksnc_ipaddr));
+
+ return -EPROTO;
}
- LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0);
- /* version 0 sends magic/version as the dest_nid of a 'hello' header,
- * so read the rest of it in now... */
+ if (conn->ksnc_proto == proto)
+ match = 1;
+
+ conn->ksnc_proto = proto;
- rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv));
+        /* receive the rest of the hello message anyway */
+ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
if (rc != 0) {
- CERROR ("Error %d reading rest of HELLO hdr from "LPX64"\n",
- rc, nid);
- return (rc);
+                CERROR("Error %d reading or checking hello from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ return rc;
}
- /* ...and check we got what we expected */
- if (hdr.type != __cpu_to_le32 (PTL_MSG_HELLO) ||
- PTL_HDR_LENGTH (&hdr) != __cpu_to_le32 (0)) {
- CERROR ("Expecting a HELLO hdr with 0 payload,"
- " but got type %d with %d payload from "LPX64"\n",
- __le32_to_cpu (hdr.type),
- __le32_to_cpu (PTL_HDR_LENGTH (&hdr)), nid);
- return (-EINVAL);
- }
-
- if (__le64_to_cpu (hdr.src_nid) != nid) {
- CERROR ("Connected to nid "LPX64", but expecting "LPX64"\n",
- __le64_to_cpu (hdr.src_nid), nid);
- return (-EINVAL);
+ if (hello->kshm_src_nid == LNET_NID_ANY) {
+                CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
+                       "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
}
- return (0);
-}
-
-int
-ksocknal_setup_sock (struct socket *sock)
-{
- mm_segment_t oldmm = get_fs ();
- int rc;
- int option;
- struct linger linger;
+ if (conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
+ /* Userspace NAL assigns peer process ID from socket */
+ recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
+ recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+ } else {
+ recv_id.nid = hello->kshm_src_nid;
- /* Ensure this socket aborts active sends immediately when we close
- * it. */
-
- linger.l_onoff = 0;
- linger.l_linger = 0;
+ if (the_lnet.ln_ptlcompat > 1 && /* portals peers may exist */
+ LNET_NIDNET(recv_id.nid) == 0) /* this is one */
+ recv_id.pid = the_lnet.ln_pid; /* give it a sensible pid */
+ else
+ recv_id.pid = hello->kshm_src_pid;
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
- (char *)&linger, sizeof (linger));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set SO_LINGER: %d\n", rc);
- return (rc);
}
- option = -1;
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set SO_LINGER2: %d\n", rc);
- return (rc);
+ if (!active) { /* don't know peer's nid yet */
+ *peerid = recv_id;
+ } else if (peerid->pid != recv_id.pid ||
+ !lnet_ptlcompat_matchnid(peerid->nid, recv_id.nid)) {
+ LCONSOLE_ERROR("Connected successfully to %s on host "
+ "%u.%u.%u.%u, but they claimed they were "
+ "%s; please check your Lustre "
+ "configuration.\n",
+ libcfs_id2str(*peerid),
+ HIPQUAD(conn->ksnc_ipaddr),
+ libcfs_id2str(recv_id));
+ return -EPROTO;
+ }
+
+ if (conn->ksnc_type == SOCKLND_CONN_NONE) {
+ /* I've accepted this connection; peer determines type */
+ conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
+ if (conn->ksnc_type == SOCKLND_CONN_NONE) {
+ CERROR ("Unexpected type %d from %s ip %u.%u.%u.%u\n",
+ hello->kshm_ctype, libcfs_id2str(*peerid),
+ HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
+ }
+ } else if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
+ if (match) {
+ /* lost a connection race */
+ return -EALREADY;
+ }
+                /* an unmatched protocol gets SOCKLND_CONN_NONE anyway */
+ } else if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
+ CERROR ("Mismatched types: me %d, %s ip %u.%u.%u.%u %d\n",
+ conn->ksnc_type, libcfs_id2str(*peerid),
+ HIPQUAD(conn->ksnc_ipaddr),
+ hello->kshm_ctype);
+ return -EPROTO;
}
-#if SOCKNAL_USE_KEEPALIVES
- /* Keepalives: If 3/4 of the timeout elapses, start probing every
- * second until the timeout elapses. */
-
- option = (ksocknal_data.ksnd_io_timeout * 3) / 4;
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
- return (rc);
- }
-
- option = 1;
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
- return (rc);
- }
-
- option = ksocknal_data.ksnd_io_timeout / 4;
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
- return (rc);
- }
+ *incarnation = hello->kshm_src_incarnation;
- option = 1;
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
- return (rc);
- }
-#endif
- return (0);
+ return 0;
}
-int
-ksocknal_connect_peer (ksock_route_t *route)
+void
+ksocknal_connect (ksock_route_t *route)
{
- struct sockaddr_in peer_addr;
- mm_segment_t oldmm = get_fs();
- struct timeval tv;
- int fd;
- struct socket *sock;
- int rc;
-
- rc = sock_create (PF_INET, SOCK_STREAM, 0, &sock);
- if (rc != 0) {
- CERROR ("Can't create autoconnect socket: %d\n", rc);
- return (rc);
- }
+ CFS_LIST_HEAD (zombies);
+ ksock_peer_t *peer = route->ksnr_peer;
+ int type;
+ int wanted;
+ cfs_socket_t *sock;
+ cfs_time_t deadline;
+ int retry_later = 0;
+ int rc = 0;
- /* Ugh; have to map_fd for compatibility with sockets passed in
- * from userspace. And we actually need the sock->file refcounting
- * that this gives you :) */
+ deadline = cfs_time_add(cfs_time_current(),
+ cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
- fd = sock_map_fd (sock);
- if (fd < 0) {
- sock_release (sock);
- CERROR ("sock_map_fd error %d\n", fd);
- return (fd);
- }
+ write_lock_bh (&ksocknal_data.ksnd_global_lock);
- /* NB the fd now owns the ref on sock->file */
- LASSERT (sock->file != NULL);
- LASSERT (file_count(sock->file) == 1);
+ LASSERT (route->ksnr_scheduled);
+ LASSERT (!route->ksnr_connecting);
- /* Set the socket timeouts, so our connection attempt completes in
- * finite time */
- tv.tv_sec = ksocknal_data.ksnd_io_timeout;
- tv.tv_usec = 0;
+ route->ksnr_connecting = 1;
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDTIMEO,
- (char *)&tv, sizeof (tv));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set send timeout %d: %d\n",
- ksocknal_data.ksnd_io_timeout, rc);
- goto out;
- }
-
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVTIMEO,
- (char *)&tv, sizeof (tv));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set receive timeout %d: %d\n",
- ksocknal_data.ksnd_io_timeout, rc);
- goto out;
- }
+ for (;;) {
+ wanted = ksocknal_route_mask() & ~route->ksnr_connected;
- if (route->ksnr_nonagel) {
- int option = 1;
-
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't disable nagel: %d\n", rc);
- goto out;
+ /* stop connecting if peer/route got closed under me, or
+ * route got connected while queued */
+ if (peer->ksnp_closing || route->ksnr_deleted ||
+ wanted == 0) {
+ retry_later = 0;
+ break;
}
- }
-
- if (route->ksnr_buffer_size != 0) {
- int option = route->ksnr_buffer_size;
-
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDBUF,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set send buffer %d: %d\n",
- route->ksnr_buffer_size, rc);
- goto out;
+
+ /* reschedule if peer is connecting to me */
+ if (peer->ksnp_accepting > 0) {
+ CDEBUG(D_NET,
+ "peer %s(%d) already connecting to me, retry later.\n",
+ libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+ retry_later = 1;
}
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVBUF,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set receive buffer %d: %d\n",
- route->ksnr_buffer_size, rc);
- goto out;
+ if (retry_later) /* needs reschedule */
+ break;
+
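+                /* create the missing conn types in a fixed priority order:
+                 * ANY, then CONTROL, then BULK_IN, then BULK_OUT */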
+ if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+ type = SOCKLND_CONN_ANY;
+ } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+ type = SOCKLND_CONN_CONTROL;
+ } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+ type = SOCKLND_CONN_BULK_IN;
+ } else {
+ LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+ type = SOCKLND_CONN_BULK_OUT;
}
- }
-
- memset (&peer_addr, 0, sizeof (peer_addr));
- peer_addr.sin_family = AF_INET;
- peer_addr.sin_port = htons (route->ksnr_port);
- peer_addr.sin_addr.s_addr = htonl (route->ksnr_ipaddr);
-
- rc = sock->ops->connect (sock, (struct sockaddr *)&peer_addr,
- sizeof (peer_addr), sock->file->f_flags);
- if (rc != 0) {
- CERROR ("Error %d connecting to "LPX64"\n", rc,
- route->ksnr_peer->ksnp_nid);
- goto out;
- }
-
- if (route->ksnr_xchange_nids) {
- rc = ksocknal_exchange_nids (sock, route->ksnr_peer->ksnp_nid);
+
+ write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+
+ if (cfs_time_aftereq(cfs_time_current(), deadline)) {
+ rc = -ETIMEDOUT;
+ lnet_connect_console_error(rc, peer->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
+
+ rc = lnet_connect(&sock, peer->ksnp_id.nid,
+ route->ksnr_myipaddr,
+ route->ksnr_ipaddr, route->ksnr_port);
if (rc != 0)
- goto out;
- }
+ goto failed;
- rc = ksocknal_create_conn (route->ksnr_peer->ksnp_nid,
- route, sock, route->ksnr_irq_affinity);
- if (rc == 0) {
- /* Take an extra ref on sock->file to compensate for the
- * upcoming close which will lose fd's ref on it. */
- get_file (sock->file);
+ rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
+
+ if (rc < 0) {
+ lnet_connect_console_error(rc, peer->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
+
+ /* rc == EALREADY means I lost a connection race and my
+ * peer is connecting to me.
+ * rc == EPROTO means my peer is speaking an older
+ * protocol version. */
+ LASSERT (rc == 0 || rc == EALREADY || rc == EPROTO);
+
+ retry_later = rc != 0;
+ if (retry_later)
+ CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
+ libcfs_nid2str(peer->ksnp_id.nid));
+
+ write_lock_bh (&ksocknal_data.ksnd_global_lock);
}
- out:
- sys_close (fd);
- return (rc);
-}
+ route->ksnr_scheduled = 0;
+ route->ksnr_connecting = 0;
-void
-ksocknal_autoconnect (ksock_route_t *route)
-{
- LIST_HEAD (zombies);
- ksock_tx_t *tx;
- ksock_peer_t *peer;
- unsigned long flags;
- int rc;
-
- rc = ksocknal_connect_peer (route);
- if (rc == 0) {
- /* successfully autoconnected: create_conn did the
- * route/conn binding and scheduled any blocked packets,
- * so there's nothing left to do now. */
- return;
+ if (retry_later) {
+ /* re-queue for attention; this frees me up to handle
+ * the peer's incoming connection request */
+ ksocknal_launch_connection_locked(route);
}
- write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
+ write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ return;
+
+ failed:
+ write_lock_bh (&ksocknal_data.ksnd_global_lock);
- peer = route->ksnr_peer;
+ route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
+        /* This is a retry rather than a new connection: back off exponentially, clamped to [min,max] reconnect intervals below */
+ route->ksnr_retry_interval *= 2;
+ route->ksnr_retry_interval =
+ MAX(route->ksnr_retry_interval,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+ route->ksnr_retry_interval =
+ MIN(route->ksnr_retry_interval,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+
LASSERT (route->ksnr_retry_interval != 0);
- route->ksnr_timeout = jiffies + route->ksnr_retry_interval;
- route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2,
- SOCKNAL_MAX_RECONNECT_INTERVAL);
-
- if (!list_empty (&peer->ksnp_tx_queue) &&
- ksocknal_find_connecting_route_locked (peer) == NULL) {
+ route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_retry_interval);
+
+ if (!list_empty(&peer->ksnp_tx_queue) &&
+ peer->ksnp_accepting == 0 &&
+ ksocknal_find_connecting_route_locked(peer) == NULL) {
+                /* ksnp_tx_queue is only drained onto a conn once a
+                 * connection is established */
LASSERT (list_empty (&peer->ksnp_conns));
- /* None of the connections that the blocked packets are
- * waiting for have been successful. Complete them now... */
- do {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &zombies);
- } while (!list_empty (&peer->ksnp_tx_queue));
+ /* take all the blocked packets while I've got the lock and
+ * complete below... */
+ list_add(&zombies, &peer->ksnp_tx_queue);
+ list_del_init(&peer->ksnp_tx_queue);
}
- write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
+#if 0 /* irrelevant with only eager routes */
+ if (!route->ksnr_deleted) {
+ /* make this route least-favourite for re-selection */
+ list_del(&route->ksnr_list);
+ list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+ }
+#endif
+ write_unlock_bh (&ksocknal_data.ksnd_global_lock);
- while (!list_empty (&zombies)) {
- tx = list_entry (zombies.next, ksock_tx_t, tx_list);
-
- CERROR ("Deleting packet type %d len %d ("LPX64"->"LPX64")\n",
- NTOH__u32 (tx->tx_hdr->type),
- NTOH__u32 (PTL_HDR_LENGTH(tx->tx_hdr)),
- NTOH__u64 (tx->tx_hdr->src_nid),
- NTOH__u64 (tx->tx_hdr->dest_nid));
+ ksocknal_peer_failed(peer);
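+        /* complete the stolen txs now the lock is dropped; the nonzero
+         * final argument finishes them with an error */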
+ ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
+}
- list_del (&tx->tx_list);
- /* complete now */
- ksocknal_tx_done (tx, 0);
- }
+static inline int
+ksocknal_connd_connect_route_locked(void)
+{
+ /* Only handle an outgoing connection request if there is someone left
+ * to handle incoming connections */
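+        /* (e.g. with ksnd_nconnds=4, ksnd_connd_connecting can be at most 3
+         * once the caller increments it, always leaving one daemon free) */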
+ return !list_empty(&ksocknal_data.ksnd_connd_routes) &&
+ ((ksocknal_data.ksnd_connd_connecting + 1) <
+ *ksocknal_tunables.ksnd_nconnds);
+}
+
+static inline int
+ksocknal_connd_ready(void)
+{
+ int rc;
+
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ rc = ksocknal_data.ksnd_shuttingdown ||
+ !list_empty(&ksocknal_data.ksnd_connd_connreqs) ||
+ ksocknal_connd_connect_route_locked();
+
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ return rc;
}
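+
+/* NB ksocknal_connd_ready() takes and drops ksnd_connd_lock itself, so the
+ * daemon can pass it straight to wait_event_interruptible_exclusive() below
+ * and then re-take the lock to act on whatever became ready */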
int
-ksocknal_autoconnectd (void *arg)
+ksocknal_connd (void *arg)
{
long id = (long)arg;
char name[16];
- unsigned long flags;
+ ksock_connreq_t *cr;
ksock_route_t *route;
- int rc;
- snprintf (name, sizeof (name), "ksocknal_ad%02ld", id);
- kportal_daemonize (name);
- kportal_blockallsigs ();
+ snprintf (name, sizeof (name), "socknal_cd%02ld", id);
+ cfs_daemonize (name);
+ cfs_block_allsigs ();
- spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty (&ksocknal_data.ksnd_autoconnectd_routes)) {
- route = list_entry (ksocknal_data.ksnd_autoconnectd_routes.next,
- ksock_route_t, ksnr_connect_list);
+ if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ /* Connection accepted by the listener */
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+ ksock_connreq_t, ksncr_list);
+
+ list_del(&cr->ksncr_list);
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ ksocknal_create_conn(cr->ksncr_ni, NULL,
+ cr->ksncr_sock, SOCKLND_CONN_NONE);
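+                        /* NB ksocknal_create_conn() is presumed to dispose
+                         * of the socket itself on failure, so its return
+                         * code can be ignored here */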
+ lnet_ni_decref(cr->ksncr_ni);
+ LIBCFS_FREE(cr, sizeof(*cr));
- list_del (&route->ksnr_connect_list);
- spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ }
- ksocknal_autoconnect (route);
- ksocknal_put_route (route);
+ if (ksocknal_connd_connect_route_locked()) {
+ /* Connection request */
+ route = list_entry (ksocknal_data.ksnd_connd_routes.next,
+ ksock_route_t, ksnr_connd_list);
- spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
- continue;
+ list_del (&route->ksnr_connd_list);
+ ksocknal_data.ksnd_connd_connecting++;
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ ksocknal_connect (route);
+ ksocknal_route_decref(route);
+
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_connecting--;
}
-
- spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
- rc = wait_event_interruptible (ksocknal_data.ksnd_autoconnectd_waitq,
- ksocknal_data.ksnd_shuttingdown ||
- !list_empty (&ksocknal_data.ksnd_autoconnectd_routes));
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ wait_event_interruptible_exclusive(
+ ksocknal_data.ksnd_connd_waitq,
+ ksocknal_connd_ready());
- spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
}
- spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
ksocknal_thread_fini ();
return (0);
}
ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn (ksock_peer_t *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
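+        /* NB any conn returned below carries a ref taken with
+         * ksocknal_conn_addref() which the caller must drop */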
ksock_conn_t *conn;
struct list_head *ctmp;
- ksock_sched_t *sched;
list_for_each (ctmp, &peer->ksnp_conns) {
+ int error;
conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
- sched = conn->ksnc_scheduler;
- /* Don't need the {get,put}connsock dance to deref ksnc_sock... */
+ /* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
-
+
+                /* NB SOCK_ERROR() resets the socket's error code on
+                 * some platforms (e.g. Darwin 8.x) */
+ error = SOCK_ERROR(conn->ksnc_sock);
+ if (error != 0) {
+ ksocknal_conn_addref(conn);
+
+ switch (error) {
+ case ECONNRESET:
+ CDEBUG(D_NETERROR, "A connection with %s "
+ "(%u.%u.%u.%u:%d) was reset; "
+ "it may have rebooted.\n",
+ libcfs_id2str(peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+ break;
+ case ETIMEDOUT:
+ CDEBUG(D_NETERROR, "A connection with %s "
+ "(%u.%u.%u.%u:%d) timed out; the "
+ "network or node may be down.\n",
+ libcfs_id2str(peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+ break;
+ default:
+ CDEBUG(D_NETERROR, "An unexpected network error %d "
+ "occurred with %s "
+                                        "(%u.%u.%u.%u:%d)\n", error,
+ libcfs_id2str(peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+ break;
+ }
+
+ return (conn);
+ }
+
if (conn->ksnc_rx_started &&
- time_after_eq (jiffies, conn->ksnc_rx_deadline)) {
+ cfs_time_aftereq(cfs_time_current(),
+ conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
- atomic_inc (&conn->ksnc_refcount);
- CERROR ("Timed out RX from "LPX64" %p\n",
- peer->ksnp_nid, conn);
+ ksocknal_conn_addref(conn);
+ CDEBUG(D_NETERROR, "Timeout receiving from %s "
+ "(%u.%u.%u.%u:%d), state %d wanted %d left %d\n",
+ libcfs_id2str(peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port,
+ conn->ksnc_rx_state,
+ conn->ksnc_rx_nob_wanted,
+ conn->ksnc_rx_nob_left);
return (conn);
}
-
- if ((!list_empty (&conn->ksnc_tx_queue) ||
- conn->ksnc_sock->sk->wmem_queued != 0) &&
- time_after_eq (jiffies, conn->ksnc_tx_deadline)) {
- /* Timed out messages queued for sending, or
- * messages buffered in the socket's send buffer */
- atomic_inc (&conn->ksnc_refcount);
- CERROR ("Timed out TX to "LPX64" %s%d %p\n",
- peer->ksnp_nid,
- list_empty (&conn->ksnc_tx_queue) ? "" : "Q ",
- conn->ksnc_sock->sk->wmem_queued, conn);
+
+ if ((!list_empty(&conn->ksnc_tx_queue) ||
+ SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
+ cfs_time_aftereq(cfs_time_current(),
+ conn->ksnc_tx_deadline)) {
+ /* Timed out messages queued for sending or
+ * buffered in the socket's send buffer */
+ ksocknal_conn_addref(conn);
+ CDEBUG(D_NETERROR, "Timeout sending data to %s "
+                               "(%u.%u.%u.%u:%d); the network or that "
+ "node may be down.\n",
+ libcfs_id2str(peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
return (conn);
}
}
list_for_each (ptmp, peers) {
peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
conn = ksocknal_find_timed_out_conn (peer);
-
+
if (conn != NULL) {
read_unlock (&ksocknal_data.ksnd_global_lock);
- if (ksocknal_close_conn_unlocked (conn, -ETIMEDOUT)) {
- /* I actually closed... */
- CERROR ("Timeout out conn->"LPX64" ip %x:%d\n",
- peer->ksnp_nid, conn->ksnc_ipaddr,
- conn->ksnc_port);
- }
-
+ ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+
/* NB we won't find this one again, but we can't
* just proceed with the next peer, since we dropped
* ksnd_global_lock and it might be dead already! */
- ksocknal_put_conn (conn);
+ ksocknal_conn_decref(conn);
goto again;
}
}
int
ksocknal_reaper (void *arg)
{
- wait_queue_t wait;
- unsigned long flags;
+ cfs_waitlink_t wait;
ksock_conn_t *conn;
- int timeout;
+ ksock_sched_t *sched;
+ struct list_head enomem_conns;
+ int nenomem_conns;
+ cfs_duration_t timeout;
int i;
int peer_index = 0;
- unsigned long deadline = jiffies;
-
- kportal_daemonize ("ksocknal_reaper");
- kportal_blockallsigs ();
+ cfs_time_t deadline = cfs_time_current();
+
+ cfs_daemonize ("socknal_reaper");
+ cfs_block_allsigs ();
- init_waitqueue_entry (&wait, current);
+ CFS_INIT_LIST_HEAD(&enomem_conns);
+ cfs_waitlink_init (&wait);
- spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
ksock_conn_t, ksnc_list);
list_del (&conn->ksnc_list);
- spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
ksocknal_terminate_conn (conn);
- ksocknal_put_conn (conn);
+ ksocknal_conn_decref(conn);
- spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
continue;
}
ksock_conn_t, ksnc_list);
list_del (&conn->ksnc_list);
- spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
ksocknal_destroy_conn (conn);
- spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
continue;
}
-
- spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+
+ if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+ list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
+ list_del_init(&ksocknal_data.ksnd_enomem_conns);
+ }
+
+ spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+
+ /* reschedule all the connections that stalled with ENOMEM... */
+ nenomem_conns = 0;
+ while (!list_empty (&enomem_conns)) {
+ conn = list_entry (enomem_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ list_del (&conn->ksnc_tx_list);
+
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh (&sched->kss_lock);
+
+ LASSERT (conn->ksnc_tx_scheduled);
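+                        /* ksnc_tx_scheduled stayed set while the conn sat on
+                         * the enomem list, so it goes straight back onto the
+                         * scheduler's queue */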
+ conn->ksnc_tx_ready = 1;
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+ cfs_waitq_signal (&sched->kss_waitq);
+
+ spin_unlock_bh (&sched->kss_lock);
+ nenomem_conns++;
+ }
/* careful with the jiffy wrap... */
- while ((timeout = ((int)deadline - (int)jiffies)) <= 0) {
+ while ((timeout = cfs_time_sub(deadline,
+ cfs_time_current())) <= 0) {
const int n = 4;
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
-
+
/* Time to check for timeouts on a few more peers: I do
* checks every 'p' seconds on a proportion of the peer
                         * table and I need to check every connection 'n' times
                         * within a timeout interval, to ensure I detect a
                         * timeout on any connection within (n+1)/n times the
* timeout interval. */
- if (ksocknal_data.ksnd_io_timeout > n * p)
- chunk = (chunk * n * p) /
- ksocknal_data.ksnd_io_timeout;
+ if (*ksocknal_tunables.ksnd_timeout > n * p)
+ chunk = (chunk * n * p) /
+ *ksocknal_tunables.ksnd_timeout;
if (chunk == 0)
chunk = 1;
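+                        /* e.g. assuming the default 50s timeout and the
+                         * usual 101-bucket peer hash: chunk = (101*4*1)/50 =
+                         * 8 buckets per second, so the whole table is swept
+                         * about every 13s, well inside the timeout */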
for (i = 0; i < chunk; i++) {
ksocknal_check_peer_timeouts (peer_index);
- peer_index = (peer_index + 1) %
+ peer_index = (peer_index + 1) %
ksocknal_data.ksnd_peer_hash_size;
}
- deadline += p * HZ;
+ deadline = cfs_time_add(deadline, cfs_time_seconds(p));
}
- add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ if (nenomem_conns != 0) {
+ /* Reduce my timeout if I rescheduled ENOMEM conns.
+ * This also prevents me getting woken immediately
+ * if any go back on my enomem list. */
+ timeout = SOCKNAL_ENOMEM_RETRY;
+ }
+ ksocknal_data.ksnd_reaper_waketime =
+ cfs_time_add(cfs_time_current(), timeout);
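+                /* (other threads are assumed to consult ksnd_reaper_waketime
+                 * when deciding whether I need an early wakeup) */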
+
set_current_state (TASK_INTERRUPTIBLE);
+ cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
list_empty (&ksocknal_data.ksnd_zombie_conns))
- schedule_timeout (timeout);
+ cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
set_current_state (TASK_RUNNING);
- remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
- spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
}
- spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+ spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
ksocknal_thread_fini ();
return (0);
}
-
-nal_cb_t ksocknal_lib = {
- nal_data: &ksocknal_data, /* NAL private data */
- cb_send: ksocknal_send,
- cb_send_pages: ksocknal_send_pages,
- cb_recv: ksocknal_recv,
- cb_recv_pages: ksocknal_recv_pages,
- cb_read: ksocknal_read,
- cb_write: ksocknal_write,
- cb_callback: ksocknal_callback,
- cb_malloc: ksocknal_malloc,
- cb_free: ksocknal_free,
- cb_printf: ksocknal_printf,
- cb_cli: ksocknal_cli,
- cb_sti: ksocknal_sti,
- cb_dist: ksocknal_dist
-};