X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fsocklnd%2Fsocklnd_cb.c;h=efec018e23ac6621cd89bc4269f7e7f2b9e9452b;hb=9190c945141ad12ebec0d589b3d3fdb193e742fa;hp=3dcaa6cb0c6c5a939f2507d26ec2efcb0dc5b3a6;hpb=c9e28d512b1ba159548bc040920d70d3e41fad13;p=fs%2Flustre-release.git diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index 3dcaa6c..efec018 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -1,7 +1,8 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * Copyright (C) 2001, 2002 Cluster File Systems, Inc. + * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * * Author: Zach Brown * Author: Peter J. Braam * Author: Phil Schwan @@ -23,131 +24,77 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include "socknal.h" -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) -# include -#endif +#include "socklnd.h" -/* - * LIB functions follow - * - */ -int -ksocknal_dist(lib_nal_t *nal, ptl_nid_t nid, unsigned long *dist) +ksock_tx_t * +ksocknal_alloc_tx (int size) { - /* I would guess that if ksocknal_get_peer (nid) == NULL, - and we're not routing, then 'nid' is very distant :) */ - if (nal->libnal_ni.ni_pid.nid == nid) { - *dist = 0; - } else { - *dist = 1; + ksock_tx_t *tx = NULL; + + if (size == KSOCK_NOOP_TX_SIZE) { + /* searching for a noop tx in free list */ + spin_lock(&ksocknal_data.ksnd_tx_lock); + + if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { + tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next, + ksock_tx_t, tx_list); + LASSERT(tx->tx_desc_size == size); + list_del(&tx->tx_list); + } + + spin_unlock(&ksocknal_data.ksnd_tx_lock); } - return 0; + if (tx == NULL) + LIBCFS_ALLOC(tx, size); + + if (tx == NULL) + return NULL; + + atomic_set(&tx->tx_refcount, 1); + tx->tx_desc_size = size; + atomic_inc(&ksocknal_data.ksnd_nactive_txs); + + return tx; } void -ksocknal_free_ltx (ksock_ltx_t *ltx) +ksocknal_free_tx (ksock_tx_t *tx) { - atomic_dec(&ksocknal_data.ksnd_nactive_ltxs); - PORTAL_FREE(ltx, ltx->ltx_desc_size); -} + atomic_dec(&ksocknal_data.ksnd_nactive_txs); -#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC) -struct page * -ksocknal_kvaddr_to_page (unsigned long vaddr) -{ - struct page *page; - - if (vaddr >= VMALLOC_START && - vaddr < VMALLOC_END) - page = vmalloc_to_page ((void *)vaddr); -#if CONFIG_HIGHMEM - else if (vaddr >= PKMAP_BASE && - vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) - page = vmalloc_to_page ((void *)vaddr); - /* in 2.4 ^ just walks the page tables */ -#endif - else - page = virt_to_page (vaddr); + if (tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { + /* it's a noop tx */ + spin_lock(&ksocknal_data.ksnd_tx_lock); - if (page == NULL || - !VALID_PAGE (page)) - return (NULL); + list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs); - return (page); + spin_unlock(&ksocknal_data.ksnd_tx_lock); + } else { + LIBCFS_FREE(tx, tx->tx_desc_size); + } +} + +void +ksocknal_init_msg(ksock_msg_t *msg, int type) +{ + msg->ksm_type = type; + msg->ksm_csum = 0; + msg->ksm_zc_req_cookie = 0; + msg->ksm_zc_ack_cookie = 0; } -#endif int ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) { - struct socket *sock = conn->ksnc_sock; struct iovec *iov = tx->tx_iov; -#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC) - unsigned long vaddr = (unsigned long)iov->iov_base - int offset = vaddr & (PAGE_SIZE - 1); - int zcsize = MIN (iov->iov_len, PAGE_SIZE - offset); - struct page *page; -#endif - int nob; - int rc; + int nob; + int 
rc; - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ LASSERT (tx->tx_niov > 0); - -#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC) - if (zcsize >= ksocknal_data.ksnd_zc_min_frag && - (sock->sk->route_caps & NETIF_F_SG) && - (sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) && - (page = ksocknal_kvaddr_to_page (vaddr)) != NULL) { - int msgflg = MSG_DONTWAIT; - - CDEBUG(D_NET, "vaddr %p, page %p->%p + offset %x for %d\n", - (void *)vaddr, page, page_address(page), offset, zcsize); - if (!list_empty (&conn->ksnc_tx_queue) || - zcsize < tx->tx_resid) - msgflg |= MSG_MORE; - - rc = tcp_sendpage_zccd(sock, page, offset, zcsize, msgflg, &tx->tx_zccd); - } else -#endif - { -#if SOCKNAL_SINGLE_FRAG_TX - struct iovec scratch; - struct iovec *scratchiov = &scratch; - int niov = 1; -#else - struct iovec *scratchiov = conn->ksnc_tx_scratch_iov; - int niov = tx->tx_niov; -#endif - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = scratchiov, - .msg_iovlen = niov, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = MSG_DONTWAIT - }; - mm_segment_t oldmm = get_fs(); - int i; - - for (nob = i = 0; i < niov; i++) { - scratchiov[i] = tx->tx_iov[i]; - nob += scratchiov[i].iov_len; - } - - if (!list_empty(&conn->ksnc_tx_queue) || - nob < tx->tx_resid) - msg.msg_flags |= MSG_MORE; - - set_fs (KERNEL_DS); - rc = sock_sendmsg(sock, &msg, nob); - set_fs (oldmm); - } + /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */ + rc = ksocknal_lib_send_iov(conn, tx); if (rc <= 0) /* sent nothing? */ return (rc); @@ -159,7 +106,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) /* "consume" iov */ do { LASSERT (tx->tx_niov > 0); - + if (nob < iov->iov_len) { iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob); iov->iov_len -= nob; @@ -170,84 +117,22 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) tx->tx_iov = ++iov; tx->tx_niov--; } while (nob != 0); - + return (rc); } int ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) { - struct socket *sock = conn->ksnc_sock; - ptl_kiov_t *kiov = tx->tx_kiov; - int rc; - int nob; - - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ + lnet_kiov_t *kiov = tx->tx_kiov; + int nob; + int rc; + LASSERT (tx->tx_niov == 0); LASSERT (tx->tx_nkiov > 0); -#if SOCKNAL_ZC - if (kiov->kiov_len >= ksocknal_tunables.ksnd_zc_min_frag && - (sock->sk->route_caps & NETIF_F_SG) && - (sock->sk->route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM))) { - struct page *page = kiov->kiov_page; - int offset = kiov->kiov_offset; - int fragsize = kiov->kiov_len; - int msgflg = MSG_DONTWAIT; - - CDEBUG(D_NET, "page %p + offset %x for %d\n", - page, offset, kiov->kiov_len); - - if (!list_empty(&conn->ksnc_tx_queue) || - fragsize < tx->tx_resid) - msgflg |= MSG_MORE; - - rc = tcp_sendpage_zccd(sock, page, offset, fragsize, msgflg, - &tx->tx_zccd); - } else -#endif - { -#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK - struct iovec scratch; - struct iovec *scratchiov = &scratch; - int niov = 1; -#else -#ifdef CONFIG_HIGHMEM -#warning "XXX risk of kmap deadlock on multiple frags..." 
-#endif - struct iovec *scratchiov = conn->ksnc_tx_scratch_iov; - int niov = tx->tx_nkiov; -#endif - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = scratchiov, - .msg_iovlen = niov, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = MSG_DONTWAIT - }; - mm_segment_t oldmm = get_fs(); - int i; - - for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + - kiov[i].kiov_offset; - nob += scratchiov[i].iov_len = kiov[i].kiov_len; - } - - if (!list_empty(&conn->ksnc_tx_queue) || - nob < tx->tx_resid) - msg.msg_flags |= MSG_DONTWAIT; - - set_fs (KERNEL_DS); - rc = sock_sendmsg(sock, &msg, nob); - set_fs (oldmm); - - for (i = 0; i < niov; i++) - kunmap(kiov[i].kiov_page); - } + /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ + rc = ksocknal_lib_send_kiov(conn, tx); if (rc <= 0) /* sent nothing? */ return (rc); @@ -256,15 +141,16 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) LASSERT (nob <= tx->tx_resid); tx->tx_resid -= nob; + /* "consume" kiov */ do { LASSERT(tx->tx_nkiov > 0); - + if (nob < kiov->kiov_len) { kiov->kiov_offset += nob; kiov->kiov_len -= nob; return rc; } - + nob -= kiov->kiov_len; tx->tx_kiov = ++kiov; tx->tx_nkiov--; @@ -278,15 +164,14 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { int rc; int bufnob; - + if (ksocknal_data.ksnd_stall_tx != 0) { - set_current_state (TASK_UNINTERRUPTIBLE); - schedule_timeout (ksocknal_data.ksnd_stall_tx * HZ); + cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); } LASSERT (tx->tx_resid != 0); - rc = ksocknal_getconnsock (conn); + rc = ksocknal_connsock_addref(conn); if (rc != 0) { LASSERT (conn->ksnc_closing); return (-ESHUTDOWN); @@ -303,22 +188,21 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) rc = ksocknal_send_kiov (conn, tx); } - bufnob = conn->ksnc_sock->sk->sk_wmem_queued; + bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock); if (rc > 0) /* sent something? */ conn->ksnc_tx_bufnob += rc; /* account it */ - + if (bufnob < conn->ksnc_tx_bufnob) { /* allocated send buffer bytes < computed; infer * something got ACKed */ - conn->ksnc_tx_deadline = jiffies + - ksocknal_tunables.ksnd_io_timeout * HZ; - conn->ksnc_peer->ksnp_last_alive = jiffies; + conn->ksnc_tx_deadline = + cfs_time_shift(*ksocknal_tunables.ksnd_timeout); + conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); conn->ksnc_tx_bufnob = bufnob; mb(); } if (rc <= 0) { /* Didn't write anything? 
*/ - unsigned long flags; ksock_sched_t *sched; if (rc == 0) /* some stacks return 0 instead of -EAGAIN */ @@ -330,9 +214,9 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) /* Check if EAGAIN is due to memory pressure */ sched = conn->ksnc_scheduler; - spin_lock_irqsave(&sched->kss_lock, flags); - - if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) && + spin_lock_bh (&sched->kss_lock); + + if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) && !conn->ksnc_tx_ready) { /* SOCK_NOSPACE is set when the socket fills * and cleared in the write_space callback @@ -345,7 +229,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) rc = -ENOMEM; } - spin_unlock_irqrestore(&sched->kss_lock, flags); + spin_unlock_bh (&sched->kss_lock); break; } @@ -355,68 +239,22 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) } while (tx->tx_resid != 0); - ksocknal_putconnsock (conn); + ksocknal_connsock_decref(conn); return (rc); } -void -ksocknal_eager_ack (ksock_conn_t *conn) -{ - int opt = 1; - mm_segment_t oldmm = get_fs(); - struct socket *sock = conn->ksnc_sock; - - /* Remind the socket to ACK eagerly. If I don't, the socket might - * think I'm about to send something it could piggy-back the ACK - * on, introducing delay in completing zero-copy sends in my - * peer. */ - - set_fs(KERNEL_DS); - sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK, - (char *)&opt, sizeof (opt)); - set_fs(oldmm); -} - int ksocknal_recv_iov (ksock_conn_t *conn) { -#if SOCKNAL_SINGLE_FRAG_RX - struct iovec scratch; - struct iovec *scratchiov = &scratch; - int niov = 1; -#else - struct iovec *scratchiov = conn->ksnc_rx_scratch_iov; - int niov = conn->ksnc_rx_niov; -#endif struct iovec *iov = conn->ksnc_rx_iov; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = scratchiov, - .msg_iovlen = niov, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = 0 - }; - mm_segment_t oldmm = get_fs(); - int nob; - int i; - int rc; - - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - LASSERT (niov > 0); - - for (nob = i = 0; i < niov; i++) { - scratchiov[i] = iov[i]; - nob += scratchiov[i].iov_len; - } - LASSERT (nob <= conn->ksnc_rx_nob_wanted); - - set_fs (KERNEL_DS); - rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT); - /* NB this is just a boolean..........................^ */ - set_fs (oldmm); + int nob; + int rc; + + LASSERT (conn->ksnc_rx_niov > 0); + + /* Never touch conn->ksnc_rx_iov or change connection + * status inside ksocknal_lib_recv_iov */ + rc = ksocknal_lib_recv_iov(conn); if (rc <= 0) return (rc); @@ -424,9 +262,9 @@ ksocknal_recv_iov (ksock_conn_t *conn) /* received something... 
*/ nob = rc; - conn->ksnc_peer->ksnp_last_alive = jiffies; - conn->ksnc_rx_deadline = jiffies + - ksocknal_tunables.ksnd_io_timeout * HZ; + conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); + conn->ksnc_rx_deadline = + cfs_time_shift(*ksocknal_tunables.ksnd_timeout); mb(); /* order with setting rx_started */ conn->ksnc_rx_started = 1; @@ -435,13 +273,13 @@ ksocknal_recv_iov (ksock_conn_t *conn) do { LASSERT (conn->ksnc_rx_niov > 0); - + if (nob < iov->iov_len) { iov->iov_len -= nob; iov->iov_base = (void *)(((unsigned long)iov->iov_base) + nob); return (-EAGAIN); } - + nob -= iov->iov_len; conn->ksnc_rx_iov = ++iov; conn->ksnc_rx_niov--; @@ -453,59 +291,24 @@ ksocknal_recv_iov (ksock_conn_t *conn) int ksocknal_recv_kiov (ksock_conn_t *conn) { -#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK - struct iovec scratch; - struct iovec *scratchiov = &scratch; - int niov = 1; -#else -#ifdef CONFIG_HIGHMEM -#warning "XXX risk of kmap deadlock on multiple frags..." -#endif - struct iovec *scratchiov = conn->ksnc_rx_scratch_iov; - int niov = conn->ksnc_rx_nkiov; -#endif - ptl_kiov_t *kiov = conn->ksnc_rx_kiov; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = scratchiov, - .msg_iovlen = niov, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = 0 - }; - mm_segment_t oldmm = get_fs(); - int nob; - int i; - int rc; - + lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + int nob; + int rc; LASSERT (conn->ksnc_rx_nkiov > 0); - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; - nob += scratchiov[i].iov_len = kiov[i].kiov_len; - } - LASSERT (nob <= conn->ksnc_rx_nob_wanted); - - set_fs (KERNEL_DS); - rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT); - /* NB this is just a boolean.......................^ */ - set_fs (oldmm); - - for (i = 0; i < niov; i++) - kunmap(kiov[i].kiov_page); + /* Never touch conn->ksnc_rx_kiov or change connection + * status inside ksocknal_lib_recv_iov */ + rc = ksocknal_lib_recv_kiov(conn); if (rc <= 0) return (rc); - + /* received something... */ nob = rc; - conn->ksnc_peer->ksnp_last_alive = jiffies; - conn->ksnc_rx_deadline = jiffies + - ksocknal_tunables.ksnd_io_timeout * HZ; + conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); + conn->ksnc_rx_deadline = + cfs_time_shift(*ksocknal_tunables.ksnd_timeout); mb(); /* order with setting rx_started */ conn->ksnc_rx_started = 1; @@ -514,13 +317,13 @@ ksocknal_recv_kiov (ksock_conn_t *conn) do { LASSERT (conn->ksnc_rx_nkiov > 0); - + if (nob < kiov->kiov_len) { kiov->kiov_offset += nob; kiov->kiov_len -= nob; return -EAGAIN; } - + nob -= kiov->kiov_len; conn->ksnc_rx_kiov = ++kiov; conn->ksnc_rx_nkiov--; @@ -530,20 +333,19 @@ ksocknal_recv_kiov (ksock_conn_t *conn) } int -ksocknal_receive (ksock_conn_t *conn) +ksocknal_receive (ksock_conn_t *conn) { /* Return 1 on success, 0 on EOF, < 0 on error. * Caller checks ksnc_rx_nob_wanted to determine * progress/completion. 
*/ int rc; ENTRY; - + if (ksocknal_data.ksnd_stall_rx != 0) { - set_current_state (TASK_UNINTERRUPTIBLE); - schedule_timeout (ksocknal_data.ksnd_stall_rx * HZ); + cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx)); } - rc = ksocknal_getconnsock (conn); + rc = ksocknal_connsock_addref(conn); if (rc != 0) { LASSERT (conn->ksnc_closing); return (-ESHUTDOWN); @@ -569,107 +371,140 @@ ksocknal_receive (ksock_conn_t *conn) /* Completed a fragment */ if (conn->ksnc_rx_nob_wanted == 0) { - /* Completed a message segment (header or payload) */ - if ((ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0 && - (conn->ksnc_rx_state == SOCKNAL_RX_BODY || - conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) { - /* Remind the socket to ack eagerly... */ - ksocknal_eager_ack(conn); - } rc = 1; break; } } - ksocknal_putconnsock (conn); + ksocknal_connsock_decref(conn); RETURN (rc); } -#if SOCKNAL_ZC void -ksocknal_zc_callback (zccd_t *zcd) +ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx) { - ksock_tx_t *tx = KSOCK_ZCCD_2_TX(zcd); - ksock_sched_t *sched = tx->tx_conn->ksnc_scheduler; - unsigned long flags; + lnet_msg_t *lnetmsg = tx->tx_lnetmsg; + int rc = (tx->tx_resid == 0) ? 0 : -EIO; ENTRY; - /* Schedule tx for cleanup (can't do it now due to lock conflicts) */ + LASSERT(ni != NULL || tx->tx_conn != NULL); - spin_lock_irqsave (&sched->kss_lock, flags); + if (tx->tx_conn != NULL) + ksocknal_conn_decref(tx->tx_conn); - list_add_tail (&tx->tx_list, &sched->kss_zctxdone_list); - wake_up (&sched->kss_waitq); + if (ni == NULL && tx->tx_conn != NULL) + ni = tx->tx_conn->ksnc_peer->ksnp_ni; + + ksocknal_free_tx (tx); + if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */ + lnet_finalize (ni, lnetmsg, rc); - spin_unlock_irqrestore (&sched->kss_lock, flags); EXIT; } -#endif void -ksocknal_tx_done (ksock_tx_t *tx, int asynch) +ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) { - ksock_ltx_t *ltx; - ENTRY; + ksock_tx_t *tx; + + while (!list_empty (txlist)) { + tx = list_entry (txlist->next, ksock_tx_t, tx_list); + + if (error && tx->tx_lnetmsg != NULL) { + CDEBUG (D_NETERROR, "Deleting packet type %d len %d %s->%s\n", + le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type), + le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length), + libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)), + libcfs_nid2str(le64_to_cpu (tx->tx_lnetmsg->msg_hdr.dest_nid))); + } else if (error) { + CDEBUG (D_NETERROR, "Deleting noop packet\n"); + } - if (tx->tx_conn != NULL) { -#if SOCKNAL_ZC - /* zero copy completion isn't always from - * process_transmit() so it needs to keep a ref on - * tx_conn... */ - if (asynch) - ksocknal_put_conn (tx->tx_conn); -#else - LASSERT (!asynch); -#endif + list_del (&tx->tx_list); + + LASSERT (atomic_read(&tx->tx_refcount) == 1); + ksocknal_tx_done (ni, tx); } +} - if (tx->tx_isfwd) { /* was a forwarded packet? */ - kpr_fwd_done (&ksocknal_data.ksnd_router, - KSOCK_TX_2_KPR_FWD_DESC (tx), - (tx->tx_resid == 0) ? 0 : -ECONNABORTED); - EXIT; +static void +ksocknal_check_zc_req(ksock_tx_t *tx) +{ + ksock_conn_t *conn = tx->tx_conn; + ksock_peer_t *peer = conn->ksnc_peer; + lnet_kiov_t *kiov = tx->tx_kiov; + int nkiov = tx->tx_nkiov; + + /* Set tx_msg.ksm_zc_req_cookie to a unique non-zero cookie and add tx + * to ksnp_zc_req_list if some fragment of this message should be sent + * zero-copy. Our peer will send an ACK containing this cookie when + * she has received this message to tell us we can signal completion. 
+ * tx_msg.ksm_zc_req_cookie remains non-zero while tx is on + * ksnp_zc_req_list. */ + + if (conn->ksnc_proto != &ksocknal_protocol_v2x || + !conn->ksnc_zc_capable) return; + + while (nkiov > 0) { + if (kiov->kiov_len >= *ksocknal_tunables.ksnd_zc_min_frag) + break; + --nkiov; + ++kiov; } - /* local send */ - ltx = KSOCK_TX_2_KSOCK_LTX (tx); + if (nkiov == 0) + return; - lib_finalize (&ksocknal_lib, ltx->ltx_private, ltx->ltx_cookie, - (tx->tx_resid == 0) ? PTL_OK : PTL_FAIL); + /* assign cookie and queue tx to pending list, it will be released when + * a matching ack is received. See ksocknal_handle_zc_ack() */ - ksocknal_free_ltx (ltx); - EXIT; + ksocknal_tx_addref(tx); + + spin_lock(&peer->ksnp_lock); + + /* ZC_REQ is going to be pinned to the peer */ + tx->tx_deadline = + cfs_time_shift(*ksocknal_tunables.ksnd_timeout); + + LASSERT (tx->tx_msg.ksm_zc_req_cookie == 0); + tx->tx_msg.ksm_zc_req_cookie = peer->ksnp_zc_next_cookie++; + list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); + + spin_unlock(&peer->ksnp_lock); } -void -ksocknal_tx_launched (ksock_tx_t *tx) +static void +ksocknal_unzc_req(ksock_tx_t *tx) { -#if SOCKNAL_ZC - if (atomic_read (&tx->tx_zccd.zccd_count) != 1) { - ksock_conn_t *conn = tx->tx_conn; - - /* zccd skbufs are still in-flight. First take a ref on - * conn, so it hangs about for ksocknal_tx_done... */ - atomic_inc (&conn->ksnc_refcount); + ksock_peer_t *peer = tx->tx_conn->ksnc_peer; - /* ...then drop the initial ref on zccd, so the zero copy - * callback can occur */ - zccd_put (&tx->tx_zccd); + spin_lock(&peer->ksnp_lock); + + if (tx->tx_msg.ksm_zc_req_cookie == 0) { + /* Not waiting for an ACK */ + spin_unlock(&peer->ksnp_lock); return; } -#endif - /* Any zero-copy-ness (if any) has completed; I can complete the - * transmit now, avoiding an extra schedule */ - ksocknal_tx_done (tx, 0); + + tx->tx_msg.ksm_zc_req_cookie = 0; + list_del(&tx->tx_zc_list); + + spin_unlock(&peer->ksnp_lock); + + ksocknal_tx_decref(tx); } int ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { - unsigned long flags; int rc; - + + if (!tx->tx_checked_zc) { + tx->tx_checked_zc = 1; + ksocknal_check_zc_req(tx); + } + rc = ksocknal_transmit (conn, tx); CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc); @@ -678,7 +513,6 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) /* Sent everything OK */ LASSERT (rc == 0); - ksocknal_tx_launched (tx); return (0); } @@ -690,105 +524,82 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) counter++; /* exponential backoff warnings */ if ((counter & (-counter)) == counter) - CWARN("%d ENOMEM tx %p\n", counter, conn); + CWARN("%u ENOMEM tx %p (%u allocated)\n", + counter, conn, atomic_read(&libcfs_kmemory)); /* Queue on ksnd_enomem_conns for retry after a timeout */ - spin_lock_irqsave(&ksocknal_data.ksnd_reaper_lock, flags); + spin_lock_bh (&ksocknal_data.ksnd_reaper_lock); /* enomem list takes over scheduler's ref... 
*/ LASSERT (conn->ksnc_tx_scheduled); list_add_tail(&conn->ksnc_tx_list, &ksocknal_data.ksnd_enomem_conns); - if (!time_after_eq(jiffies + SOCKNAL_ENOMEM_RETRY, + if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(), + SOCKNAL_ENOMEM_RETRY), ksocknal_data.ksnd_reaper_waketime)) - wake_up (&ksocknal_data.ksnd_reaper_waitq); - - spin_unlock_irqrestore(&ksocknal_data.ksnd_reaper_lock, flags); + cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq); + + spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock); return (rc); } /* Actual error */ LASSERT (rc < 0); - if (!conn->ksnc_closing) - CERROR("[%p] Error %d on write to "LPX64 + if (!conn->ksnc_closing) { + switch (rc) { + case -ECONNRESET: + LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection " + "while we were sending data; it may have " + "rebooted.\n", + HIPQUAD(conn->ksnc_ipaddr)); + break; + default: + LCONSOLE_WARN("There was an unexpected network error " + "while writing to %u.%u.%u.%u: %d.\n", + HIPQUAD(conn->ksnc_ipaddr), rc); + break; + } + CDEBUG(D_NET, "[%p] Error %d on write to %s" " ip %d.%d.%d.%d:%d\n", conn, rc, - conn->ksnc_peer->ksnp_nid, + libcfs_id2str(conn->ksnc_peer->ksnp_id), HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); + } + + ksocknal_unzc_req(tx); - ksocknal_close_conn_and_siblings (conn, rc); - ksocknal_tx_launched (tx); + /* it's not an error if conn is being closed */ + ksocknal_close_conn_and_siblings (conn, + (conn->ksnc_closing) ? 0 : rc); return (rc); } void -ksocknal_launch_autoconnect_locked (ksock_route_t *route) +ksocknal_launch_connection_locked (ksock_route_t *route) { - unsigned long flags; /* called holding write lock on ksnd_global_lock */ - LASSERT (!route->ksnr_deleted); - LASSERT ((route->ksnr_connected & (1 << SOCKNAL_CONN_ANY)) == 0); - LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES); - LASSERT (route->ksnr_connecting == 0); - - if (ksocknal_tunables.ksnd_typed_conns) - route->ksnr_connecting = - KSNR_TYPED_ROUTES & ~route->ksnr_connected; - else - route->ksnr_connecting = (1 << SOCKNAL_CONN_ANY); - - atomic_inc (&route->ksnr_refcount); /* extra ref for asynchd */ - - spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags); - - list_add_tail (&route->ksnr_connect_list, - &ksocknal_data.ksnd_autoconnectd_routes); - wake_up (&ksocknal_data.ksnd_autoconnectd_waitq); - - spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags); -} - -ksock_peer_t * -ksocknal_find_target_peer_locked (ksock_tx_t *tx, ptl_nid_t nid) -{ - char ipbuf[PTL_NALFMT_SIZE]; - ptl_nid_t target_nid; - int rc; - ksock_peer_t *peer = ksocknal_find_peer_locked (nid); + LASSERT (!route->ksnr_scheduled); + LASSERT (!route->ksnr_connecting); + LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0); - if (peer != NULL) - return (peer); - - if (tx->tx_isfwd) { - CERROR ("Can't send packet to "LPX64 - " %s: routed target is not a peer\n", - nid, portals_nid2str(SOCKNAL, nid, ipbuf)); - return (NULL); - } + route->ksnr_scheduled = 1; /* scheduling conn for connd */ + ksocknal_route_addref(route); /* extra ref for connd */ - rc = kpr_lookup (&ksocknal_data.ksnd_router, nid, tx->tx_nob, - &target_nid); - if (rc != 0) { - CERROR ("Can't route to "LPX64" %s: router error %d\n", - nid, portals_nid2str(SOCKNAL, nid, ipbuf), rc); - return (NULL); - } + spin_lock_bh (&ksocknal_data.ksnd_connd_lock); - peer = ksocknal_find_peer_locked (target_nid); - if (peer != NULL) - return (peer); + list_add_tail (&route->ksnr_connd_list, + &ksocknal_data.ksnd_connd_routes); + cfs_waitq_signal 
(&ksocknal_data.ksnd_connd_waitq); - CERROR ("Can't send packet to "LPX64" %s: no peer entry\n", - target_nid, portals_nid2str(SOCKNAL, target_nid, ipbuf)); - return (NULL); + spin_unlock_bh (&ksocknal_data.ksnd_connd_lock); } ksock_conn_t * -ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer) +ksocknal_find_conn_locked (int payload_nob, ksock_peer_t *peer) { struct list_head *tmp; ksock_conn_t *typed = NULL; @@ -799,35 +610,48 @@ ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer) list_for_each (tmp, &peer->ksnp_conns) { ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); + int hdr_nob = 0; #if SOCKNAL_ROUND_ROBIN const int nob = 0; #else int nob = atomic_read(&c->ksnc_tx_nob) + - c->ksnc_sock->sk->sk_wmem_queued; + SOCK_WMEM_QUEUED(c->ksnc_sock); #endif LASSERT (!c->ksnc_closing); + LASSERT (c->ksnc_proto != NULL); if (fallback == NULL || nob < fnob) { fallback = c; fnob = nob; } - if (!ksocknal_tunables.ksnd_typed_conns) + if (!*ksocknal_tunables.ksnd_typed_conns) continue; + if (payload_nob == 0) { + /* noop packet */ + hdr_nob = offsetof(ksock_msg_t, ksm_u); + } else { + /* lnet packet */ + hdr_nob = (c->ksnc_proto == &ksocknal_protocol_v2x)? + offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload): + sizeof(lnet_hdr_t); + } + switch (c->ksnc_type) { default: + CERROR("ksnc_type bad: %u\n", c->ksnc_type); LBUG(); - case SOCKNAL_CONN_ANY: + case SOCKLND_CONN_ANY: break; - case SOCKNAL_CONN_BULK_IN: + case SOCKLND_CONN_BULK_IN: continue; - case SOCKNAL_CONN_BULK_OUT: - if (tx->tx_nob < ksocknal_tunables.ksnd_min_bulk) + case SOCKLND_CONN_BULK_OUT: + if ((hdr_nob + payload_nob) < *ksocknal_tunables.ksnd_min_bulk) continue; break; - case SOCKNAL_CONN_CONTROL: - if (tx->tx_nob >= ksocknal_tunables.ksnd_min_bulk) + case SOCKLND_CONN_CONTROL: + if ((hdr_nob + payload_nob) >= *ksocknal_tunables.ksnd_min_bulk) continue; break; } @@ -852,55 +676,180 @@ ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer) } void +ksocknal_next_mono_tx(ksock_conn_t *conn) +{ + ksock_tx_t *tx = conn->ksnc_tx_mono; + + /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ + LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x); + LASSERT(!list_empty(&conn->ksnc_tx_queue)); + LASSERT(tx != NULL); + + if (tx->tx_list.next == &conn->ksnc_tx_queue) { + /* no more packets queued */ + conn->ksnc_tx_mono = NULL; + } else { + conn->ksnc_tx_mono = list_entry(tx->tx_list.next, ksock_tx_t, tx_list); + LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == tx->tx_msg.ksm_type); + } +} + +int +ksocknal_piggyback_zcack(ksock_conn_t *conn, __u64 cookie) +{ + ksock_tx_t *tx = conn->ksnc_tx_mono; + + /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ + + if (tx == NULL) + return 0; + + if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { + /* tx is noop zc-ack, can't piggyback zc-ack cookie */ + return 0; + } + + LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); + LASSERT(tx->tx_msg.ksm_zc_ack_cookie == 0); + + /* piggyback the zc-ack cookie */ + tx->tx_msg.ksm_zc_ack_cookie = cookie; + ksocknal_next_mono_tx(conn); + + return 1; +} + +void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) { - unsigned long flags; ksock_sched_t *sched = conn->ksnc_scheduler; + ksock_msg_t *msg = &tx->tx_msg; + ksock_tx_t *ztx; + int bufnob = 0; /* called holding global lock (read or irq-write) and caller may * not have dropped this lock between finding conn and calling me, * so we don't need the {get,put}connsock dance to deref * ksnc_sock... 
*/ LASSERT(!conn->ksnc_closing); - LASSERT(tx->tx_resid == tx->tx_nob); - - CDEBUG (D_NET, "Sending to "LPX64" ip %d.%d.%d.%d:%d\n", - conn->ksnc_peer->ksnp_nid, + + CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n", + libcfs_id2str(conn->ksnc_peer->ksnp_id), HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); + tx->tx_checked_zc = 0; + conn->ksnc_proto->pro_pack(tx); + + /* Ensure the frags we've been given EXACTLY match the number of + * bytes we want to send. Many TCP/IP stacks disregard any total + * size parameters passed to them and just look at the frags. + * + * We always expect at least 1 mapped fragment containing the + * complete ksocknal message header. */ + LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) + + lnet_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob); + LASSERT (tx->tx_niov >= 1); + LASSERT (tx->tx_resid == tx->tx_nob); + + CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", + tx, (tx->tx_lnetmsg != NULL)? tx->tx_lnetmsg->msg_hdr.type: + KSOCK_MSG_NOOP, + tx->tx_nob, tx->tx_niov, tx->tx_nkiov); + atomic_add (tx->tx_nob, &conn->ksnc_tx_nob); tx->tx_conn = conn; + ksocknal_conn_addref(conn); /* +1 ref for tx */ -#if SOCKNAL_ZC - zccd_init (&tx->tx_zccd, ksocknal_zc_callback); - /* NB this sets 1 ref on zccd, so the callback can only occur after - * I've released this ref. */ -#endif - spin_lock_irqsave (&sched->kss_lock, flags); + /* + * NB Darwin: SOCK_WMEM_QUEUED()->sock_getsockopt() will take + * a blockable lock(socket lock), so SOCK_WMEM_QUEUED can't be + * put in spinlock. + */ + bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock); + spin_lock_bh (&sched->kss_lock); - if (list_empty(&conn->ksnc_tx_queue) && - conn->ksnc_sock->sk->sk_wmem_queued == 0) { + if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) { /* First packet starts the timeout */ - conn->ksnc_tx_deadline = jiffies + - ksocknal_tunables.ksnd_io_timeout * HZ; + conn->ksnc_tx_deadline = + cfs_time_shift(*ksocknal_tunables.ksnd_timeout); conn->ksnc_tx_bufnob = 0; mb(); /* order with adding to tx_queue */ } - list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); - + ztx = NULL; + + if (msg->ksm_type == KSOCK_MSG_NOOP) { + /* The packet is noop ZC ACK, try to piggyback the ack_cookie + * on a normal packet so I don't need to send it */ + LASSERT(msg->ksm_zc_req_cookie == 0); + LASSERT(msg->ksm_zc_ack_cookie != 0); + + if (conn->ksnc_tx_mono != NULL) { + if (ksocknal_piggyback_zcack(conn, msg->ksm_zc_ack_cookie)) { + /* zc-ack cookie is piggybacked */ + atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob); + ztx = tx; /* Put to freelist later */ + } else { + /* no packet can piggyback zc-ack cookie */ + list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); + } + } else { + /* It's the first mono-packet */ + conn->ksnc_tx_mono = tx; + list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); + } + + } else { + /* It's a normal packet - can it piggback a noop zc-ack that + * has been queued already? 
*/ + LASSERT(msg->ksm_zc_ack_cookie == 0); + + if (conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x packet */ + conn->ksnc_tx_mono != NULL) { + if (conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_NOOP) { + /* There is a noop zc-ack can be piggybacked */ + ztx = conn->ksnc_tx_mono; + + msg->ksm_zc_ack_cookie = ztx->tx_msg.ksm_zc_ack_cookie; + ksocknal_next_mono_tx(conn); + + /* use tx to replace the noop zc-ack packet, ztx will + * be put to freelist later */ + list_add(&tx->tx_list, &ztx->tx_list); + list_del(&ztx->tx_list); + + atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob); + } else { + /* no noop zc-ack packet, just enqueue it */ + LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_LNET); + list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); + } + + } else if (conn->ksnc_proto == &ksocknal_protocol_v2x) { + /* it's the first mono-packet, enqueue it */ + conn->ksnc_tx_mono = tx; + list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); + } else { + /* V1.x packet, just enqueue it */ + list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue); + } + } + + if (ztx != NULL) + list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); + if (conn->ksnc_tx_ready && /* able to send */ !conn->ksnc_tx_scheduled) { /* not scheduled to send */ /* +1 ref for scheduler */ - atomic_inc (&conn->ksnc_refcount); - list_add_tail (&conn->ksnc_tx_list, + ksocknal_conn_addref(conn); + list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; - wake_up (&sched->kss_waitq); + cfs_waitq_signal (&sched->kss_waitq); } - spin_unlock_irqrestore (&sched->kss_lock, flags); + spin_unlock_bh (&sched->kss_lock); } ksock_route_t * @@ -908,31 +857,28 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) { struct list_head *tmp; ksock_route_t *route; - int bits; - + list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - bits = route->ksnr_connected; - /* All typed connections established? */ - if ((bits & KSNR_TYPED_ROUTES) == KSNR_TYPED_ROUTES) - continue; + LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); - /* Untyped connection established? */ - if ((bits & (1 << SOCKNAL_CONN_ANY)) != 0) + if (route->ksnr_scheduled) /* connections being established */ continue; - /* connection being established? */ - if (route->ksnr_connecting != 0) + /* all route types connected ? */ + if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0) continue; /* too soon to retry this guy? */ - if (!time_after_eq (jiffies, route->ksnr_timeout)) + if (!(route->ksnr_retry_interval == 0 || /* first attempt */ + cfs_time_aftereq (cfs_time_current(), + route->ksnr_timeout))) continue; - + return (route); } - + return (NULL); } @@ -944,539 +890,264 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - - if (route->ksnr_connecting != 0) + + LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); + + if (route->ksnr_scheduled) return (route); } - + return (NULL); } int -ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) +ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) { - unsigned long flags; ksock_peer_t *peer; ksock_conn_t *conn; ksock_route_t *route; rwlock_t *g_lock; - - /* Ensure the frags we've been given EXACTLY match the number of - * bytes we want to send. Many TCP/IP stacks disregard any total - * size parameters passed to them and just look at the frags. 
- * - * We always expect at least 1 mapped fragment containing the - * complete portals header. */ - LASSERT (lib_iov_nob (tx->tx_niov, tx->tx_iov) + - lib_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob); - LASSERT (tx->tx_niov >= 1); - LASSERT (tx->tx_iov[0].iov_len >= sizeof (ptl_hdr_t)); - - CDEBUG (D_NET, "packet %p type %d, nob %d niov %d nkiov %d\n", - tx, ((ptl_hdr_t *)tx->tx_iov[0].iov_base)->type, - tx->tx_nob, tx->tx_niov, tx->tx_nkiov); + int retry; + int rc; - tx->tx_conn = NULL; /* only set when assigned a conn */ - tx->tx_resid = tx->tx_nob; - tx->tx_hdr = (ptl_hdr_t *)tx->tx_iov[0].iov_base; + LASSERT (tx->tx_conn == NULL); + LASSERT (tx->tx_lnetmsg != NULL); g_lock = &ksocknal_data.ksnd_global_lock; + + for (retry = 0;; retry = 1) { #if !SOCKNAL_ROUND_ROBIN - read_lock (g_lock); + read_lock (g_lock); + peer = ksocknal_find_peer_locked(ni, id); + if (peer != NULL) { + if (ksocknal_find_connectable_route_locked(peer) == NULL) { + conn = ksocknal_find_conn_locked (tx->tx_lnetmsg->msg_len, peer); + if (conn != NULL) { + /* I've got no routes that need to be + * connecting and I do have an actual + * connection... */ + ksocknal_queue_tx_locked (tx, conn); + read_unlock (g_lock); + return (0); + } + } + } - peer = ksocknal_find_target_peer_locked (tx, nid); - if (peer == NULL) { + /* I'll need a write lock... */ read_unlock (g_lock); - return (-EHOSTUNREACH); - } +#endif + write_lock_bh (g_lock); - if (ksocknal_find_connectable_route_locked(peer) == NULL) { - conn = ksocknal_find_conn_locked (tx, peer); - if (conn != NULL) { - /* I've got no autoconnect routes that need to be - * connecting and I do have an actual connection... */ - ksocknal_queue_tx_locked (tx, conn); - read_unlock (g_lock); - return (0); + peer = ksocknal_find_peer_locked(ni, id); + if (peer != NULL) + break; + + write_unlock_bh (g_lock); + + if ((id.pid & LNET_PID_USERFLAG) != 0) { + CERROR("Refusing to create a connection to " + "userspace process %s\n", libcfs_id2str(id)); + return -EHOSTUNREACH; + } + + if (retry) { + CERROR("Can't find peer %s\n", libcfs_id2str(id)); + return -EHOSTUNREACH; } - } - - /* I'll need a write lock... */ - read_unlock (g_lock); -#endif - write_lock_irqsave(g_lock, flags); - peer = ksocknal_find_target_peer_locked (tx, nid); - if (peer == NULL) { - write_unlock_irqrestore(g_lock, flags); - return (-EHOSTUNREACH); + rc = ksocknal_add_peer(ni, id, + LNET_NIDADDR(id.nid), + lnet_acceptor_port()); + if (rc != 0) { + CERROR("Can't add peer %s: %d\n", + libcfs_id2str(id), rc); + return rc; + } } for (;;) { - /* launch any/all autoconnections that need it */ + /* launch any/all connections that need it */ route = ksocknal_find_connectable_route_locked (peer); if (route == NULL) break; - ksocknal_launch_autoconnect_locked (route); + ksocknal_launch_connection_locked (route); } - conn = ksocknal_find_conn_locked (tx, peer); + conn = ksocknal_find_conn_locked (tx->tx_lnetmsg->msg_len, peer); if (conn != NULL) { /* Connection exists; queue message on it */ ksocknal_queue_tx_locked (tx, conn); - write_unlock_irqrestore (g_lock, flags); + write_unlock_bh (g_lock); return (0); } - route = ksocknal_find_connecting_route_locked (peer); - if (route != NULL) { - /* At least 1 connection is being established; queue the - * message... 
*/ + if (peer->ksnp_accepting > 0 || + ksocknal_find_connecting_route_locked (peer) != NULL) { + /* the message is going to be pinned to the peer */ + tx->tx_deadline = + cfs_time_shift(*ksocknal_tunables.ksnd_timeout); + + /* Queue the message until a connection is established */ list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); - write_unlock_irqrestore (g_lock, flags); - return (0); + write_unlock_bh (g_lock); + return 0; } - - write_unlock_irqrestore (g_lock, flags); + + write_unlock_bh (g_lock); + + /* NB Routes may be ignored if connections to them failed recently */ + CDEBUG(D_NETERROR, "No usable routes to %s\n", libcfs_id2str(id)); return (-EHOSTUNREACH); } -ptl_err_t -ksocknal_sendmsg(lib_nal_t *nal, - void *private, - lib_msg_t *cookie, - ptl_hdr_t *hdr, - int type, - ptl_nid_t nid, - ptl_pid_t pid, - unsigned int payload_niov, - struct iovec *payload_iov, - ptl_kiov_t *payload_kiov, - size_t payload_offset, - size_t payload_nob) +int +ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { - ksock_ltx_t *ltx; - int desc_size; - int rc; + int type = lntmsg->msg_type; + lnet_process_id_t target = lntmsg->msg_target; + unsigned int payload_niov = lntmsg->msg_niov; + struct iovec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + ksock_tx_t *tx; + int desc_size; + int rc; /* NB 'private' is different depending on what we're sending. * Just ignore it... */ - CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid:"LPX64 - " pid %d\n", payload_nob, payload_niov, nid , pid); + CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n", + payload_nob, payload_niov, libcfs_id2str(target)); LASSERT (payload_nob == 0 || payload_niov > 0); - LASSERT (payload_niov <= PTL_MD_MAX_IOV); - - /* It must be OK to kmap() if required */ - LASSERT (payload_kiov == NULL || !in_interrupt ()); + LASSERT (payload_niov <= LNET_MAX_IOV); /* payload is either all vaddrs or all pages */ LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); - + LASSERT (!in_interrupt ()); + if (payload_iov != NULL) - desc_size = offsetof(ksock_ltx_t, ltx_iov[1 + payload_niov]); + desc_size = offsetof(ksock_tx_t, + tx_frags.virt.iov[1 + payload_niov]); else - desc_size = offsetof(ksock_ltx_t, ltx_kiov[payload_niov]); - - if (in_interrupt() || - type == PTL_MSG_ACK || - type == PTL_MSG_REPLY) { - /* Can't block if in interrupt or responding to an incoming - * message */ - PORTAL_ALLOC_ATOMIC(ltx, desc_size); - } else { - PORTAL_ALLOC(ltx, desc_size); - } - - if (ltx == NULL) { - CERROR("Can't allocate tx desc type %d size %d %s\n", - type, desc_size, in_interrupt() ? 
"(intr)" : ""); - return (PTL_NO_SPACE); + desc_size = offsetof(ksock_tx_t, + tx_frags.paged.kiov[payload_niov]); + + tx = ksocknal_alloc_tx(desc_size); + if (tx == NULL) { + CERROR("Can't allocate tx desc type %d size %d\n", + type, desc_size); + return (-ENOMEM); } - atomic_inc(&ksocknal_data.ksnd_nactive_ltxs); - - ltx->ltx_desc_size = desc_size; - - /* We always have 1 mapped frag for the header */ - ltx->ltx_tx.tx_iov = ltx->ltx_iov; - ltx->ltx_iov[0].iov_base = <x->ltx_hdr; - ltx->ltx_iov[0].iov_len = sizeof(*hdr); - ltx->ltx_hdr = *hdr; - - ltx->ltx_private = private; - ltx->ltx_cookie = cookie; - - ltx->ltx_tx.tx_isfwd = 0; - ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_nob; + tx->tx_conn = NULL; /* set when assigned a conn */ + tx->tx_lnetmsg = lntmsg; if (payload_iov != NULL) { - /* payload is all mapped */ - ltx->ltx_tx.tx_kiov = NULL; - ltx->ltx_tx.tx_nkiov = 0; - - ltx->ltx_tx.tx_niov = - 1 + lib_extract_iov(payload_niov, <x->ltx_iov[1], - payload_niov, payload_iov, - payload_offset, payload_nob); + tx->tx_kiov = NULL; + tx->tx_nkiov = 0; + tx->tx_iov = tx->tx_frags.virt.iov; + tx->tx_niov = 1 + + lnet_extract_iov(payload_niov, &tx->tx_iov[1], + payload_niov, payload_iov, + payload_offset, payload_nob); } else { - /* payload is all pages */ - ltx->ltx_tx.tx_niov = 1; - - ltx->ltx_tx.tx_kiov = ltx->ltx_kiov; - ltx->ltx_tx.tx_nkiov = - lib_extract_kiov(payload_niov, ltx->ltx_kiov, - payload_niov, payload_kiov, - payload_offset, payload_nob); + tx->tx_niov = 1; + tx->tx_iov = &tx->tx_frags.paged.iov; + tx->tx_kiov = tx->tx_frags.paged.kiov; + tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov, + payload_niov, payload_kiov, + payload_offset, payload_nob); } - rc = ksocknal_launch_packet(<x->ltx_tx, nid); - if (rc == 0) - return (PTL_OK); - - ksocknal_free_ltx(ltx); - return (PTL_FAIL); -} + ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_LNET); -ptl_err_t -ksocknal_send (lib_nal_t *nal, void *private, lib_msg_t *cookie, - ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid, - unsigned int payload_niov, struct iovec *payload_iov, - size_t payload_offset, size_t payload_len) -{ - return (ksocknal_sendmsg(nal, private, cookie, - hdr, type, nid, pid, - payload_niov, payload_iov, NULL, - payload_offset, payload_len)); -} - -ptl_err_t -ksocknal_send_pages (lib_nal_t *nal, void *private, lib_msg_t *cookie, - ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid, - unsigned int payload_niov, ptl_kiov_t *payload_kiov, - size_t payload_offset, size_t payload_len) -{ - return (ksocknal_sendmsg(nal, private, cookie, - hdr, type, nid, pid, - payload_niov, NULL, payload_kiov, - payload_offset, payload_len)); -} + /* The first fragment will be set later in pro_pack */ + rc = ksocknal_launch_packet(ni, tx, target); + if (rc == 0) + return (0); -void -ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd) -{ - ptl_nid_t nid = fwd->kprfd_gateway_nid; - ksock_ftx_t *ftx = (ksock_ftx_t *)&fwd->kprfd_scratch; - int rc; - - CDEBUG (D_NET, "Forwarding [%p] -> "LPX64" ("LPX64"))\n", fwd, - fwd->kprfd_gateway_nid, fwd->kprfd_target_nid); - - /* I'm the gateway; must be the last hop */ - if (nid == ksocknal_lib.libnal_ni.ni_pid.nid) - nid = fwd->kprfd_target_nid; - - /* setup iov for hdr */ - ftx->ftx_iov.iov_base = fwd->kprfd_hdr; - ftx->ftx_iov.iov_len = sizeof(ptl_hdr_t); - - ftx->ftx_tx.tx_isfwd = 1; /* This is a forwarding packet */ - ftx->ftx_tx.tx_nob = sizeof(ptl_hdr_t) + fwd->kprfd_nob; - ftx->ftx_tx.tx_niov = 1; - ftx->ftx_tx.tx_iov = &ftx->ftx_iov; - ftx->ftx_tx.tx_nkiov = fwd->kprfd_niov; - 
ftx->ftx_tx.tx_kiov = fwd->kprfd_kiov; - - rc = ksocknal_launch_packet (&ftx->ftx_tx, nid); - if (rc != 0) - kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, rc); + ksocknal_free_tx(tx); + return (-EIO); } int ksocknal_thread_start (int (*fn)(void *arg), void *arg) { - long pid = kernel_thread (fn, arg, 0); - unsigned long flags; + long pid = cfs_kernel_thread (fn, arg, 0); if (pid < 0) return ((int)pid); - write_lock_irqsave(&ksocknal_data.ksnd_global_lock, flags); + write_lock_bh (&ksocknal_data.ksnd_global_lock); ksocknal_data.ksnd_nthreads++; - write_unlock_irqrestore(&ksocknal_data.ksnd_global_lock, flags); + write_unlock_bh (&ksocknal_data.ksnd_global_lock); return (0); } void ksocknal_thread_fini (void) { - unsigned long flags; - - write_lock_irqsave(&ksocknal_data.ksnd_global_lock, flags); + write_lock_bh (&ksocknal_data.ksnd_global_lock); ksocknal_data.ksnd_nthreads--; - write_unlock_irqrestore(&ksocknal_data.ksnd_global_lock, flags); -} - -void -ksocknal_fmb_callback (void *arg, int error) -{ - ksock_fmb_t *fmb = (ksock_fmb_t *)arg; - ksock_fmb_pool_t *fmp = fmb->fmb_pool; - ptl_hdr_t *hdr = &fmb->fmb_hdr; - ksock_conn_t *conn = NULL; - ksock_sched_t *sched; - unsigned long flags; - char ipbuf[PTL_NALFMT_SIZE]; - char ipbuf2[PTL_NALFMT_SIZE]; - - if (error != 0) - CERROR("Failed to route packet from " - LPX64" %s to "LPX64" %s: %d\n", - le64_to_cpu(hdr->src_nid), - portals_nid2str(SOCKNAL, le64_to_cpu(hdr->src_nid), ipbuf), - le64_to_cpu(hdr->dest_nid), - portals_nid2str(SOCKNAL, le64_to_cpu(hdr->dest_nid), ipbuf2), - error); - else - CDEBUG (D_NET, "routed packet from "LPX64" to "LPX64": OK\n", - le64_to_cpu(hdr->src_nid), le64_to_cpu(hdr->dest_nid)); - - /* drop peer ref taken on init */ - ksocknal_put_peer (fmb->fmb_peer); - - spin_lock_irqsave (&fmp->fmp_lock, flags); - - list_add (&fmb->fmb_list, &fmp->fmp_idle_fmbs); - fmp->fmp_nactive_fmbs--; - - if (!list_empty (&fmp->fmp_blocked_conns)) { - conn = list_entry (fmb->fmb_pool->fmp_blocked_conns.next, - ksock_conn_t, ksnc_rx_list); - list_del (&conn->ksnc_rx_list); - } - - spin_unlock_irqrestore (&fmp->fmp_lock, flags); - - if (conn == NULL) - return; - - CDEBUG (D_NET, "Scheduling conn %p\n", conn); - LASSERT (conn->ksnc_rx_scheduled); - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP); - - conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB; - - sched = conn->ksnc_scheduler; - - spin_lock_irqsave (&sched->kss_lock, flags); - - list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns); - wake_up (&sched->kss_waitq); - - spin_unlock_irqrestore (&sched->kss_lock, flags); -} - -ksock_fmb_t * -ksocknal_get_idle_fmb (ksock_conn_t *conn) -{ - int payload_nob = conn->ksnc_rx_nob_left; - unsigned long flags; - ksock_fmb_pool_t *pool; - ksock_fmb_t *fmb; - - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB); - LASSERT (kpr_routing(&ksocknal_data.ksnd_router)); - - if (payload_nob <= SOCKNAL_SMALL_FWD_PAGES * PAGE_SIZE) - pool = &ksocknal_data.ksnd_small_fmp; - else - pool = &ksocknal_data.ksnd_large_fmp; - - spin_lock_irqsave (&pool->fmp_lock, flags); - - if (!list_empty (&pool->fmp_idle_fmbs)) { - fmb = list_entry(pool->fmp_idle_fmbs.next, - ksock_fmb_t, fmb_list); - list_del (&fmb->fmb_list); - pool->fmp_nactive_fmbs++; - spin_unlock_irqrestore (&pool->fmp_lock, flags); - - return (fmb); - } - - /* deschedule until fmb free */ - - conn->ksnc_rx_state = SOCKNAL_RX_FMB_SLEEP; - - list_add_tail (&conn->ksnc_rx_list, - &pool->fmp_blocked_conns); - - spin_unlock_irqrestore (&pool->fmp_lock, flags); - return (NULL); + write_unlock_bh 
(&ksocknal_data.ksnd_global_lock); } int -ksocknal_init_fmb (ksock_conn_t *conn, ksock_fmb_t *fmb) -{ - int payload_nob = conn->ksnc_rx_nob_left; - ptl_nid_t dest_nid = le64_to_cpu(conn->ksnc_hdr.dest_nid); - int niov = 0; - int nob = payload_nob; - - LASSERT (conn->ksnc_rx_scheduled); - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB); - LASSERT (conn->ksnc_rx_nob_wanted == conn->ksnc_rx_nob_left); - LASSERT (payload_nob >= 0); - LASSERT (payload_nob <= fmb->fmb_pool->fmp_buff_pages * PAGE_SIZE); - LASSERT (sizeof (ptl_hdr_t) < PAGE_SIZE); - LASSERT (fmb->fmb_kiov[0].kiov_offset == 0); - - /* Take a ref on the conn's peer to prevent module unload before - * forwarding completes. */ - fmb->fmb_peer = conn->ksnc_peer; - atomic_inc (&conn->ksnc_peer->ksnp_refcount); - - /* Copy the header we just read into the forwarding buffer. If - * there's payload, start reading reading it into the buffer, - * otherwise the forwarding buffer can be kicked off - * immediately. */ - fmb->fmb_hdr = conn->ksnc_hdr; - - while (nob > 0) { - LASSERT (niov < fmb->fmb_pool->fmp_buff_pages); - LASSERT (fmb->fmb_kiov[niov].kiov_offset == 0); - fmb->fmb_kiov[niov].kiov_len = MIN (PAGE_SIZE, nob); - nob -= PAGE_SIZE; - niov++; - } - - kpr_fwd_init(&fmb->fmb_fwd, dest_nid, &fmb->fmb_hdr, - payload_nob, niov, fmb->fmb_kiov, - ksocknal_fmb_callback, fmb); - - if (payload_nob == 0) { /* got complete packet already */ - CDEBUG (D_NET, "%p "LPX64"->"LPX64" fwd_start (immediate)\n", - conn, le64_to_cpu(conn->ksnc_hdr.src_nid), dest_nid); - - kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd); - - ksocknal_new_packet (conn, 0); /* on to next packet */ - return (1); - } - - conn->ksnc_cookie = fmb; /* stash fmb for later */ - conn->ksnc_rx_state = SOCKNAL_RX_BODY_FWD; /* read in the payload */ - - /* Set up conn->ksnc_rx_kiov to read the payload into fmb's kiov-ed - * buffer */ - LASSERT (niov <= sizeof(conn->ksnc_rx_iov_space)/sizeof(ptl_kiov_t)); - - conn->ksnc_rx_niov = 0; - conn->ksnc_rx_nkiov = niov; - conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; - memcpy(conn->ksnc_rx_kiov, fmb->fmb_kiov, niov * sizeof(ptl_kiov_t)); - - CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d reading body\n", conn, - le64_to_cpu(conn->ksnc_hdr.src_nid), dest_nid, payload_nob); - return (0); -} - -void -ksocknal_fwd_parse (ksock_conn_t *conn) +ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) { - ksock_peer_t *peer; - ptl_nid_t dest_nid = le64_to_cpu(conn->ksnc_hdr.dest_nid); - ptl_nid_t src_nid = le64_to_cpu(conn->ksnc_hdr.src_nid); - int body_len = le32_to_cpu(conn->ksnc_hdr.payload_length); - char str[PTL_NALFMT_SIZE]; - char str2[PTL_NALFMT_SIZE]; - - CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn, - src_nid, dest_nid, conn->ksnc_rx_nob_left); - - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER); - LASSERT (conn->ksnc_rx_scheduled); - - if (body_len < 0) { /* length corrupt (overflow) */ - CERROR("dropping packet from "LPX64" (%s) for "LPX64" (%s): " - "packet size %d illegal\n", - src_nid, portals_nid2str(TCPNAL, src_nid, str), - dest_nid, portals_nid2str(TCPNAL, dest_nid, str2), - body_len); + static char ksocknal_slop_buffer[4096]; - ksocknal_new_packet (conn, 0); /* on to new packet */ - return; - } + int nob; + unsigned int niov; + int skipped; - if (!kpr_routing(&ksocknal_data.ksnd_router)) { /* not forwarding */ - CERROR("dropping packet from "LPX64" (%s) for "LPX64 - " (%s): not forwarding\n", - src_nid, portals_nid2str(TCPNAL, src_nid, str), - dest_nid, portals_nid2str(TCPNAL, dest_nid, str2)); - /* 
on to new packet (skip this one's body) */ - ksocknal_new_packet (conn, body_len); - return; - } + LASSERT(conn->ksnc_proto != NULL); - if (body_len > PTL_MTU) { /* too big to forward */ - CERROR ("dropping packet from "LPX64" (%s) for "LPX64 - "(%s): packet size %d too big\n", - src_nid, portals_nid2str(TCPNAL, src_nid, str), - dest_nid, portals_nid2str(TCPNAL, dest_nid, str2), - body_len); - /* on to new packet (skip this one's body) */ - ksocknal_new_packet (conn, body_len); - return; + if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) { + /* Remind the socket to ack eagerly... */ + ksocknal_lib_eager_ack(conn); } - /* should have gone direct */ - peer = ksocknal_get_peer (conn->ksnc_hdr.dest_nid); - if (peer != NULL) { - CERROR ("dropping packet from "LPX64" (%s) for "LPX64 - "(%s): target is a peer\n", - src_nid, portals_nid2str(TCPNAL, src_nid, str), - dest_nid, portals_nid2str(TCPNAL, dest_nid, str2)); - ksocknal_put_peer (peer); /* drop ref from get above */ + if (nob_to_skip == 0) { /* right at next packet boundary now */ + conn->ksnc_rx_started = 0; + mb (); /* racing with timeout thread */ - /* on to next packet (skip this one's body) */ - ksocknal_new_packet (conn, body_len); - return; - } + switch (conn->ksnc_proto->pro_version) { + case KSOCK_PROTO_V2: + conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER; + conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space; + conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg; - conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB; /* Getting FMB now */ - conn->ksnc_rx_nob_left = body_len; /* stash packet size */ - conn->ksnc_rx_nob_wanted = body_len; /* (no slop) */ -} - -int -ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) -{ - static char ksocknal_slop_buffer[4096]; + conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u); + conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u); + conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u); + break; - int nob; - int niov; - int skipped; + case KSOCK_PROTO_V1: + /* Receiving bare lnet_hdr_t */ + conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER; + conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t); + conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t); - if (nob_to_skip == 0) { /* right at next packet boundary now */ - conn->ksnc_rx_started = 0; - mb (); /* racing with timeout thread */ - - conn->ksnc_rx_state = SOCKNAL_RX_HEADER; - conn->ksnc_rx_nob_wanted = sizeof (ptl_hdr_t); - conn->ksnc_rx_nob_left = sizeof (ptl_hdr_t); + conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space; + conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg; + conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); + break; - conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space; - conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_hdr; - conn->ksnc_rx_iov[0].iov_len = sizeof (ptl_hdr_t); + default: + LBUG (); + } conn->ksnc_rx_niov = 1; conn->ksnc_rx_kiov = NULL; conn->ksnc_rx_nkiov = 0; + conn->ksnc_rx_csum = ~0; return (1); } - /* Set up to skip as much a possible now. If there's more left + /* Set up to skip as much as possible now. 
If there's more left * (ran out of iov entries) we'll get called again */ conn->ksnc_rx_state = SOCKNAL_RX_SLOP; @@ -1504,128 +1175,267 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) return (0); } -int -ksocknal_process_receive (ksock_conn_t *conn) +/* (Sink) handle incoming ZC request from sender */ +static int +ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie) { - ksock_fmb_t *fmb; - int rc; - - LASSERT (atomic_read (&conn->ksnc_refcount) > 0); + ksock_conn_t *conn; + ksock_tx_t *tx; + ksock_sched_t *sched; + int rc; - /* doesn't need a forwarding buffer */ - if (conn->ksnc_rx_state != SOCKNAL_RX_GET_FMB) - goto try_read; + read_lock (&ksocknal_data.ksnd_global_lock); - get_fmb: - fmb = ksocknal_get_idle_fmb (conn); - if (fmb == NULL) { - /* conn descheduled waiting for idle fmb */ - return (0); + conn = ksocknal_find_conn_locked (0, peer); + if (conn == NULL) { + read_unlock (&ksocknal_data.ksnd_global_lock); + CERROR("Can't find connection to send zcack.\n"); + return -ECONNRESET; } - if (ksocknal_init_fmb (conn, fmb)) { - /* packet forwarded */ - return (0); + sched = conn->ksnc_scheduler; + + spin_lock_bh (&sched->kss_lock); + rc = ksocknal_piggyback_zcack(conn, cookie); + spin_unlock_bh (&sched->kss_lock); + + read_unlock (&ksocknal_data.ksnd_global_lock); + if (rc) { + /* Ack cookie is piggybacked */ + return 0; } - try_read: - /* NB: sched lock NOT held */ - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_BODY || - conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD || - conn->ksnc_rx_state == SOCKNAL_RX_SLOP); + tx = ksocknal_alloc_tx(KSOCK_NOOP_TX_SIZE); + if (tx == NULL) { + CERROR("Can't allocate noop tx desc\n"); + return -ENOMEM; + } - LASSERT (conn->ksnc_rx_nob_wanted > 0); + tx->tx_conn = NULL; + tx->tx_lnetmsg = NULL; + tx->tx_kiov = NULL; + tx->tx_nkiov = 0; + tx->tx_iov = tx->tx_frags.virt.iov; + tx->tx_niov = 1; - rc = ksocknal_receive(conn); + ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); + tx->tx_msg.ksm_zc_ack_cookie = cookie; /* incoming cookie */ - if (rc <= 0) { - LASSERT (rc != -EAGAIN); + read_lock (&ksocknal_data.ksnd_global_lock); - if (rc == 0) - CWARN ("[%p] EOF from "LPX64" ip %d.%d.%d.%d:%d\n", - conn, conn->ksnc_peer->ksnp_nid, - HIPQUAD(conn->ksnc_ipaddr), - conn->ksnc_port); - else if (!conn->ksnc_closing) - CERROR ("[%p] Error %d on read from "LPX64 - " ip %d.%d.%d.%d:%d\n", - conn, rc, conn->ksnc_peer->ksnp_nid, - HIPQUAD(conn->ksnc_ipaddr), - conn->ksnc_port); + conn = ksocknal_find_conn_locked (0, peer); + if (conn == NULL) { + read_unlock (&ksocknal_data.ksnd_global_lock); + ksocknal_free_tx(tx); + CERROR("Can't find connection to send zcack.\n"); + return -ECONNRESET; + } + ksocknal_queue_tx_locked(tx, conn); + + read_unlock (&ksocknal_data.ksnd_global_lock); + + return 0; +} + +/* (Sender) handle ZC_ACK from sink */ +static int +ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie) +{ + ksock_tx_t *tx; + struct list_head *ctmp; + + spin_lock(&peer->ksnp_lock); - ksocknal_close_conn_and_siblings (conn, rc); - return (rc == 0 ? 
-ESHUTDOWN : rc);
+ list_for_each(ctmp, &peer->ksnp_zc_req_list) {
+ tx = list_entry (ctmp, ksock_tx_t, tx_zc_list);
+ if (tx->tx_msg.ksm_zc_req_cookie != cookie)
+ continue;
+
+ tx->tx_msg.ksm_zc_req_cookie = 0;
+ list_del(&tx->tx_zc_list);
+
+ spin_unlock(&peer->ksnp_lock);
+
+ ksocknal_tx_decref(tx);
+ return 0;
 }
+ spin_unlock(&peer->ksnp_lock);
+ return -EPROTO;
+}
+
+int
+ksocknal_process_receive (ksock_conn_t *conn)
+{
+ int rc;
+
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+
+ /* NB: sched lock NOT held */
+ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
+ LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+ again:
 if (conn->ksnc_rx_nob_wanted != 0) {
- /* short read */
- return (-EAGAIN);
+ rc = ksocknal_receive(conn);
+
+ if (rc <= 0) {
+ LASSERT (rc != -EAGAIN);
+
+ if (rc == 0)
+ CDEBUG (D_NET, "[%p] EOF from %s"
+ " ip %d.%d.%d.%d:%d\n", conn,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+ else if (!conn->ksnc_closing)
+ CERROR ("[%p] Error %d on read from %s"
+ " ip %d.%d.%d.%d:%d\n",
+ conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ HIPQUAD(conn->ksnc_ipaddr),
+ conn->ksnc_port);
+
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings (conn,
+ (conn->ksnc_closing) ? 0 : rc);
+ return (rc == 0 ? -ESHUTDOWN : rc);
+ }
+
+ if (conn->ksnc_rx_nob_wanted != 0) {
+ /* short read */
+ return (-EAGAIN);
+ }
 }
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_HEADER:
- if (conn->ksnc_hdr.type != cpu_to_le32(PTL_MSG_HELLO) &&
- le64_to_cpu(conn->ksnc_hdr.dest_nid) !=
- ksocknal_lib.libnal_ni.ni_pid.nid) {
- /* This packet isn't for me */
- ksocknal_fwd_parse (conn);
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_HEADER: /* skipped (zero payload) */
- return (0); /* => come back later */
- case SOCKNAL_RX_SLOP: /* skipping packet's body */
- goto try_read; /* => go read it */
- case SOCKNAL_RX_GET_FMB: /* forwarding */
- goto get_fmb; /* => go get a fwd msg buffer */
- default:
- LBUG ();
+ case SOCKNAL_RX_KSM_HEADER:
+ if (conn->ksnc_flip) {
+ __swab32s(&conn->ksnc_msg.ksm_type);
+ __swab32s(&conn->ksnc_msg.ksm_csum);
+ __swab64s(&conn->ksnc_msg.ksm_zc_req_cookie);
+ __swab64s(&conn->ksnc_msg.ksm_zc_ack_cookie);
+ }
+
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
+ conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
+ /* NOOP Checksum error */
+ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (-EIO);
+ }
+
+ if (conn->ksnc_msg.ksm_zc_ack_cookie != 0) {
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x);
+
+ rc = ksocknal_handle_zc_ack(conn->ksnc_peer,
+ conn->ksnc_msg.ksm_zc_ack_cookie);
+ if (rc != 0) {
+ CERROR("%s: Unknown zero copy ACK cookie: "LPU64"\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_zc_ack_cookie);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (rc);
 }
- /* Not Reached */
 }
- /* sets wanted_len, iovs etc */
- rc = lib_parse(&ksocknal_lib, &conn->ksnc_hdr, conn);
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
+ ksocknal_new_packet (conn, 0);
+ return 0; /* NOOP is done, just return */ 
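/* Illustrative summary of the V2 receive path driven by this switch,
 * a sketch using only the states and calls visible in this function:
 *
 * SOCKNAL_RX_KSM_HEADER reads ksock_msg_t up to ksm_u, handles any
 * piggybacked zc-ack cookie, and completes NOOP messages right here;
 * SOCKNAL_RX_LNET_HEADER reads ksnm_hdr and hands it to lnet_parse();
 * SOCKNAL_RX_PARSE parks the conn until ksocknal_recv() attaches the
 * payload iovs/kiovs; SOCKNAL_RX_LNET_PAYLOAD reads the payload,
 * verifies ksm_csum and calls lnet_finalize(); SOCKNAL_RX_SLOP
 * discards any residue before starting the next packet. */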
+ } + LASSERT (conn->ksnc_msg.ksm_type == KSOCK_MSG_LNET); - if (rc != PTL_OK) { + conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER; + conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t); + conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t); + + conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space; + conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg; + conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t); + + conn->ksnc_rx_niov = 1; + conn->ksnc_rx_kiov = NULL; + conn->ksnc_rx_nkiov = 0; + + goto again; /* read lnet header now */ + + case SOCKNAL_RX_LNET_HEADER: + /* unpack message header */ + conn->ksnc_proto->pro_unpack(&conn->ksnc_msg); + + if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { + /* Userspace peer */ + lnet_process_id_t *id = &conn->ksnc_peer->ksnp_id; + lnet_hdr_t *lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; + + /* Substitute process ID assigned at connection time */ + lhdr->src_pid = cpu_to_le32(id->pid); + lhdr->src_nid = cpu_to_le64(id->nid); + } + + conn->ksnc_rx_state = SOCKNAL_RX_PARSE; + ksocknal_conn_addref(conn); /* ++ref while parsing */ + + rc = lnet_parse(conn->ksnc_peer->ksnp_ni, + &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr, + conn->ksnc_peer->ksnp_id.nid, conn, 0); + if (rc < 0) { /* I just received garbage: give up on this conn */ + ksocknal_new_packet(conn, 0); ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_conn_decref(conn); return (-EPROTO); } - if (conn->ksnc_rx_nob_wanted != 0) { /* need to get payload? */ - conn->ksnc_rx_state = SOCKNAL_RX_BODY; - goto try_read; /* go read the payload */ - } - /* Fall through (completed packet for me) */ + /* I'm racing with ksocknal_recv() */ + LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD); - case SOCKNAL_RX_BODY: - /* payload all received */ - lib_finalize(&ksocknal_lib, NULL, conn->ksnc_cookie, PTL_OK); - /* Fall through */ + if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD) + return 0; - case SOCKNAL_RX_SLOP: - /* starting new packet? */ - if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left)) - return (0); /* come back later */ - goto try_read; /* try to finish reading slop now */ + /* ksocknal_recv() got called */ + goto again; - case SOCKNAL_RX_BODY_FWD: + case SOCKNAL_RX_LNET_PAYLOAD: /* payload all received */ - CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d fwd_start (got body)\n", - conn, le64_to_cpu(conn->ksnc_hdr.src_nid), - le64_to_cpu(conn->ksnc_hdr.dest_nid), - conn->ksnc_rx_nob_left); + rc = 0; - /* forward the packet. 
NB ksocknal_init_fmb() put fmb into - * conn->ksnc_cookie */ - fmb = (ksock_fmb_t *)conn->ksnc_cookie; - kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd); + if (conn->ksnc_rx_nob_left == 0 && /* not truncating */ + conn->ksnc_msg.ksm_csum != 0 && /* has checksum */ + conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { + CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", + libcfs_id2str(conn->ksnc_peer->ksnp_id), + conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum); + rc = -EIO; + } + + lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc); - /* no slop in forwarded packets */ - LASSERT (conn->ksnc_rx_nob_left == 0); + if (rc == 0 && conn->ksnc_msg.ksm_zc_req_cookie != 0) { + LASSERT(conn->ksnc_proto == &ksocknal_protocol_v2x); + rc = ksocknal_handle_zc_req(conn->ksnc_peer, + conn->ksnc_msg.ksm_zc_req_cookie); + } + + if (rc != 0) { + ksocknal_new_packet(conn, 0); + ksocknal_close_conn_and_siblings (conn, rc); + return (-EPROTO); + } + /* Fall through */ - ksocknal_new_packet (conn, 0); /* on to next packet */ - return (0); /* (later) */ + case SOCKNAL_RX_SLOP: + /* starting new packet? */ + if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left)) + return 0; /* come back later */ + goto again; /* try to finish reading slop now */ default: break; @@ -1636,78 +1446,76 @@ ksocknal_process_receive (ksock_conn_t *conn) return (-EINVAL); /* keep gcc happy */ } -ptl_err_t -ksocknal_recv (lib_nal_t *nal, void *private, lib_msg_t *msg, - unsigned int niov, struct iovec *iov, - size_t offset, size_t mlen, size_t rlen) +int +ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, + unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { - ksock_conn_t *conn = (ksock_conn_t *)private; + ksock_conn_t *conn = (ksock_conn_t *)private; + ksock_sched_t *sched = conn->ksnc_scheduler; LASSERT (mlen <= rlen); - LASSERT (niov <= PTL_MD_MAX_IOV); - + LASSERT (niov <= LNET_MAX_IOV); + conn->ksnc_cookie = msg; conn->ksnc_rx_nob_wanted = mlen; conn->ksnc_rx_nob_left = rlen; - conn->ksnc_rx_nkiov = 0; - conn->ksnc_rx_kiov = NULL; - conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov; - conn->ksnc_rx_niov = - lib_extract_iov(PTL_MD_MAX_IOV, conn->ksnc_rx_iov, - niov, iov, offset, mlen); + if (mlen == 0 || iov != NULL) { + conn->ksnc_rx_nkiov = 0; + conn->ksnc_rx_kiov = NULL; + conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov; + conn->ksnc_rx_niov = + lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov, + niov, iov, offset, mlen); + } else { + conn->ksnc_rx_niov = 0; + conn->ksnc_rx_iov = NULL; + conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; + conn->ksnc_rx_nkiov = + lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov, + niov, kiov, offset, mlen); + } - LASSERT (mlen == - lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) + - lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); + LASSERT (mlen == + lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) + + lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); - return (PTL_OK); -} + LASSERT (conn->ksnc_rx_scheduled); -ptl_err_t -ksocknal_recv_pages (lib_nal_t *nal, void *private, lib_msg_t *msg, - unsigned int niov, ptl_kiov_t *kiov, - size_t offset, size_t mlen, size_t rlen) -{ - ksock_conn_t *conn = (ksock_conn_t *)private; + spin_lock_bh (&sched->kss_lock); - LASSERT (mlen <= rlen); - LASSERT (niov <= PTL_MD_MAX_IOV); - - conn->ksnc_cookie = msg; - conn->ksnc_rx_nob_wanted = mlen; - conn->ksnc_rx_nob_left = rlen; + switch (conn->ksnc_rx_state) { + case 
SOCKNAL_RX_PARSE_WAIT: + list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); + cfs_waitq_signal (&sched->kss_waitq); + LASSERT (conn->ksnc_rx_ready); + break; - conn->ksnc_rx_niov = 0; - conn->ksnc_rx_iov = NULL; - conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; - conn->ksnc_rx_nkiov = - lib_extract_kiov(PTL_MD_MAX_IOV, conn->ksnc_rx_kiov, - niov, kiov, offset, mlen); + case SOCKNAL_RX_PARSE: + /* scheduler hasn't noticed I'm parsing yet */ + break; + } - LASSERT (mlen == - lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) + - lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); + conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD; - return (PTL_OK); + spin_unlock_bh (&sched->kss_lock); + ksocknal_conn_decref(conn); + return (0); } static inline int ksocknal_sched_cansleep(ksock_sched_t *sched) { - unsigned long flags; int rc; - spin_lock_irqsave(&sched->kss_lock, flags); + spin_lock_bh (&sched->kss_lock); rc = (!ksocknal_data.ksnd_shuttingdown && -#if SOCKNAL_ZC - list_empty(&sched->kss_zctxdone_list) && -#endif list_empty(&sched->kss_rx_conns) && list_empty(&sched->kss_tx_conns)); - - spin_unlock_irqrestore(&sched->kss_lock, flags); + + spin_unlock_bh (&sched->kss_lock); return (rc); } @@ -1716,28 +1524,27 @@ int ksocknal_scheduler (void *arg) ksock_sched_t *sched = (ksock_sched_t *)arg; ksock_conn_t *conn; ksock_tx_t *tx; - unsigned long flags; int rc; int nloops = 0; int id = sched - ksocknal_data.ksnd_schedulers; char name[16]; - snprintf (name, sizeof (name),"ksocknald_%02d", id); - kportal_daemonize (name); - kportal_blockallsigs (); + snprintf (name, sizeof (name),"socknal_sd%02d", id); + cfs_daemonize (name); + cfs_block_allsigs (); -#if (CONFIG_SMP && CPU_AFFINITY) +#if defined(CONFIG_SMP) && defined(CPU_AFFINITY) id = ksocknal_sched2cpu(id); if (cpu_online(id)) { - cpumask_t m; + cpumask_t m = CPU_MASK_NONE; cpu_set(id, m); set_cpus_allowed(current, m); } else { CERROR ("Can't set CPU affinity for %s to %d\n", name, id); } #endif /* CONFIG_SMP && CPU_AFFINITY */ - - spin_lock_irqsave (&sched->kss_lock, flags); + + spin_lock_bh (&sched->kss_lock); while (!ksocknal_data.ksnd_shuttingdown) { int did_something = 0; @@ -1757,11 +1564,11 @@ int ksocknal_scheduler (void *arg) * data_ready can set it any time after we release * kss_lock. */ conn->ksnc_rx_ready = 0; - spin_unlock_irqrestore(&sched->kss_lock, flags); - + spin_unlock_bh (&sched->kss_lock); + rc = ksocknal_process_receive(conn); - - spin_lock_irqsave(&sched->kss_lock, flags); + + spin_lock_bh (&sched->kss_lock); /* I'm the only one that can clear this flag */ LASSERT(conn->ksnc_rx_scheduled); @@ -1769,14 +1576,12 @@ int ksocknal_scheduler (void *arg) /* Did process_receive get everything it wanted? */ if (rc == 0) conn->ksnc_rx_ready = 1; - - if (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP || - conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB) { - /* Conn blocked for a forwarding buffer. - * It will get queued for my attention when - * one becomes available (and it might just - * already have been!). Meanwhile my ref - * on it stays put. 
*/ + + if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) { + /* Conn blocked waiting for ksocknal_recv() + * I change its state (under lock) to signal + * it can be rescheduled */ + conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT; } else if (conn->ksnc_rx_ready) { /* reschedule for rx */ list_add_tail (&conn->ksnc_rx_list, @@ -1784,42 +1589,62 @@ int ksocknal_scheduler (void *arg) } else { conn->ksnc_rx_scheduled = 0; /* drop my ref */ - ksocknal_put_conn(conn); + ksocknal_conn_decref(conn); } did_something = 1; } if (!list_empty (&sched->kss_tx_conns)) { + CFS_LIST_HEAD (zlist); + + if (!list_empty(&sched->kss_zombie_noop_txs)) { + list_add(&zlist, &sched->kss_zombie_noop_txs); + list_del_init(&sched->kss_zombie_noop_txs); + } + conn = list_entry(sched->kss_tx_conns.next, ksock_conn_t, ksnc_tx_list); list_del (&conn->ksnc_tx_list); - + LASSERT(conn->ksnc_tx_scheduled); LASSERT(conn->ksnc_tx_ready); LASSERT(!list_empty(&conn->ksnc_tx_queue)); - + tx = list_entry(conn->ksnc_tx_queue.next, ksock_tx_t, tx_list); + + if (conn->ksnc_tx_mono == tx) + ksocknal_next_mono_tx(conn); + /* dequeue now so empty list => more to send */ list_del(&tx->tx_list); - + /* Clear tx_ready in case send isn't complete. Do * it BEFORE we call process_transmit, since * write_space can set it any time after we release * kss_lock. */ conn->ksnc_tx_ready = 0; - spin_unlock_irqrestore (&sched->kss_lock, flags); + spin_unlock_bh (&sched->kss_lock); - rc = ksocknal_process_transmit(conn, tx); + if (!list_empty(&zlist)) { + /* free zombie noop txs, it's fast because + * noop txs are just put in freelist */ + ksocknal_txlist_done(NULL, &zlist, 0); + } - spin_lock_irqsave (&sched->kss_lock, flags); + rc = ksocknal_process_transmit(conn, tx); if (rc == -ENOMEM || rc == -EAGAIN) { /* Incomplete send: replace tx on HEAD of tx_queue */ + spin_lock_bh (&sched->kss_lock); list_add (&tx->tx_list, &conn->ksnc_tx_queue); } else { - /* Complete send; assume space for more */ + /* Complete send; tx -ref */ + ksocknal_tx_decref (tx); + + spin_lock_bh (&sched->kss_lock); + /* assume space for more */ conn->ksnc_tx_ready = 1; } @@ -1829,802 +1654,771 @@ int ksocknal_scheduler (void *arg) } else if (conn->ksnc_tx_ready && !list_empty (&conn->ksnc_tx_queue)) { /* reschedule for tx */ - list_add_tail (&conn->ksnc_tx_list, + list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns); } else { conn->ksnc_tx_scheduled = 0; /* drop my ref */ - ksocknal_put_conn (conn); + ksocknal_conn_decref(conn); } - - did_something = 1; - } -#if SOCKNAL_ZC - if (!list_empty (&sched->kss_zctxdone_list)) { - ksock_tx_t *tx = - list_entry(sched->kss_zctxdone_list.next, - ksock_tx_t, tx_list); - did_something = 1; - - list_del (&tx->tx_list); - spin_unlock_irqrestore (&sched->kss_lock, flags); - ksocknal_tx_done (tx, 1); - - spin_lock_irqsave (&sched->kss_lock, flags); + did_something = 1; } -#endif if (!did_something || /* nothing to do */ ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? 
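Yes, once nloops reaches SOCKNAL_RESCHED: drop kss_lock and yield so other threads can run; the exclusive wait below also means each cfs_waitq_signal() wakes at most one idle scheduler instead of the whole pool. 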
*/ - spin_unlock_irqrestore (&sched->kss_lock, flags); + spin_unlock_bh (&sched->kss_lock); nloops = 0; if (!did_something) { /* wait for something to do */ - rc = wait_event_interruptible (sched->kss_waitq, - !ksocknal_sched_cansleep(sched)); + rc = wait_event_interruptible_exclusive( + sched->kss_waitq, + !ksocknal_sched_cansleep(sched)); LASSERT (rc == 0); - } else - our_cond_resched(); + } else { + our_cond_resched(); + } - spin_lock_irqsave (&sched->kss_lock, flags); + spin_lock_bh (&sched->kss_lock); } } - spin_unlock_irqrestore (&sched->kss_lock, flags); + spin_unlock_bh (&sched->kss_lock); ksocknal_thread_fini (); return (0); } -void -ksocknal_data_ready (struct sock *sk, int n) +/* + * Add connection to kss_rx_conns of scheduler + * and wakeup the scheduler. + */ +void ksocknal_read_callback (ksock_conn_t *conn) { - unsigned long flags; - ksock_conn_t *conn; ksock_sched_t *sched; ENTRY; - /* interleave correctly with closing sockets... */ - read_lock (&ksocknal_data.ksnd_global_lock); - - conn = sk->sk_user_data; - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ - LASSERT (sk->sk_data_ready != &ksocknal_data_ready); - sk->sk_data_ready (sk, n); - } else { - sched = conn->ksnc_scheduler; + sched = conn->ksnc_scheduler; - spin_lock_irqsave (&sched->kss_lock, flags); + spin_lock_bh (&sched->kss_lock); - conn->ksnc_rx_ready = 1; + conn->ksnc_rx_ready = 1; - if (!conn->ksnc_rx_scheduled) { /* not being progressed */ - list_add_tail(&conn->ksnc_rx_list, - &sched->kss_rx_conns); - conn->ksnc_rx_scheduled = 1; - /* extra ref for scheduler */ - atomic_inc (&conn->ksnc_refcount); + if (!conn->ksnc_rx_scheduled) { /* not being progressed */ + list_add_tail(&conn->ksnc_rx_list, + &sched->kss_rx_conns); + conn->ksnc_rx_scheduled = 1; + /* extra ref for scheduler */ + ksocknal_conn_addref(conn); - wake_up (&sched->kss_waitq); - } - - spin_unlock_irqrestore (&sched->kss_lock, flags); + cfs_waitq_signal (&sched->kss_waitq); } - - read_unlock (&ksocknal_data.ksnd_global_lock); + spin_unlock_bh (&sched->kss_lock); EXIT; } -void -ksocknal_write_space (struct sock *sk) +/* + * Add connection to kss_tx_conns of scheduler + * and wakeup the scheduler. + */ +void ksocknal_write_callback (ksock_conn_t *conn) { - unsigned long flags; - ksock_conn_t *conn; ksock_sched_t *sched; + ENTRY; - /* interleave correctly with closing sockets... */ - read_lock (&ksocknal_data.ksnd_global_lock); + sched = conn->ksnc_scheduler; - conn = sk->sk_user_data; + spin_lock_bh (&sched->kss_lock); - CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", - sk, tcp_wspace(sk), SOCKNAL_TX_LOW_WATER(sk), conn, - (conn == NULL) ? "" : (conn->ksnc_tx_ready ? - " ready" : " blocked"), - (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ? - " scheduled" : " idle"), - (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ? 
- " empty" : " queued")); + conn->ksnc_tx_ready = 1; - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ - LASSERT (sk->sk_write_space != &ksocknal_write_space); - sk->sk_write_space (sk); + if (!conn->ksnc_tx_scheduled && // not being progressed + !list_empty(&conn->ksnc_tx_queue)){//packets to send + list_add_tail (&conn->ksnc_tx_list, + &sched->kss_tx_conns); + conn->ksnc_tx_scheduled = 1; + /* extra ref for scheduler */ + ksocknal_conn_addref(conn); - read_unlock (&ksocknal_data.ksnd_global_lock); - return; + cfs_waitq_signal (&sched->kss_waitq); } - if (tcp_wspace(sk) >= SOCKNAL_TX_LOW_WATER(sk)) { /* got enough space */ - sched = conn->ksnc_scheduler; + spin_unlock_bh (&sched->kss_lock); - spin_lock_irqsave (&sched->kss_lock, flags); + EXIT; +} - clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags); - conn->ksnc_tx_ready = 1; +ksock_proto_t * +ksocknal_parse_proto_version (ksock_hello_msg_t *hello) +{ + if ((hello->kshm_magic == LNET_PROTO_MAGIC && + hello->kshm_version == KSOCK_PROTO_V2) || + (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC) && + hello->kshm_version == __swab32(KSOCK_PROTO_V2))) { +#if SOCKNAL_VERSION_DEBUG + if (*ksocknal_tunables.ksnd_protocol != 2) + return NULL; +#endif + return &ksocknal_protocol_v2x; + } - if (!conn->ksnc_tx_scheduled && // not being progressed - !list_empty(&conn->ksnc_tx_queue)){//packets to send - list_add_tail (&conn->ksnc_tx_list, - &sched->kss_tx_conns); - conn->ksnc_tx_scheduled = 1; - /* extra ref for scheduler */ - atomic_inc (&conn->ksnc_refcount); + if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { + lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello; - wake_up (&sched->kss_waitq); - } + CLASSERT (sizeof (lnet_magicversion_t) == + offsetof (ksock_hello_msg_t, kshm_src_nid)); - spin_unlock_irqrestore (&sched->kss_lock, flags); + if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) && + hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR)) + return &ksocknal_protocol_v1x; } - read_unlock (&ksocknal_data.ksnd_global_lock); + return NULL; } -int -ksocknal_sock_write (struct socket *sock, void *buffer, int nob) +static int +ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello) { - int rc; - mm_segment_t oldmm = get_fs(); - - while (nob > 0) { - struct iovec iov = { - .iov_base = buffer, - .iov_len = nob - }; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = &iov, - .msg_iovlen = 1, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = 0 - }; - - set_fs (KERNEL_DS); - rc = sock_sendmsg (sock, &msg, iov.iov_len); - set_fs (oldmm); - - if (rc < 0) - return (rc); - - if (rc == 0) { - CERROR ("Unexpected zero rc\n"); - return (-ECONNABORTED); + cfs_socket_t *sock = conn->ksnc_sock; + lnet_hdr_t *hdr; + lnet_magicversion_t *hmv; + int rc; + int i; + + CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); + + LIBCFS_ALLOC(hdr, sizeof(*hdr)); + if (hdr == NULL) { + CERROR("Can't allocate lnet_hdr_t\n"); + return -ENOMEM; + } + + hmv = (lnet_magicversion_t *)&hdr->dest_nid; + + /* Re-organize V2.x message header to V1.x (lnet_hdr_t) + * header and send out */ + hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); + hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); + hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); + + if (the_lnet.ln_testprotocompat != 0) { + /* single-shot proto check */ + LNET_LOCK(); + if ((the_lnet.ln_testprotocompat & 1) != 0) { + hmv->version_major++; /* just different! 
*/ + the_lnet.ln_testprotocompat &= ~1; } + if ((the_lnet.ln_testprotocompat & 2) != 0) { + hmv->magic = LNET_PROTO_MAGIC; + the_lnet.ln_testprotocompat &= ~2; + } + LNET_UNLOCK(); + } + + hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); + hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); + hdr->type = cpu_to_le32 (LNET_MSG_HELLO); + hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); + hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); + hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); - buffer = ((char *)buffer) + rc; - nob -= rc; + rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); + + if (rc != 0) { + CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n", + rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); + goto out; } - - return (0); -} -int -ksocknal_sock_read (struct socket *sock, void *buffer, int nob) -{ - int rc; - mm_segment_t oldmm = get_fs(); - - while (nob > 0) { - struct iovec iov = { - .iov_base = buffer, - .iov_len = nob - }; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = &iov, - .msg_iovlen = 1, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = 0 - }; - - set_fs (KERNEL_DS); - rc = sock_recvmsg (sock, &msg, iov.iov_len, 0); - set_fs (oldmm); - - if (rc < 0) - return (rc); + if (hello->kshm_nips == 0) + goto out; - if (rc == 0) - return (-ECONNABORTED); + for (i = 0; i < hello->kshm_nips; i++) { + hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); + } - buffer = ((char *)buffer) + rc; - nob -= rc; + rc = libcfs_sock_write(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), + lnet_acceptor_timeout()); + if (rc != 0) { + CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)" + " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips, + HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); } - - return (0); +out: + LIBCFS_FREE(hdr, sizeof(*hdr)); + + return rc; } -int -ksocknal_send_hello (ksock_conn_t *conn, __u32 *ipaddrs, int nipaddrs) +static int +ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello) { - /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ - struct socket *sock = conn->ksnc_sock; - ptl_hdr_t hdr; - ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid; - int i; - int rc; - - LASSERT (conn->ksnc_type != SOCKNAL_CONN_NONE); - LASSERT (nipaddrs <= SOCKNAL_MAX_INTERFACES); - - /* No need for getconnsock/putconnsock */ - LASSERT (!conn->ksnc_closing); + cfs_socket_t *sock = conn->ksnc_sock; + int rc; - LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid)); - hmv->magic = cpu_to_le32 (PORTALS_PROTO_MAGIC); - hmv->version_major = cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR); - hmv->version_minor = cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR); + hello->kshm_magic = LNET_PROTO_MAGIC; + hello->kshm_version = KSOCK_PROTO_V2; - hdr.src_nid = cpu_to_le64 (ksocknal_lib.libnal_ni.ni_pid.nid); - hdr.type = cpu_to_le32 (PTL_MSG_HELLO); - hdr.payload_length = cpu_to_le32 (nipaddrs * sizeof(*ipaddrs)); + if (the_lnet.ln_testprotocompat != 0) { + /* single-shot proto check */ + LNET_LOCK(); + if ((the_lnet.ln_testprotocompat & 1) != 0) { + hello->kshm_version++; /* just different! 
*/ + the_lnet.ln_testprotocompat &= ~1; + } + LNET_UNLOCK(); + } - hdr.msg.hello.type = cpu_to_le32 (conn->ksnc_type); - hdr.msg.hello.incarnation = - cpu_to_le64 (ksocknal_data.ksnd_incarnation); + rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), + lnet_acceptor_timeout()); - /* Receiver is eager */ - rc = ksocknal_sock_write (sock, &hdr, sizeof(hdr)); if (rc != 0) { - CERROR ("Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n", + CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n", rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - return (rc); - } - - if (nipaddrs == 0) - return (0); - - for (i = 0; i < nipaddrs; i++) { - ipaddrs[i] = __cpu_to_le32 (ipaddrs[i]); + return rc; } - rc = ksocknal_sock_write (sock, ipaddrs, nipaddrs * sizeof(*ipaddrs)); - if (rc != 0) - CERROR ("Error %d sending HELLO payload (%d)" - " to %u.%u.%u.%u/%d\n", rc, nipaddrs, - HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); - return (rc); -} - -int -ksocknal_invert_type(int type) -{ - switch (type) - { - case SOCKNAL_CONN_ANY: - case SOCKNAL_CONN_CONTROL: - return (type); - case SOCKNAL_CONN_BULK_IN: - return SOCKNAL_CONN_BULK_OUT; - case SOCKNAL_CONN_BULK_OUT: - return SOCKNAL_CONN_BULK_IN; - default: - return (SOCKNAL_CONN_NONE); - } -} + if (hello->kshm_nips == 0) + return 0; -int -ksocknal_recv_hello (ksock_conn_t *conn, ptl_nid_t *nid, - __u64 *incarnation, __u32 *ipaddrs) -{ - struct socket *sock = conn->ksnc_sock; - int rc; - int nips; - int i; - int type; - ptl_hdr_t hdr; - ptl_magicversion_t *hmv; - - hmv = (ptl_magicversion_t *)&hdr.dest_nid; - LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid)); - - rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv)); + rc = libcfs_sock_write(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), + lnet_acceptor_timeout()); if (rc != 0) { - CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n", - rc, HIPQUAD(conn->ksnc_ipaddr)); - return (rc); + CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)" + " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips, + HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port); } - if (hmv->magic != le32_to_cpu (PORTALS_PROTO_MAGIC)) { - CERROR ("Bad magic %#08x (%#08x expected) from %u.%u.%u.%u\n", - __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, - HIPQUAD(conn->ksnc_ipaddr)); - return (-EPROTO); - } + return rc; +} - if (hmv->version_major != cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) || - hmv->version_minor != cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) { - CERROR ("Incompatible protocol version %d.%d (%d.%d expected)" - " from %u.%u.%u.%u\n", - le16_to_cpu (hmv->version_major), - le16_to_cpu (hmv->version_minor), - PORTALS_PROTO_VERSION_MAJOR, - PORTALS_PROTO_VERSION_MINOR, - HIPQUAD(conn->ksnc_ipaddr)); - return (-EPROTO); - } +static int +ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout) +{ + cfs_socket_t *sock = conn->ksnc_sock; + lnet_hdr_t *hdr; + int rc; + int i; -#if (PORTALS_PROTO_VERSION_MAJOR != 1) -# error "This code only understands protocol version 1.x" -#endif - /* version 1 sends magic/version as the dest_nid of a 'hello' - * header, followed by payload full of interface IP addresses. - * Read the rest of it in now... 
*/ + LIBCFS_ALLOC(hdr, sizeof(*hdr)); + if (hdr == NULL) { + CERROR("Can't allocate lnet_hdr_t\n"); + return -ENOMEM; + } - rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv)); + rc = libcfs_sock_read(sock, &hdr->src_nid, + sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid), + timeout); if (rc != 0) { CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n", rc, HIPQUAD(conn->ksnc_ipaddr)); - return (rc); + LASSERT (rc < 0 && rc != -EALREADY); + goto out; } /* ...and check we got what we expected */ - if (hdr.type != cpu_to_le32 (PTL_MSG_HELLO)) { + if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) { CERROR ("Expecting a HELLO hdr," " but got type %d from %u.%u.%u.%u\n", - le32_to_cpu (hdr.type), + le32_to_cpu (hdr->type), HIPQUAD(conn->ksnc_ipaddr)); - return (-EPROTO); + rc = -EPROTO; + goto out; } - if (le64_to_cpu(hdr.src_nid) == PTL_NID_ANY) { - CERROR("Expecting a HELLO hdr with a NID, but got PTL_NID_ANY" - "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr)); - return (-EPROTO); + hello->kshm_src_nid = le64_to_cpu (hdr->src_nid); + hello->kshm_src_pid = le32_to_cpu (hdr->src_pid); + hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation); + hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type); + hello->kshm_nips = le32_to_cpu (hdr->payload_length) / + sizeof (__u32); + + if (hello->kshm_nips > LNET_MAX_INTERFACES) { + CERROR("Bad nips %d from ip %u.%u.%u.%u\n", + hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr)); + rc = -EPROTO; + goto out; } - if (*nid == PTL_NID_ANY) { /* don't know peer's nid yet */ - *nid = le64_to_cpu(hdr.src_nid); - } else if (*nid != le64_to_cpu (hdr.src_nid)) { - CERROR ("Connected to nid "LPX64"@%u.%u.%u.%u " - "but expecting "LPX64"\n", - le64_to_cpu (hdr.src_nid), - HIPQUAD(conn->ksnc_ipaddr), *nid); - return (-EPROTO); + if (hello->kshm_nips == 0) + goto out; + + rc = libcfs_sock_read(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), timeout); + if (rc != 0) { + CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n", + rc, HIPQUAD(conn->ksnc_ipaddr)); + LASSERT (rc < 0 && rc != -EALREADY); + goto out; } - type = __le32_to_cpu(hdr.msg.hello.type); + for (i = 0; i < hello->kshm_nips; i++) { + hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]); - if (conn->ksnc_type == SOCKNAL_CONN_NONE) { - /* I've accepted this connection; peer determines type */ - conn->ksnc_type = ksocknal_invert_type(type); - if (conn->ksnc_type == SOCKNAL_CONN_NONE) { - CERROR ("Unexpected type %d from "LPX64"@%u.%u.%u.%u\n", - type, *nid, HIPQUAD(conn->ksnc_ipaddr)); - return (-EPROTO); + if (hello->kshm_ips[i] == 0) { + CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n", + i, HIPQUAD(conn->ksnc_ipaddr)); + rc = -EPROTO; + break; } - } else if (ksocknal_invert_type(type) != conn->ksnc_type) { - CERROR ("Mismatched types: me %d, "LPX64"@%u.%u.%u.%u %d\n", - conn->ksnc_type, *nid, HIPQUAD(conn->ksnc_ipaddr), - le32_to_cpu(hdr.msg.hello.type)); - return (-EPROTO); } +out: + LIBCFS_FREE(hdr, sizeof(*hdr)); + + return rc; +} + +static int +ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) +{ + cfs_socket_t *sock = conn->ksnc_sock; + int rc; + int i; - *incarnation = le64_to_cpu(hdr.msg.hello.incarnation); + if (hello->kshm_magic == LNET_PROTO_MAGIC) + conn->ksnc_flip = 0; + else + conn->ksnc_flip = 1; - nips = __le32_to_cpu (hdr.payload_length) / sizeof (__u32); + rc = libcfs_sock_read(sock, &hello->kshm_src_nid, + offsetof(ksock_hello_msg_t, kshm_ips) - + offsetof(ksock_hello_msg_t, kshm_src_nid), + timeout); + if (rc != 0) 
{ + CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n", + rc, HIPQUAD(conn->ksnc_ipaddr)); + LASSERT (rc < 0 && rc != -EALREADY); + return rc; + } - if (nips > SOCKNAL_MAX_INTERFACES || - nips * sizeof(__u32) != __le32_to_cpu (hdr.payload_length)) { - CERROR("Bad payload length %d from "LPX64"@%u.%u.%u.%u\n", - __le32_to_cpu (hdr.payload_length), - *nid, HIPQUAD(conn->ksnc_ipaddr)); + if (conn->ksnc_flip) { + __swab32s(&hello->kshm_src_pid); + __swab64s(&hello->kshm_src_nid); + __swab32s(&hello->kshm_dst_pid); + __swab64s(&hello->kshm_dst_nid); + __swab64s(&hello->kshm_src_incarnation); + __swab64s(&hello->kshm_dst_incarnation); + __swab32s(&hello->kshm_ctype); + __swab32s(&hello->kshm_nips); } - if (nips == 0) - return (0); - - rc = ksocknal_sock_read (sock, ipaddrs, nips * sizeof(*ipaddrs)); + if (hello->kshm_nips > LNET_MAX_INTERFACES) { + CERROR("Bad nips %d from ip %u.%u.%u.%u\n", + hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr)); + return -EPROTO; + } + + if (hello->kshm_nips == 0) + return 0; + + rc = libcfs_sock_read(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), timeout); if (rc != 0) { - CERROR ("Error %d reading IPs from "LPX64"@%u.%u.%u.%u\n", - rc, *nid, HIPQUAD(conn->ksnc_ipaddr)); - return (rc); + CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n", + rc, HIPQUAD(conn->ksnc_ipaddr)); + LASSERT (rc < 0 && rc != -EALREADY); + return rc; } - for (i = 0; i < nips; i++) { - ipaddrs[i] = __le32_to_cpu(ipaddrs[i]); - - if (ipaddrs[i] == 0) { - CERROR("Zero IP[%d] from "LPX64"@%u.%u.%u.%u\n", - i, *nid, HIPQUAD(conn->ksnc_ipaddr)); - return (-EPROTO); + for (i = 0; i < hello->kshm_nips; i++) { + if (conn->ksnc_flip) + __swab32s(&hello->kshm_ips[i]); + + if (hello->kshm_ips[i] == 0) { + CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n", + i, HIPQUAD(conn->ksnc_ipaddr)); + return -EPROTO; } } - return (nips); + return 0; } -int -ksocknal_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) +static void +ksocknal_pack_msg_v1(ksock_tx_t *tx) { - mm_segment_t oldmm = get_fs (); - struct socket *sock = conn->ksnc_sock; - int len; - int rc; + /* V1.x has no KSOCK_MSG_NOOP */ + LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); + LASSERT(tx->tx_lnetmsg != NULL); - rc = ksocknal_getconnsock (conn); - if (rc != 0) { - LASSERT (conn->ksnc_closing); - *txmem = *rxmem = *nagle = 0; - return (-ESHUTDOWN); - } - - set_fs (KERNEL_DS); + tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr; + tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t); - len = sizeof(*txmem); - rc = sock_getsockopt(sock, SOL_SOCKET, SO_SNDBUF, - (char *)txmem, &len); - if (rc == 0) { - len = sizeof(*rxmem); - rc = sock_getsockopt(sock, SOL_SOCKET, SO_RCVBUF, - (char *)rxmem, &len); - } - if (rc == 0) { - len = sizeof(*nagle); - rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)nagle, &len); + tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); +} + +static void +ksocknal_pack_msg_v2(ksock_tx_t *tx) +{ + tx->tx_iov[0].iov_base = (void *)&tx->tx_msg; + + if (tx->tx_lnetmsg != NULL) { + LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); + + tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; + tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload); + tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload) + + tx->tx_lnetmsg->msg_len; + } else { + LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); + + tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); + tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, 
ksm_u.lnetmsg.ksnm_hdr);
 }
+ /* Don't checksum until we start sending: an ACK may still be piggybacked on this packet */
+}

- set_fs (oldmm);
- ksocknal_putconnsock (conn);
+static void
+ksocknal_unpack_msg_v1(ksock_msg_t *msg)
+{
+ msg->ksm_type = KSOCK_MSG_LNET;
+ msg->ksm_csum = 0;
+ msg->ksm_zc_req_cookie = 0;
+ msg->ksm_zc_ack_cookie = 0;
+}

- if (rc == 0)
- *nagle = !*nagle;
- else
- *txmem = *rxmem = *nagle = 0;
-
- return (rc);
+static void
+ksocknal_unpack_msg_v2(ksock_msg_t *msg)
+{
+ return; /* Do nothing */
 }

+ksock_proto_t ksocknal_protocol_v1x =
+{
+ KSOCK_PROTO_V1,
+ ksocknal_send_hello_v1,
+ ksocknal_recv_hello_v1,
+ ksocknal_pack_msg_v1,
+ ksocknal_unpack_msg_v1
+};
+
+ksock_proto_t ksocknal_protocol_v2x =
+{
+ KSOCK_PROTO_V2,
+ ksocknal_send_hello_v2,
+ ksocknal_recv_hello_v2,
+ ksocknal_pack_msg_v2,
+ ksocknal_unpack_msg_v2
+};
+
 int
-ksocknal_setup_sock (struct socket *sock)
+ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
- mm_segment_t oldmm = get_fs ();
- int rc;
- int option;
- int keep_idle;
- int keep_intvl;
- int keep_count;
- int do_keepalive;
- struct linger linger;
+ /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
+ ksock_net_t *net = (ksock_net_t *)ni->ni_data;

- sock->sk->sk_allocation = GFP_NOFS;
+ LASSERT (0 <= hello->kshm_nips && hello->kshm_nips <= LNET_MAX_INTERFACES);

- /* Ensure this socket aborts active sends immediately when we close
- * it. */
+ /* rely on caller to hold a ref on socket so it won't disappear */
+ LASSERT (conn->ksnc_proto != NULL);

- linger.l_onoff = 0;
- linger.l_linger = 0;
+ hello->kshm_src_nid = ni->ni_nid;
+ hello->kshm_dst_nid = peer_nid;
+ hello->kshm_src_pid = the_lnet.ln_pid;

- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
- (char *)&linger, sizeof (linger));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set SO_LINGER: %d\n", rc);
- return (rc);
+ hello->kshm_src_incarnation = net->ksnn_incarnation;
+ hello->kshm_ctype = conn->ksnc_type;
+
+ return conn->ksnc_proto->pro_send_hello(conn, hello);
+}
+
+int
+ksocknal_invert_type(int type)
+{
+ switch (type)
+ {
+ case SOCKLND_CONN_ANY:
+ case SOCKLND_CONN_CONTROL:
+ return (type);
+ case SOCKLND_CONN_BULK_IN:
+ return SOCKLND_CONN_BULK_OUT;
+ case SOCKLND_CONN_BULK_OUT:
+ return SOCKLND_CONN_BULK_IN;
+ default:
+ return (SOCKLND_CONN_NONE);
 }
+}

- option = -1;
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
+int
+ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+ __u64 *incarnation)
+{
+ /* Return < 0 fatal error
+ * 0 success
+ * EALREADY lost connection race
+ * EPROTO protocol version mismatch
+ */
+ cfs_socket_t *sock = conn->ksnc_sock;
+ int active = (conn->ksnc_proto != NULL);
+ int timeout;
+ int proto_match;
+ int rc;
+ ksock_proto_t *proto;
+ lnet_process_id_t recv_id;
+
+ /* socket type set on active connections - not set on passive */
+ LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
+
+ timeout = active ? 
*ksocknal_tunables.ksnd_timeout :
+ lnet_acceptor_timeout();
+
+ rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
 if (rc != 0) {
- CERROR ("Can't set SO_LINGER: %d\n", rc);
- return (rc);
+ CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0);
+ return rc;
 }

- if (!ksocknal_tunables.ksnd_nagle) {
- option = 1;
-
- set_fs (KERNEL_DS);
- rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't disable nagle: %d\n", rc);
- return (rc);
- }
+ if (hello->kshm_magic != LNET_PROTO_MAGIC &&
+ hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
+ hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+ /* Unexpected magic! */
+ CERROR ("Bad magic(1) %#08x (%#08x expected) from "
+ "%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC,
+ HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
 }
-
- if (ksocknal_tunables.ksnd_buffer_size > 0) {
- option = ksocknal_tunables.ksnd_buffer_size;
-
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDBUF,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set send buffer %d: %d\n",
- option, rc);
- return (rc);
- }
+ rc = libcfs_sock_read(sock, &hello->kshm_version,
+ sizeof(hello->kshm_version), timeout);
+ if (rc != 0) {
+ CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0);
+ return rc;
+ }
+
+ proto = ksocknal_parse_proto_version(hello);
+ if (proto == NULL) {
+ if (!active) {
+ /* unknown protocol from peer, tell peer my protocol */
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+#if SOCKNAL_VERSION_DEBUG
+ if (*ksocknal_tunables.ksnd_protocol != 2)
+ conn->ksnc_proto = &ksocknal_protocol_v1x;
+#endif
+ hello->kshm_nips = 0;
+ ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
 }
- set_fs (KERNEL_DS);
- rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVBUF,
- (char *)&option, sizeof (option));
- set_fs (oldmm);
- if (rc != 0) {
- CERROR ("Can't set receive buffer %d: %d\n",
- option, rc);
- return (rc);
+ CERROR ("Unknown protocol version (%d.x expected)"
+ " from %u.%u.%u.%u\n",
+ conn->ksnc_proto->pro_version,
+ HIPQUAD(conn->ksnc_ipaddr));

+ return -EPROTO;
 }

- /* snapshot tunables */
- keep_idle = ksocknal_tunables.ksnd_keepalive_idle;
- keep_count = ksocknal_tunables.ksnd_keepalive_count;
- keep_intvl = ksocknal_tunables.ksnd_keepalive_intvl;
-
- do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
+ proto_match = (conn->ksnc_proto == proto);
+ conn->ksnc_proto = proto;

+ /* receive the rest of hello message anyway */
+ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading or checking hello from %u.%u.%u.%u\n",
+ rc, HIPQUAD(conn->ksnc_ipaddr));
+ LASSERT (rc < 0);
+ return rc;
 }

+ *incarnation = hello->kshm_src_incarnation;
+
+ if (hello->kshm_src_nid == LNET_NID_ANY) {
+ CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
+ "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
+ return -EPROTO;
 }

- option = (do_keepalive ? 
1 : 0); - set_fs (KERNEL_DS); - rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&option, sizeof (option)); - set_fs (oldmm); - if (rc != 0) { - CERROR ("Can't set SO_KEEPALIVE: %d\n", rc); - return (rc); + return -EPROTO; } - if (!do_keepalive) - return (0); + proto_match = (conn->ksnc_proto == proto); + conn->ksnc_proto = proto; - set_fs (KERNEL_DS); - rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE, - (char *)&keep_idle, sizeof (keep_idle)); - set_fs (oldmm); + /* receive the rest of hello message anyway */ + rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout); if (rc != 0) { - CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc); - return (rc); + CERROR("Error %d reading or checking hello from from %u.%u.%u.%u\n", + rc, HIPQUAD(conn->ksnc_ipaddr)); + LASSERT (rc < 0); + return rc; } - set_fs (KERNEL_DS); - rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keep_intvl, sizeof (keep_intvl)); - set_fs (oldmm); - if (rc != 0) { - CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc); - return (rc); + *incarnation = hello->kshm_src_incarnation; + + if (hello->kshm_src_nid == LNET_NID_ANY) { + CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY" + "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr)); + return -EPROTO; } - set_fs (KERNEL_DS); - rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT, - (char *)&keep_count, sizeof (keep_count)); - set_fs (oldmm); - if (rc != 0) { - CERROR ("Can't set TCP_KEEPCNT: %d\n", rc); - return (rc); + if (!active && + conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) { + /* Userspace NAL assigns peer process ID from socket */ + recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG; + recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr); + } else { + recv_id.nid = hello->kshm_src_nid; + recv_id.pid = hello->kshm_src_pid; } - return (0); -} + if (!active) { + *peerid = recv_id; -static int -ksocknal_connect_sock(struct socket **sockp, int *may_retry, - ksock_route_t *route, int local_port) -{ - struct sockaddr_in locaddr; - struct sockaddr_in srvaddr; - struct socket *sock; - int rc; - int option; - mm_segment_t oldmm = get_fs(); - struct timeval tv; - - memset(&locaddr, 0, sizeof(locaddr)); - locaddr.sin_family = AF_INET; - locaddr.sin_port = htons(local_port); - locaddr.sin_addr.s_addr = - (route->ksnr_myipaddr != 0) ? htonl(route->ksnr_myipaddr) - : INADDR_ANY; - - memset (&srvaddr, 0, sizeof (srvaddr)); - srvaddr.sin_family = AF_INET; - srvaddr.sin_port = htons (route->ksnr_port); - srvaddr.sin_addr.s_addr = htonl (route->ksnr_ipaddr); - - *may_retry = 0; - - rc = sock_create (PF_INET, SOCK_STREAM, 0, &sock); - *sockp = sock; - if (rc != 0) { - CERROR ("Can't create autoconnect socket: %d\n", rc); - return (rc); - } + /* peer determines type */ + conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype); + if (conn->ksnc_type == SOCKLND_CONN_NONE) { + CERROR ("Unexpected type %d from %s ip %u.%u.%u.%u\n", + hello->kshm_ctype, libcfs_id2str(*peerid), + HIPQUAD(conn->ksnc_ipaddr)); + return -EPROTO; + } - /* Ugh; have to map_fd for compatibility with sockets passed in - * from userspace. 
And we actually need the sock->file refcounting - * that this gives you :) */ + return 0; + } - rc = sock_map_fd (sock); - if (rc < 0) { - sock_release (sock); - CERROR ("sock_map_fd error %d\n", rc); - return (rc); + if (peerid->pid != recv_id.pid || + peerid->nid != recv_id.nid) { + LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host" + " %u.%u.%u.%u, but they claimed they were " + "%s; please check your Lustre " + "configuration.\n", + libcfs_id2str(*peerid), + HIPQUAD(conn->ksnc_ipaddr), + libcfs_id2str(recv_id)); + return -EPROTO; } - /* NB the file descriptor (rc) now owns the ref on sock->file */ - LASSERT (sock->file != NULL); - LASSERT (file_count(sock->file) == 1); + if (hello->kshm_ctype == SOCKLND_CONN_NONE) { + /* Possible protocol mismatch or I lost the connection race */ + return proto_match ? EALREADY : EPROTO; + } - get_file(sock->file); /* extra ref makes sock->file */ - sys_close(rc); /* survive this close */ + if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) { + CERROR ("Mismatched types: me %d, %s ip %u.%u.%u.%u %d\n", + conn->ksnc_type, libcfs_id2str(*peerid), + HIPQUAD(conn->ksnc_ipaddr), + hello->kshm_ctype); + return -EPROTO; + } - /* Still got a single ref on sock->file */ - LASSERT (file_count(sock->file) == 1); + return 0; +} - /* Set the socket timeouts, so our connection attempt completes in - * finite time */ - tv.tv_sec = ksocknal_tunables.ksnd_io_timeout; - tv.tv_usec = 0; +void +ksocknal_connect (ksock_route_t *route) +{ + CFS_LIST_HEAD (zombies); + ksock_peer_t *peer = route->ksnr_peer; + int type; + int wanted; + cfs_socket_t *sock; + cfs_time_t deadline; + int retry_later = 0; + int rc = 0; - set_fs (KERNEL_DS); - rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDTIMEO, - (char *)&tv, sizeof (tv)); - set_fs (oldmm); - if (rc != 0) { - CERROR ("Can't set send timeout %d: %d\n", - ksocknal_tunables.ksnd_io_timeout, rc); - goto failed; - } - - set_fs (KERNEL_DS); - rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVTIMEO, - (char *)&tv, sizeof (tv)); - set_fs (oldmm); - if (rc != 0) { - CERROR ("Can't set receive timeout %d: %d\n", - ksocknal_tunables.ksnd_io_timeout, rc); - goto failed; - } + deadline = cfs_time_add(cfs_time_current(), + cfs_time_seconds(*ksocknal_tunables.ksnd_timeout)); - set_fs (KERNEL_DS); - option = 1; - rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&option, sizeof (option)); - set_fs (oldmm); - if (rc != 0) { - CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc); - goto failed; - } + write_lock_bh (&ksocknal_data.ksnd_global_lock); - rc = sock->ops->bind(sock, - (struct sockaddr *)&locaddr, sizeof(locaddr)); - if (rc == -EADDRINUSE) { - CDEBUG(D_NET, "Port %d already in use\n", local_port); - *may_retry = 1; - goto failed; - } - if (rc != 0) { - CERROR("Error trying to bind to reserved port %d: %d\n", - local_port, rc); - goto failed; - } + LASSERT (route->ksnr_scheduled); + LASSERT (!route->ksnr_connecting); - rc = sock->ops->connect(sock, - (struct sockaddr *)&srvaddr, sizeof(srvaddr), - sock->file->f_flags); - if (rc == 0) - return 0; + route->ksnr_connecting = 1; - /* EADDRNOTAVAIL probably means we're already connected to the same - * peer/port on the same local port on a differently typed - * connection. Let our caller retry with a different local - * port... */ - *may_retry = (rc == -EADDRNOTAVAIL); + for (;;) { + wanted = ksocknal_route_mask() & ~route->ksnr_connected; - CDEBUG(*may_retry ? 
D_NET : D_ERROR, - "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc, - HIPQUAD(route->ksnr_myipaddr), local_port, - HIPQUAD(route->ksnr_ipaddr), route->ksnr_port); + /* stop connecting if peer/route got closed under me, or + * route got connected while queued */ + if (peer->ksnp_closing || route->ksnr_deleted || + wanted == 0) { + retry_later = 0; + break; + } - failed: - fput(sock->file); - return rc; -} + /* reschedule if peer is connecting to me */ + if (peer->ksnp_accepting > 0) { + CDEBUG(D_NET, + "peer %s(%d) already connecting to me, retry later.\n", + libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting); + retry_later = 1; + } -int -ksocknal_connect_peer (ksock_route_t *route, int type) -{ - struct socket *sock; - int rc; - int port; - int may_retry; - - /* Iterate through reserved ports. When typed connections are - * used, we will need to bind to multiple ports, but we only know - * this at connect time. But, by that time we've already called - * bind() so we need a new socket. */ + if (retry_later) /* needs reschedule */ + break; - for (port = 1023; port > 512; --port) { + if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) { + type = SOCKLND_CONN_ANY; + } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) { + type = SOCKLND_CONN_CONTROL; + } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) { + type = SOCKLND_CONN_BULK_IN; + } else { + LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0); + type = SOCKLND_CONN_BULK_OUT; + } - rc = ksocknal_connect_sock(&sock, &may_retry, route, port); + write_unlock_bh (&ksocknal_data.ksnd_global_lock); - if (rc == 0) { - rc = ksocknal_create_conn(route, sock, type); - fput(sock->file); - return rc; + if (cfs_time_aftereq(cfs_time_current(), deadline)) { + rc = -ETIMEDOUT; + lnet_connect_console_error(rc, peer->ksnp_id.nid, + route->ksnr_ipaddr, + route->ksnr_port); + goto failed; } - if (!may_retry) - return rc; - } + rc = lnet_connect(&sock, peer->ksnp_id.nid, + route->ksnr_myipaddr, + route->ksnr_ipaddr, route->ksnr_port); + if (rc != 0) + goto failed; + + rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type); + if (rc < 0) { + lnet_connect_console_error(rc, peer->ksnp_id.nid, + route->ksnr_ipaddr, + route->ksnr_port); + goto failed; + } - CERROR("Out of ports trying to bind to a reserved port\n"); - return (-EADDRINUSE); -} + /* A +ve RC means I have to retry because I lost the connection + * race or I have to renegotiate protocol version */ + retry_later = (rc != 0); + if (retry_later) + CDEBUG(D_NET, "peer %s: conn race, retry later.\n", + libcfs_nid2str(peer->ksnp_id.nid)); -void -ksocknal_autoconnect (ksock_route_t *route) -{ - LIST_HEAD (zombies); - ksock_tx_t *tx; - ksock_peer_t *peer; - unsigned long flags; - int rc; - int type; - - for (;;) { - for (type = 0; type < SOCKNAL_CONN_NTYPES; type++) - if ((route->ksnr_connecting & (1 << type)) != 0) - break; - LASSERT (type < SOCKNAL_CONN_NTYPES); + write_lock_bh (&ksocknal_data.ksnd_global_lock); + } - rc = ksocknal_connect_peer (route, type); - if (rc != 0) - break; - - /* successfully autoconnected: create_conn did the - * route/conn binding and scheduled any blocked packets */ + route->ksnr_scheduled = 0; + route->ksnr_connecting = 0; - if (route->ksnr_connecting == 0) { - /* No more connections required */ - return; - } + if (retry_later) { + /* re-queue for attention; this frees me up to handle + * the peer's incoming connection request */ + ksocknal_launch_connection_locked(route); } - /* Connection attempt failed */ + write_unlock_bh 
(&ksocknal_data.ksnd_global_lock);
+ return;

- write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
+ failed:
+ write_lock_bh (&ksocknal_data.ksnd_global_lock);

- peer = route->ksnr_peer;
+ route->ksnr_scheduled = 0;
 route->ksnr_connecting = 0;

 /* This is a retry rather than a new connection */
- LASSERT (route->ksnr_retry_interval != 0);
- route->ksnr_timeout = jiffies + route->ksnr_retry_interval;
- route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2,
- SOCKNAL_MAX_RECONNECT_INTERVAL);
+ route->ksnr_retry_interval *= 2;
+ route->ksnr_retry_interval =
+ MAX(route->ksnr_retry_interval,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+ route->ksnr_retry_interval =
+ MIN(route->ksnr_retry_interval,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);

- if (!list_empty (&peer->ksnp_tx_queue) &&
- ksocknal_find_connecting_route_locked (peer) == NULL) {
+ LASSERT (route->ksnr_retry_interval != 0);
+ route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_retry_interval);
+
+ if (!list_empty(&peer->ksnp_tx_queue) &&
+ peer->ksnp_accepting == 0 &&
+ ksocknal_find_connecting_route_locked(peer) == NULL) {
+ /* ksnp_tx_queue is moved onto a conn on successful
+ * connection */
 LASSERT (list_empty (&peer->ksnp_conns));

- /* None of the connections that the blocked packets are
- * waiting for have been successful. Complete them now... */
- do {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &zombies);
- } while (!list_empty (&peer->ksnp_tx_queue));
+ /* take all the blocked packets while I've got the lock and
+ * complete below... */
+ list_add(&zombies, &peer->ksnp_tx_queue);
+ list_del_init(&peer->ksnp_tx_queue);
 }

#if 0 /* irrelevant with only eager routes */
@@ -2633,122 +2427,181 @@ ksocknal_autoconnect (ksock_route_t *route)
 list_del(&route->ksnr_list);
 list_add_tail(&route->ksnr_list,
 &peer->ksnp_routes);
 }
-#endif
- write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
-
- while (!list_empty (&zombies)) {
- char ipbuf[PTL_NALFMT_SIZE];
- char ipbuf2[PTL_NALFMT_SIZE];
- tx = list_entry (zombies.next, ksock_tx_t, tx_list);
-
- CERROR ("Deleting packet type %d len %d ("LPX64" %s->"LPX64" %s)\n",
- le32_to_cpu (tx->tx_hdr->type),
- le32_to_cpu (tx->tx_hdr->payload_length),
- le64_to_cpu (tx->tx_hdr->src_nid),
- portals_nid2str(SOCKNAL,
- le64_to_cpu(tx->tx_hdr->src_nid),
- ipbuf),
- le64_to_cpu (tx->tx_hdr->dest_nid),
- portals_nid2str(SOCKNAL,
- le64_to_cpu(tx->tx_hdr->src_nid),
- ipbuf2));
+#endif
+ write_unlock_bh (&ksocknal_data.ksnd_global_lock);

- list_del (&tx->tx_list);
- /* complete now */
- ksocknal_tx_done (tx, 0);
- }
+ ksocknal_peer_failed(peer);
+ ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
+}
+
+static inline int
+ksocknal_connd_connect_route_locked(void)
+{
+ /* Only handle an outgoing connection request if there is someone left
+ * to handle incoming connections */
+ return !list_empty(&ksocknal_data.ksnd_connd_routes) &&
+ ((ksocknal_data.ksnd_connd_connecting + 1) <
+ *ksocknal_tunables.ksnd_nconnds);
+}
+
+static inline int
+ksocknal_connd_ready(void)
+{
+ int rc;
+
+ spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ rc = ksocknal_data.ksnd_shuttingdown ||
+ !list_empty(&ksocknal_data.ksnd_connd_connreqs) ||
+ ksocknal_connd_connect_route_locked();
+
+ spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ return rc;
}

 int
-ksocknal_autoconnectd (void *arg)
+ksocknal_connd (void *arg)
 {
 long id = (long)arg;
 char 
name[16]; - unsigned long flags; + ksock_connreq_t *cr; ksock_route_t *route; - int rc; - snprintf (name, sizeof (name), "ksocknal_ad%02ld", id); - kportal_daemonize (name); - kportal_blockallsigs (); + snprintf (name, sizeof (name), "socknal_cd%02ld", id); + cfs_daemonize (name); + cfs_block_allsigs (); - spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags); + spin_lock_bh (&ksocknal_data.ksnd_connd_lock); while (!ksocknal_data.ksnd_shuttingdown) { - if (!list_empty (&ksocknal_data.ksnd_autoconnectd_routes)) { - route = list_entry (ksocknal_data.ksnd_autoconnectd_routes.next, - ksock_route_t, ksnr_connect_list); + if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) { + /* Connection accepted by the listener */ + cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next, + ksock_connreq_t, ksncr_list); - list_del (&route->ksnr_connect_list); - spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags); + list_del(&cr->ksncr_list); + spin_unlock_bh (&ksocknal_data.ksnd_connd_lock); - ksocknal_autoconnect (route); - ksocknal_put_route (route); + ksocknal_create_conn(cr->ksncr_ni, NULL, + cr->ksncr_sock, SOCKLND_CONN_NONE); + lnet_ni_decref(cr->ksncr_ni); + LIBCFS_FREE(cr, sizeof(*cr)); - spin_lock_irqsave(&ksocknal_data.ksnd_autoconnectd_lock, - flags); - continue; + spin_lock_bh (&ksocknal_data.ksnd_connd_lock); + } + + if (ksocknal_connd_connect_route_locked()) { + /* Connection request */ + route = list_entry (ksocknal_data.ksnd_connd_routes.next, + ksock_route_t, ksnr_connd_list); + + list_del (&route->ksnr_connd_list); + ksocknal_data.ksnd_connd_connecting++; + spin_unlock_bh (&ksocknal_data.ksnd_connd_lock); + + ksocknal_connect (route); + ksocknal_route_decref(route); + + spin_lock_bh (&ksocknal_data.ksnd_connd_lock); + ksocknal_data.ksnd_connd_connecting--; } - spin_unlock_irqrestore(&ksocknal_data.ksnd_autoconnectd_lock, - flags); + spin_unlock_bh (&ksocknal_data.ksnd_connd_lock); - rc = wait_event_interruptible(ksocknal_data.ksnd_autoconnectd_waitq, - ksocknal_data.ksnd_shuttingdown || - !list_empty(&ksocknal_data.ksnd_autoconnectd_routes)); + wait_event_interruptible_exclusive( + ksocknal_data.ksnd_connd_waitq, + ksocknal_connd_ready()); - spin_lock_irqsave(&ksocknal_data.ksnd_autoconnectd_lock, flags); + spin_lock_bh (&ksocknal_data.ksnd_connd_lock); } - spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags); + spin_unlock_bh (&ksocknal_data.ksnd_connd_lock); ksocknal_thread_fini (); return (0); } ksock_conn_t * -ksocknal_find_timed_out_conn (ksock_peer_t *peer) +ksocknal_find_timed_out_conn (ksock_peer_t *peer) { /* We're called with a shared lock on ksnd_global_lock */ ksock_conn_t *conn; struct list_head *ctmp; list_for_each (ctmp, &peer->ksnp_conns) { + int error; conn = list_entry (ctmp, ksock_conn_t, ksnc_list); - /* Don't need the {get,put}connsock dance to deref ksnc_sock... */ + /* Don't need the {get,put}connsock dance to deref ksnc_sock */ LASSERT (!conn->ksnc_closing); - if (conn->ksnc_sock->sk->sk_err != 0) { - /* Something (e.g. 
 
 ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 {
         /* We're called with a shared lock on ksnd_global_lock */
         ksock_conn_t      *conn;
         struct list_head  *ctmp;
 
         list_for_each (ctmp, &peer->ksnp_conns) {
+                int     error;
                 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
 
-                /* Don't need the {get,put}connsock dance to deref ksnc_sock... */
+                /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                 LASSERT (!conn->ksnc_closing);
 
-                if (conn->ksnc_sock->sk->sk_err != 0) {
-                        /* Something (e.g. failed keepalive) set the socket error */
-                        atomic_inc (&conn->ksnc_refcount);
-                        CERROR ("Socket error %d: "LPX64" %p %d.%d.%d.%d\n",
-                                conn->ksnc_sock->sk->sk_err, peer->ksnp_nid,
-                                conn, HIPQUAD(conn->ksnc_ipaddr));
+                /* NB SOCK_ERROR resets the socket's error code on some
+                 * platforms (e.g. Darwin 8.x) */
+                error = SOCK_ERROR(conn->ksnc_sock);
+                if (error != 0) {
+                        ksocknal_conn_addref(conn);
+
+                        switch (error) {
+                        case ECONNRESET:
+                                CDEBUG(D_NETERROR, "A connection with %s "
+                                       "(%u.%u.%u.%u:%d) was reset; "
+                                       "it may have rebooted.\n",
+                                       libcfs_id2str(peer->ksnp_id),
+                                       HIPQUAD(conn->ksnc_ipaddr),
+                                       conn->ksnc_port);
+                                break;
+                        case ETIMEDOUT:
+                                CDEBUG(D_NETERROR, "A connection with %s "
+                                       "(%u.%u.%u.%u:%d) timed out; the "
+                                       "network or node may be down.\n",
+                                       libcfs_id2str(peer->ksnp_id),
+                                       HIPQUAD(conn->ksnc_ipaddr),
+                                       conn->ksnc_port);
+                                break;
+                        default:
+                                CDEBUG(D_NETERROR, "An unexpected network error %d "
+                                       "occurred with %s "
+                                       "(%u.%u.%u.%u:%d)\n", error,
+                                       libcfs_id2str(peer->ksnp_id),
+                                       HIPQUAD(conn->ksnc_ipaddr),
+                                       conn->ksnc_port);
+                                break;
+                        }
+
                         return (conn);
                 }
 
                 if (conn->ksnc_rx_started &&
-                    time_after_eq (jiffies, conn->ksnc_rx_deadline)) {
+                    cfs_time_aftereq(cfs_time_current(),
+                                     conn->ksnc_rx_deadline)) {
                         /* Timed out incomplete incoming message */
-                        atomic_inc (&conn->ksnc_refcount);
-                        CERROR ("Timed out RX from "LPX64" %p %d.%d.%d.%d\n",
-                                peer->ksnp_nid,conn,HIPQUAD(conn->ksnc_ipaddr));
+                        ksocknal_conn_addref(conn);
+                        CDEBUG(D_NETERROR, "Timeout receiving from %s "
+                               "(%u.%u.%u.%u:%d), state %d wanted %d left %d\n",
+                               libcfs_id2str(peer->ksnp_id),
+                               HIPQUAD(conn->ksnc_ipaddr),
+                               conn->ksnc_port,
+                               conn->ksnc_rx_state,
+                               conn->ksnc_rx_nob_wanted,
+                               conn->ksnc_rx_nob_left);
                         return (conn);
                 }
 
-                if ((!list_empty (&conn->ksnc_tx_queue) ||
-                     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
-                    time_after_eq (jiffies, conn->ksnc_tx_deadline)) {
+                if ((!list_empty(&conn->ksnc_tx_queue) ||
+                     SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
+                    cfs_time_aftereq(cfs_time_current(),
+                                     conn->ksnc_tx_deadline)) {
                         /* Timed out messages queued for sending or
                          * buffered in the socket's send buffer */
-                        atomic_inc (&conn->ksnc_refcount);
-                        CERROR ("Timed out TX to "LPX64" %s%d %p %d.%d.%d.%d\n",
-                                peer->ksnp_nid,
-                                list_empty (&conn->ksnc_tx_queue) ? "" : "Q ",
-                                conn->ksnc_sock->sk->sk_wmem_queued, conn,
-                                HIPQUAD(conn->ksnc_ipaddr));
+                        ksocknal_conn_addref(conn);
+                        CDEBUG(D_NETERROR, "Timeout sending data to %s "
+                               "(%u.%u.%u.%u:%d); the network or that "
+                               "node may be down.\n",
+                               libcfs_id2str(peer->ksnp_id),
+                               HIPQUAD(conn->ksnc_ipaddr),
+                               conn->ksnc_port);
                         return (conn);
                 }
         }
@@ -2756,6 +2609,31 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
         return (NULL);
 }
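
Each check above follows the same deadline pattern: an absolute expiry is armed when I/O begins (ksnc_rx_deadline, ksnc_tx_deadline) and later compared against the current time with cfs_time_aftereq(). A toy sketch of that pattern using plain time_t in place of the cfs_time_* primitives (the helper names here are invented for illustration):

#include <stdio.h>
#include <time.h>

#define IO_TIMEOUT 50   /* seconds; stands in for *ksnd_timeout */

/* Arm the absolute deadline when a message starts to arrive... */
static time_t rx_deadline;

static void rx_started(void)
{
        rx_deadline = time(NULL) + IO_TIMEOUT;
}

/* ...and later declare a timeout only once "now" reaches it -- the same
 * comparison cfs_time_aftereq() performs above. */
static int rx_timed_out(void)
{
        return time(NULL) >= rx_deadline;
}

int main(void)
{
        rx_started();
        printf("timed out yet? %d\n", rx_timed_out());  /* 0 until deadline */
        return 0;
}
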
"" : "Q ", - conn->ksnc_sock->sk->sk_wmem_queued, conn, - HIPQUAD(conn->ksnc_ipaddr)); + ksocknal_conn_addref(conn); + CDEBUG(D_NETERROR, "Timeout sending data to %s " + "(%u.%u.%u.%u:%d) the network or that " + "node may be down.\n", + libcfs_id2str(peer->ksnp_id), + HIPQUAD(conn->ksnc_ipaddr), + conn->ksnc_port); return (conn); } } @@ -2756,6 +2609,31 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) return (NULL); } +static inline void +ksocknal_flush_stale_txs(ksock_peer_t *peer) +{ + ksock_tx_t *tx; + CFS_LIST_HEAD (stale_txs); + + write_lock_bh (&ksocknal_data.ksnd_global_lock); + + while (!list_empty (&peer->ksnp_tx_queue)) { + tx = list_entry (peer->ksnp_tx_queue.next, + ksock_tx_t, tx_list); + + if (!cfs_time_aftereq(cfs_time_current(), + tx->tx_deadline)) + break; + + list_del (&tx->tx_list); + list_add_tail (&tx->tx_list, &stale_txs); + } + + write_unlock_bh (&ksocknal_data.ksnd_global_lock); + + ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1); +} + void ksocknal_check_peer_timeouts (int idx) { @@ -2773,48 +2651,85 @@ ksocknal_check_peer_timeouts (int idx) list_for_each (ptmp, peers) { peer = list_entry (ptmp, ksock_peer_t, ksnp_list); conn = ksocknal_find_timed_out_conn (peer); - + if (conn != NULL) { read_unlock (&ksocknal_data.ksnd_global_lock); - CERROR ("Timeout out conn->"LPX64" ip %d.%d.%d.%d:%d\n", - peer->ksnp_nid, - HIPQUAD(conn->ksnc_ipaddr), - conn->ksnc_port); ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); - + /* NB we won't find this one again, but we can't * just proceed with the next peer, since we dropped * ksnd_global_lock and it might be dead already! */ - ksocknal_put_conn (conn); + ksocknal_conn_decref(conn); goto again; } + + /* we can't process stale txs right here because we're + * holding only shared lock */ + if (!list_empty (&peer->ksnp_tx_queue)) { + ksock_tx_t *tx = list_entry (peer->ksnp_tx_queue.next, + ksock_tx_t, tx_list); + + if (cfs_time_aftereq(cfs_time_current(), + tx->tx_deadline)) { + + ksocknal_peer_addref(peer); + read_unlock (&ksocknal_data.ksnd_global_lock); + + ksocknal_flush_stale_txs(peer); + + ksocknal_peer_decref(peer); + goto again; + } + } } + /* print out warnings about stale ZC_REQs */ + list_for_each_entry(peer, peers, ksnp_list) { + ksock_tx_t *tx; + int n = 0; + + list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) { + if (!cfs_time_aftereq(cfs_time_current(), + tx->tx_deadline)) + break; + n++; + } + + if (n != 0) { + tx = list_entry (peer->ksnp_zc_req_list.next, + ksock_tx_t, tx_zc_list); + CWARN("Stale ZC_REQs for peer %s detected: %d; the " + "oldest (%p) timed out %ld secs ago\n", + libcfs_nid2str(peer->ksnp_id.nid), n, tx, + cfs_duration_sec(cfs_time_current() - + tx->tx_deadline)); + } + } + read_unlock (&ksocknal_data.ksnd_global_lock); } int ksocknal_reaper (void *arg) { - wait_queue_t wait; - unsigned long flags; + cfs_waitlink_t wait; ksock_conn_t *conn; ksock_sched_t *sched; struct list_head enomem_conns; int nenomem_conns; - int timeout; + cfs_duration_t timeout; int i; int peer_index = 0; - unsigned long deadline = jiffies; - - kportal_daemonize ("ksocknal_reaper"); - kportal_blockallsigs (); + cfs_time_t deadline = cfs_time_current(); - INIT_LIST_HEAD(&enomem_conns); - init_waitqueue_entry (&wait, current); + cfs_daemonize ("socknal_reaper"); + cfs_block_allsigs (); - spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags); + CFS_INIT_LIST_HEAD(&enomem_conns); + cfs_waitlink_init (&wait); + + spin_lock_bh (&ksocknal_data.ksnd_reaper_lock); while (!ksocknal_data.ksnd_shuttingdown) { @@ 
 
 int
 ksocknal_reaper (void *arg)
 {
-        wait_queue_t       wait;
-        unsigned long      flags;
+        cfs_waitlink_t     wait;
         ksock_conn_t      *conn;
         ksock_sched_t     *sched;
         struct list_head   enomem_conns;
         int                nenomem_conns;
-        int                timeout;
+        cfs_duration_t     timeout;
         int                i;
         int                peer_index = 0;
-        unsigned long      deadline = jiffies;
-
-        kportal_daemonize ("ksocknal_reaper");
-        kportal_blockallsigs ();
+        cfs_time_t         deadline = cfs_time_current();
 
-        INIT_LIST_HEAD(&enomem_conns);
-        init_waitqueue_entry (&wait, current);
+        cfs_daemonize ("socknal_reaper");
+        cfs_block_allsigs ();
 
-        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+        CFS_INIT_LIST_HEAD(&enomem_conns);
+        cfs_waitlink_init (&wait);
+
+        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
@@ -2822,13 +2737,13 @@ ksocknal_reaper (void *arg)
                         conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
                                            ksock_conn_t, ksnc_list);
                         list_del (&conn->ksnc_list);
-
-                        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+
+                        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                         ksocknal_terminate_conn (conn);
-                        ksocknal_put_conn (conn);
+                        ksocknal_conn_decref(conn);
 
-                        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+                        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2836,12 +2751,12 @@ ksocknal_reaper (void *arg)
                         conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
                                            ksock_conn_t, ksnc_list);
                         list_del (&conn->ksnc_list);
-
-                        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+
+                        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                         ksocknal_destroy_conn (conn);
 
-                        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+                        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2850,7 +2765,7 @@ ksocknal_reaper (void *arg)
                         list_del_init(&ksocknal_data.ksnd_enomem_conns);
                 }
 
-                spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+                spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                 /* reschedule all the connections that stalled with ENOMEM... */
                 nenomem_conns = 0;
@@ -2861,23 +2776,24 @@ ksocknal_reaper (void *arg)
 
                         sched = conn->ksnc_scheduler;
 
-                        spin_lock_irqsave (&sched->kss_lock, flags);
+                        spin_lock_bh (&sched->kss_lock);
 
                         LASSERT (conn->ksnc_tx_scheduled);
                         conn->ksnc_tx_ready = 1;
-                        list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns);
-                        wake_up (&sched->kss_waitq);
+                        list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+                        cfs_waitq_signal (&sched->kss_waitq);
 
-                        spin_unlock_irqrestore (&sched->kss_lock, flags);
+                        spin_unlock_bh (&sched->kss_lock);
                         nenomem_conns++;
                 }
-
+
                 /* careful with the jiffy wrap... */
-                while ((timeout = (int)(deadline - jiffies)) <= 0) {
+                while ((timeout = cfs_time_sub(deadline,
+                                               cfs_time_current())) <= 0) {
                         const int n = 4;
                         const int p = 1;
                         int       chunk = ksocknal_data.ksnd_peer_hash_size;
-
+
                         /* Time to check for timeouts on a few more peers: I do
                          * checks every 'p' seconds on a proportion of the peer
                          * table and I need to check every connection 'n' times
@@ -2885,19 +2801,19 @@ ksocknal_reaper (void *arg)
                          * timeout on any connection within (n+1)/n times the
                          * timeout interval. */
 
-                        if (ksocknal_tunables.ksnd_io_timeout > n * p)
-                                chunk = (chunk * n * p) /
-                                        ksocknal_tunables.ksnd_io_timeout;
+                        if (*ksocknal_tunables.ksnd_timeout > n * p)
+                                chunk = (chunk * n * p) /
+                                        *ksocknal_tunables.ksnd_timeout;
                         if (chunk == 0)
                                 chunk = 1;
 
                         for (i = 0; i < chunk; i++) {
                                 ksocknal_check_peer_timeouts (peer_index);
-                                peer_index = (peer_index + 1) %
+                                peer_index = (peer_index + 1) %
                                              ksocknal_data.ksnd_peer_hash_size;
                         }
 
-                        deadline += p * HZ;
+                        deadline = cfs_time_add(deadline, cfs_time_seconds(p));
                 }
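
Concretely, the chunk arithmetic above spreads the peer-table scan so every bucket is visited n times per timeout interval: with example values of 256 hash buckets and a 50 s timeout (these are illustrative, not the module defaults), each 1 s tick checks (256 * 4 * 1) / 50 = 20 buckets, covering the whole table about every 13 s. A standalone sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
        const int n = 4;        /* scans required per timeout interval */
        const int p = 1;        /* seconds between scan ticks */
        int hash_size = 256;    /* stands in for ksnd_peer_hash_size */
        int timeout = 50;       /* stands in for *ksnd_timeout, in seconds */
        int chunk = hash_size;

        /* same proportional calculation as the reaper above */
        if (timeout > n * p)
                chunk = (chunk * n * p) / timeout;
        if (chunk == 0)
                chunk = 1;

        /* 20 buckets/tick -> whole table every ceil(256/20) = 13 ticks */
        printf("check %d of %d buckets per %ds tick\n", chunk, hash_size, p);
        return 0;
}
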
 
                 if (nenomem_conns != 0) {
@@ -2906,33 +2822,25 @@ ksocknal_reaper (void *arg)
                          * if any go back on my enomem list. */
                         timeout = SOCKNAL_ENOMEM_RETRY;
                 }
-                ksocknal_data.ksnd_reaper_waketime = jiffies + timeout;
+                ksocknal_data.ksnd_reaper_waketime =
+                        cfs_time_add(cfs_time_current(), timeout);
 
                 set_current_state (TASK_INTERRUPTIBLE);
-                add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+                cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                 if (!ksocknal_data.ksnd_shuttingdown &&
                     list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
                     list_empty (&ksocknal_data.ksnd_zombie_conns))
-                        schedule_timeout (timeout);
+                        cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
 
                 set_current_state (TASK_RUNNING);
-                remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+                cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
-                spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
+                spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
         }
 
-        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
+        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         ksocknal_thread_fini ();
         return (0);
 }
-
-lib_nal_t ksocknal_lib = {
-        libnal_data:       &ksocknal_data,      /* NAL private data */
-        libnal_send:       ksocknal_send,
-        libnal_send_pages: ksocknal_send_pages,
-        libnal_recv:       ksocknal_recv,
-        libnal_recv_pages: ksocknal_recv_pages,
-        libnal_dist:       ksocknal_dist
-};
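
Note the shape of the reaper's sleep above: it only waits if, after queueing itself on the waitq, the deathrow and zombie lists are still empty, so a wakeup posted between the check and the wait is never lost. A compressed pthreads sketch of that check-then-timed-wait pattern (these are not the libcfs primitives; have_work stands in for the two conn lists):

#include <pthread.h>
#include <time.h>

static pthread_mutex_t reaper_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  reaper_wake = PTHREAD_COND_INITIALIZER;
static int             have_work;  /* stands in for deathrow/zombie lists */

/* Sleep until 'timeout' seconds pass or work arrives, rechecking the
 * condition under the lock before (and after) every wait, like the
 * reaper's list_empty() guards around cfs_waitq_timedwait(). */
static void reaper_sleep(int timeout)
{
        struct timespec until;

        clock_gettime(CLOCK_REALTIME, &until);
        until.tv_sec += timeout;

        pthread_mutex_lock(&reaper_lock);
        while (!have_work &&
               pthread_cond_timedwait(&reaper_wake, &reaper_lock,
                                      &until) == 0)
                ;       /* signalled: loop to recheck for work */
        pthread_mutex_unlock(&reaper_lock);
}

int main(void)
{
        have_work = 0;
        reaper_sleep(1);        /* nothing queued: returns after ~1 second */
        return 0;
}
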