/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
# include <linux/syscalls.h>
#endif

/*
 * LIB functions follow
 */
ksocknal_read(nal_cb_t *nal, void *private, void *dst_addr,
              user_ptr src_addr, size_t len)
        CDEBUG(D_NET, LPX64": reading %ld bytes from %p -> %p\n",
               nal->ni.nid, (long)len, src_addr, dst_addr);

        memcpy( dst_addr, src_addr, len );

ksocknal_write(nal_cb_t *nal, void *private, user_ptr dst_addr,
               void *src_addr, size_t len)
        CDEBUG(D_NET, LPX64": writing %ld bytes from %p -> %p\n",
               nal->ni.nid, (long)len, src_addr, dst_addr);

        memcpy( dst_addr, src_addr, len );
ksocknal_malloc(nal_cb_t *nal, size_t len)
        PORTAL_ALLOC(buf, len);

ksocknal_free(nal_cb_t *nal, void *buf, size_t len)
        PORTAL_FREE(buf, len);

ksocknal_printf(nal_cb_t *nal, const char *fmt, ...)
        vsnprintf (msg, sizeof (msg), fmt, ap); /* sprint safely */
        msg[sizeof (msg) - 1] = 0;              /* ensure terminated */

        CDEBUG (D_NET, "%s", msg);
ksocknal_cli(nal_cb_t *nal, unsigned long *flags)
        ksock_nal_data_t *data = nal->nal_data;

        /* OK to ignore 'flags'; we only ever serialise threads and
         * never need to lock out interrupts */
        spin_lock(&data->ksnd_nal_cb_lock);

ksocknal_sti(nal_cb_t *nal, unsigned long *flags)
        ksock_nal_data_t *data;
        data = nal->nal_data;

        spin_unlock(&data->ksnd_nal_cb_lock);
ksocknal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
        /* I would guess that if ksocknal_get_peer (nid) == NULL,
         * and we're not routing, then 'nid' is very distant :) */
        if ( nal->ni.nid == nid ) {

ksocknal_free_ltx (ksock_ltx_t *ltx)
        atomic_dec(&ksocknal_data.ksnd_nactive_ltxs);
        PORTAL_FREE(ltx, ltx->ltx_desc_size);
#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
ksocknal_kvaddr_to_page (unsigned long vaddr)
        if (vaddr >= VMALLOC_START &&
                page = vmalloc_to_page ((void *)vaddr);
        else if (vaddr >= PKMAP_BASE &&
                 vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
                page = vmalloc_to_page ((void *)vaddr);
                /* in 2.4 ^ just walks the page tables */
        else
                page = virt_to_page (vaddr);
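        /* Summary of the three cases above: vmalloc addresses and
         * persistent kmaps need a page-table walk to find their page,
         * while anything else is assumed to sit in the kernel's direct
         * mapping, where virt_to_page() is simple address arithmetic. */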
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
        struct socket *sock = conn->ksnc_sock;
        struct iovec  *iov = tx->tx_iov;
        int            fragsize = iov->iov_len;
        unsigned long  vaddr = (unsigned long)iov->iov_base;
        int            more = (tx->tx_niov > 1) ||
                              (tx->tx_nkiov > 0) ||
                              (!list_empty (&conn->ksnc_tx_queue));
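        /* 'more' drives MSG_MORE below: it hints to the stack that another
         * fragment (or another queued tx) follows immediately, so it should
         * hold off pushing a partial frame onto the wire. */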
#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
        int            offset = vaddr & (PAGE_SIZE - 1);
        int            zcsize = MIN (fragsize, PAGE_SIZE - offset);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone, so we only send 1 frag at a time. */
        LASSERT (fragsize <= tx->tx_resid);
        LASSERT (tx->tx_niov > 0);

#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
        if (zcsize >= ksocknal_data.ksnd_zc_min_frag &&
            (sock->sk->route_caps & NETIF_F_SG) &&
            (sock->sk->route_caps &
             (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) &&
            (page = ksocknal_kvaddr_to_page (vaddr)) != NULL) {

                CDEBUG(D_NET, "vaddr %p, page %p->%p + offset %x for %d\n",
                       (void *)vaddr, page, page_address(page), offset, zcsize);

                if (fragsize > zcsize) {

                rc = tcp_sendpage_zccd(sock, page, offset, zcsize,
                                       more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT,

                /* NB don't pass tx's iov; sendmsg may or may not update it */
                struct iovec fragiov = { .iov_base = (void *)vaddr,
                                         .iov_len  = fragsize};
                struct msghdr msg = {
                        .msg_flags = more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT
                mm_segment_t oldmm = get_fs();

                rc = sock_sendmsg(sock, &msg, fragsize);

        if (rc < iov->iov_len) {
                /* didn't send whole iov entry... */
                iov->iov_base = (void *)(vaddr + rc);
ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
        struct socket *sock = conn->ksnc_sock;
        ptl_kiov_t    *kiov = tx->tx_kiov;
        int            fragsize = kiov->kiov_len;
        struct page   *page = kiov->kiov_page;
        int            offset = kiov->kiov_offset;
        int            more = (tx->tx_nkiov > 1) ||
                              (!list_empty (&conn->ksnc_tx_queue));

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone, so we only send 1 frag at a time. */
        LASSERT (fragsize <= tx->tx_resid);
        LASSERT (offset + fragsize <= PAGE_SIZE);
        LASSERT (tx->tx_niov == 0);
        LASSERT (tx->tx_nkiov > 0);

        if (fragsize >= ksocknal_data.ksnd_zc_min_frag &&
            (sock->sk->route_caps & NETIF_F_SG) &&
            (sock->sk->route_caps &
             (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM))) {

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                       page, offset, fragsize);

                rc = tcp_sendpage_zccd(sock, page, offset, fragsize,
                                       more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT,

                char *addr = ((char *)kmap (page)) + offset;
                struct iovec fragiov = {.iov_base = addr,
                                        .iov_len  = fragsize};
                struct msghdr msg = {
                        .msg_flags = more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT
                mm_segment_t oldmm = get_fs();

                rc = sock_sendmsg(sock, &msg, fragsize);

                kiov->kiov_offset = offset + rc;
                kiov->kiov_len    = fragsize - rc;
ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
        if (ksocknal_data.ksnd_stall_tx != 0) {
                set_current_state (TASK_UNINTERRUPTIBLE);
                schedule_timeout (ksocknal_data.ksnd_stall_tx * HZ);

        LASSERT (tx->tx_resid != 0);

        rc = ksocknal_getconnsock (conn);
                LASSERT (conn->ksnc_closing);

        do {
                if (ksocknal_data.ksnd_enomem_tx > 0) {
                        ksocknal_data.ksnd_enomem_tx--;
                } else if (tx->tx_niov != 0) {
                        rc = ksocknal_send_iov (conn, tx);
                } else {
                        rc = ksocknal_send_kiov (conn, tx);
                /* Didn't write anything.
                 *
                 * NB: rc == 0 and rc == -EAGAIN both mean try
                 * again later (the linux stack returns -EAGAIN for
                 * this, but the Adaptec TOE returns 0).
                 *
                 * Also, sends never fail with -ENOMEM, just
                 * -EAGAIN, but with the added bonus that we can't
                 * expect write_space() to call us back to tell us
                 * when to try sending again.  We use the
                 * SOCK_NOSPACE flag to diagnose... */

                LASSERT(rc != -ENOMEM);
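                /* What follows maps "no progress" onto an errno the callers
                 * understand: -EAGAIN when the socket is genuinely full
                 * (SOCK_NOSPACE set, so write_space() will prod us later)
                 * and -ENOMEM otherwise, where only the reaper's timed
                 * retry can get the conn moving again.  The
                 * (counter & (-counter)) == counter test further down
                 * isolates the lowest set bit, i.e. it is true exactly when
                 * the count is a power of 2, so the ENOMEM warning is
                 * rate-limited with exponential backoff. */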
                if (rc == 0 || rc == -EAGAIN) {
                        if (test_bit(SOCK_NOSPACE,
                                     &conn->ksnc_sock->flags)) {

                                if ((counter & (-counter)) == counter)
                                        CWARN("%d ENOMEM tx %p\n",

                /* Consider the connection alive since we managed to chuck
                 * more data into it.  Really, we'd like to consider it
                 * alive only when the peer ACKs something, but
                 * write_space() only gets called back while SOCK_NOSPACE
                 * is set.  Instead, we presume peer death has occurred if
                 * the socket doesn't drain within a timeout */
                conn->ksnc_tx_deadline = jiffies +
                                         ksocknal_data.ksnd_io_timeout * HZ;
                conn->ksnc_peer->ksnp_last_alive = jiffies;

        } while (tx->tx_resid != 0);

        ksocknal_putconnsock (conn);

ksocknal_eager_ack (ksock_conn_t *conn)
        mm_segment_t oldmm = get_fs();
        struct socket *sock = conn->ksnc_sock;

        /* Remind the socket to ACK eagerly.  If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK
         * on, introducing delay in completing zero-copy sends in my
         * peer. */
        sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
                               (char *)&opt, sizeof (opt));
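        /* NB TCP_QUICKACK is not sticky: the stack quietly re-enables
         * delayed ACKs by itself, so the option has to be re-armed like
         * this after every message we want ACKed promptly. */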
ksocknal_recv_iov (ksock_conn_t *conn)
        struct iovec *iov = conn->ksnc_rx_iov;
        int           fragsize = iov->iov_len;
        unsigned long vaddr = (unsigned long)iov->iov_base;
        struct iovec  fragiov = { .iov_base = (void *)vaddr,
                                  .iov_len  = fragsize};
        struct msghdr msg = {
        mm_segment_t  oldmm = get_fs();

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone, so we only receive 1 frag at a time. */
        LASSERT (conn->ksnc_rx_niov > 0);
        LASSERT (fragsize <= conn->ksnc_rx_nob_wanted);

        rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT);
        /* NB this is just a boolean............................^ */

        /* received something... */
        conn->ksnc_peer->ksnp_last_alive = jiffies;
        conn->ksnc_rx_deadline = jiffies +
                                 ksocknal_data.ksnd_io_timeout * HZ;
        mb();                           /* order with setting rx_started */
        conn->ksnc_rx_started = 1;
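        /* The reaper thread reads ksnc_rx_started without taking our lock;
         * the barrier above ensures it cannot observe rx_started set while
         * ksnc_rx_deadline is still stale. */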
        conn->ksnc_rx_nob_wanted -= rc;
        conn->ksnc_rx_nob_left -= rc;

        iov->iov_base = (void *)(vaddr + rc);
        iov->iov_len = fragsize - rc;

        conn->ksnc_rx_niov--;

ksocknal_recv_kiov (ksock_conn_t *conn)
        ptl_kiov_t   *kiov = conn->ksnc_rx_kiov;
        struct page  *page = kiov->kiov_page;
        int           offset = kiov->kiov_offset;
        int           fragsize = kiov->kiov_len;
        unsigned long vaddr = ((unsigned long)kmap (page)) + offset;
        struct iovec  fragiov = { .iov_base = (void *)vaddr,
                                  .iov_len  = fragsize};
        struct msghdr msg = {
        mm_segment_t  oldmm = get_fs();

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone, so we only receive 1 frag at a time. */
        LASSERT (fragsize <= conn->ksnc_rx_nob_wanted);
        LASSERT (conn->ksnc_rx_nkiov > 0);
        LASSERT (offset + fragsize <= PAGE_SIZE);

        rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT);
        /* NB this is just a boolean............................^ */

        /* received something... */
        conn->ksnc_peer->ksnp_last_alive = jiffies;
        conn->ksnc_rx_deadline = jiffies +
                                 ksocknal_data.ksnd_io_timeout * HZ;
        mb();                           /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_wanted -= rc;
        conn->ksnc_rx_nob_left -= rc;

        kiov->kiov_offset = offset + rc;
        kiov->kiov_len = fragsize - rc;

        conn->ksnc_rx_kiov++;
        conn->ksnc_rx_nkiov--;

ksocknal_receive (ksock_conn_t *conn)
        /* Return 1 on success, 0 on EOF, < 0 on error.
         * Caller checks ksnc_rx_nob_wanted to determine
         * progress/completion. */

        if (ksocknal_data.ksnd_stall_rx != 0) {
                set_current_state (TASK_UNINTERRUPTIBLE);
                schedule_timeout (ksocknal_data.ksnd_stall_rx * HZ);

        rc = ksocknal_getconnsock (conn);
                LASSERT (conn->ksnc_closing);

                if (conn->ksnc_rx_niov != 0)
                        rc = ksocknal_recv_iov (conn);
                else
                        rc = ksocknal_recv_kiov (conn);

                        /* error/EOF or partial receive */
                } else if (rc == 0 && conn->ksnc_rx_started) {
                        /* EOF in the middle of a message */

                /* Completed a fragment */

                if (conn->ksnc_rx_nob_wanted == 0) {
                        /* Completed a message segment (header or payload) */
                        if ((ksocknal_data.ksnd_eager_ack & conn->ksnc_type) != 0 &&
                            (conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
                             conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) {
                                /* Remind the socket to ack eagerly... */
                                ksocknal_eager_ack(conn);

        ksocknal_putconnsock (conn);

ksocknal_zc_callback (zccd_t *zcd)
        ksock_tx_t    *tx = KSOCK_ZCCD_2_TX(zcd);
        ksock_sched_t *sched = tx->tx_conn->ksnc_scheduler;

        /* Schedule tx for cleanup (can't do it now due to lock conflicts) */

        spin_lock_irqsave (&sched->kss_lock, flags);

        list_add_tail (&tx->tx_list, &sched->kss_zctxdone_list);
        wake_up (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);
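        /* NB the zero-copy descriptor fires when the last skb fragment
         * referencing our pages is freed, which can happen in interrupt
         * context; hence the irqsave lock and the hand-off to a scheduler
         * thread rather than finalising the tx right here. */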
ksocknal_tx_done (ksock_tx_t *tx, int asynch)
        if (tx->tx_conn != NULL) {
                /* This tx got queued on a conn; do the accounting... */
                atomic_sub (tx->tx_nob, &tx->tx_conn->ksnc_tx_nob);

                /* zero copy completion isn't always from
                 * process_transmit() so it needs to keep a ref on
                ksocknal_put_conn (tx->tx_conn);

        if (tx->tx_isfwd) {             /* was a forwarded packet? */
                kpr_fwd_done (&ksocknal_data.ksnd_router,
                              KSOCK_TX_2_KPR_FWD_DESC (tx),
                              (tx->tx_resid == 0) ? 0 : -ECONNABORTED);

        ltx = KSOCK_TX_2_KSOCK_LTX (tx);

        lib_finalize (&ksocknal_lib, ltx->ltx_private, ltx->ltx_cookie,
                      (tx->tx_resid == 0) ? PTL_OK : PTL_FAIL);

        ksocknal_free_ltx (ltx);

ksocknal_tx_launched (ksock_tx_t *tx)
        if (atomic_read (&tx->tx_zccd.zccd_count) != 1) {
                ksock_conn_t *conn = tx->tx_conn;

                /* zccd skbufs are still in-flight.  First take a ref on
                 * conn, so it hangs about for ksocknal_tx_done... */
                atomic_inc (&conn->ksnc_refcount);

                /* ...then drop the initial ref on zccd, so the zero copy
                 * callback can occur */
                zccd_put (&tx->tx_zccd);

        /* Any zero-copy activity has completed; I can complete the
         * transmit now, avoiding an extra schedule */
        ksocknal_tx_done (tx, 0);

ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
        rc = ksocknal_transmit (conn, tx);

        CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);

        if (tx->tx_resid == 0) {
                /* Sent everything OK */
                ksocknal_tx_launched (tx);

        /* Queue on ksnd_enomem_conns for retry after a timeout */
        spin_lock_irqsave(&ksocknal_data.ksnd_reaper_lock, flags);

        /* enomem list takes over scheduler's ref... */
        LASSERT (conn->ksnc_tx_scheduled);
        list_add_tail(&conn->ksnc_tx_list,
                      &ksocknal_data.ksnd_enomem_conns);
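        /* Wake the reaper only if it would otherwise sleep past this
         * conn's retry time; if it is due to wake sooner anyway, it will
         * pick the conn up then. */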
        if (!time_after_eq(jiffies + SOCKNAL_ENOMEM_RETRY,
                           ksocknal_data.ksnd_reaper_waketime))
                wake_up (&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_irqrestore(&ksocknal_data.ksnd_reaper_lock, flags);

        if (!conn->ksnc_closing)
                CERROR("[%p] Error %d on write to "LPX64
                       " ip %d.%d.%d.%d:%d\n", conn, rc,
                       conn->ksnc_peer->ksnp_nid,
                       HIPQUAD(conn->ksnc_ipaddr),

        ksocknal_close_conn_and_siblings (conn, rc);
        ksocknal_tx_launched (tx);

ksocknal_launch_autoconnect_locked (ksock_route_t *route)
        /* called holding write lock on ksnd_global_lock */

        LASSERT (!route->ksnr_deleted);
        LASSERT ((route->ksnr_connected & (1 << SOCKNAL_CONN_ANY)) == 0);
        LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES);
        LASSERT (!route->ksnr_connecting);

        if (ksocknal_data.ksnd_typed_conns)
                route->ksnr_connecting =
                        KSNR_TYPED_ROUTES & ~route->ksnr_connected;
        else
                route->ksnr_connecting = (1 << SOCKNAL_CONN_ANY);

        atomic_inc (&route->ksnr_refcount);     /* extra ref for asynchd */

        spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);

        list_add_tail (&route->ksnr_connect_list,
                       &ksocknal_data.ksnd_autoconnectd_routes);
        wake_up (&ksocknal_data.ksnd_autoconnectd_waitq);

        spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
ksocknal_find_target_peer_locked (ksock_tx_t *tx, ptl_nid_t nid)
        char          ipbuf[PTL_NALFMT_SIZE];
        ptl_nid_t     target_nid;
        ksock_peer_t *peer = ksocknal_find_peer_locked (nid);

        CERROR ("Can't send packet to "LPX64
                " %s: routed target is not a peer\n",
                nid, portals_nid2str(SOCKNAL, nid, ipbuf));

        rc = kpr_lookup (&ksocknal_data.ksnd_router, nid, tx->tx_nob,
                CERROR ("Can't route to "LPX64" %s: router error %d\n",
                        nid, portals_nid2str(SOCKNAL, nid, ipbuf), rc);

        peer = ksocknal_find_peer_locked (target_nid);

        CERROR ("Can't send packet to "LPX64" %s: no peer entry\n",
                target_nid, portals_nid2str(SOCKNAL, target_nid, ipbuf));

ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer)
        struct list_head *tmp;
        ksock_conn_t     *typed = NULL;
        ksock_conn_t     *fallback = NULL;

        /* Find the conn with the shortest tx queue */
        list_for_each (tmp, &peer->ksnp_conns) {
                ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
                int           nob = atomic_read(&c->ksnc_tx_nob) +
                                    c->ksnc_sock->sk->sk_wmem_queued;

                LASSERT (!c->ksnc_closing);

                if (fallback == NULL || nob < fnob) {

                if (!ksocknal_data.ksnd_typed_conns)

                switch (c->ksnc_type) {
                case SOCKNAL_CONN_ANY:
                case SOCKNAL_CONN_BULK_IN:
                case SOCKNAL_CONN_BULK_OUT:
                        if (tx->tx_nob < ksocknal_data.ksnd_min_bulk)
                case SOCKNAL_CONN_CONTROL:
                        if (tx->tx_nob >= ksocknal_data.ksnd_min_bulk)

                if (typed == NULL || nob < tnob) {

        /* prefer the typed selection */
        return ((typed != NULL) ? typed : fallback);
ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
        ksock_sched_t *sched = conn->ksnc_scheduler;

        /* called holding global lock (read or irq-write) and caller may
         * not have dropped this lock between finding conn and calling me,
         * so we don't need the {get,put}connsock dance to deref
        LASSERT(!conn->ksnc_closing);
        LASSERT(tx->tx_resid == tx->tx_nob);

        CDEBUG (D_NET, "Sending to "LPX64" ip %d.%d.%d.%d:%d\n",
                conn->ksnc_peer->ksnp_nid,
                HIPQUAD(conn->ksnc_ipaddr),

        atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);

        zccd_init (&tx->tx_zccd, ksocknal_zc_callback);
        /* NB this sets 1 ref on zccd, so the callback can only occur after
         * I've released this ref. */

        spin_lock_irqsave (&sched->kss_lock, flags);

        conn->ksnc_tx_deadline = jiffies +
                                 ksocknal_data.ksnd_io_timeout * HZ;
        mb();                           /* order with list_add_tail */

        list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);

        if (conn->ksnc_tx_ready &&      /* able to send */
            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
                /* +1 ref for scheduler */
                atomic_inc (&conn->ksnc_refcount);
                list_add_tail (&conn->ksnc_tx_list,
                               &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                wake_up (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
        struct list_head *tmp;
        ksock_route_t    *route;
        ksock_route_t    *candidate = NULL;

        list_for_each (tmp, &peer->ksnp_routes) {
                route = list_entry (tmp, ksock_route_t, ksnr_list);
                bits = route->ksnr_connected;

                if ((bits & KSNR_TYPED_ROUTES) == KSNR_TYPED_ROUTES ||
                    (bits & (1 << SOCKNAL_CONN_ANY)) != 0 ||
                    route->ksnr_connecting != 0) {
                        /* All typed connections have been established, or
                         * an untyped connection has been established, or
                         * connections are currently being established */

                /* too soon to retry this guy? */
                if (!time_after_eq (jiffies, route->ksnr_timeout))

                /* always do eager routes */
                if (route->ksnr_eager)

                if (candidate == NULL) {
                        /* If we don't find any other route that is fully
                         * connected or connecting, the first connectable
                         * route is returned.  If it fails to connect, it
                         * will get placed at the end of the list */

        return (found ? NULL : candidate);

ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
        struct list_head *tmp;
        ksock_route_t    *route;

        list_for_each (tmp, &peer->ksnp_routes) {
                route = list_entry (tmp, ksock_route_t, ksnr_list);

                if (route->ksnr_connecting != 0)
ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid)
        ksock_route_t    *route;

        /* Ensure the frags we've been given EXACTLY match the number of
         * bytes we want to send.  Many TCP/IP stacks disregard any total
         * size parameters passed to them and just look at the frags.
         *
         * We always expect at least 1 mapped fragment containing the
         * complete portals header. */
        LASSERT (lib_iov_nob (tx->tx_niov, tx->tx_iov) +
                 lib_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob);
        LASSERT (tx->tx_niov >= 1);
        LASSERT (tx->tx_iov[0].iov_len >= sizeof (ptl_hdr_t));

        CDEBUG (D_NET, "packet %p type %d, nob %d niov %d nkiov %d\n",
                tx, ((ptl_hdr_t *)tx->tx_iov[0].iov_base)->type,
                tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

        tx->tx_conn = NULL;             /* only set when assigned a conn */
        tx->tx_resid = tx->tx_nob;
        tx->tx_hdr = (ptl_hdr_t *)tx->tx_iov[0].iov_base;

        g_lock = &ksocknal_data.ksnd_global_lock;

        peer = ksocknal_find_target_peer_locked (tx, nid);
                read_unlock (g_lock);
                return (-EHOSTUNREACH);

        if (ksocknal_find_connectable_route_locked(peer) == NULL) {
                conn = ksocknal_find_conn_locked (tx, peer);
                        /* I've got no autoconnect routes that need to be
                         * connecting and I do have an actual connection... */
                        ksocknal_queue_tx_locked (tx, conn);
                        read_unlock (g_lock);

        /* Making one or more connections; I'll need a write lock... */

        atomic_inc (&peer->ksnp_refcount);      /* +1 ref for me while I unlock */
        read_unlock (g_lock);
        write_lock_irqsave (g_lock, flags);

        if (peer->ksnp_closing) {               /* peer deleted as I blocked! */
                write_unlock_irqrestore (g_lock, flags);
                ksocknal_put_peer (peer);
                return (-EHOSTUNREACH);

        ksocknal_put_peer (peer);               /* drop ref I got above */

        /* launch any/all autoconnections that need it */
        route = ksocknal_find_connectable_route_locked (peer);
                ksocknal_launch_autoconnect_locked (route);

        conn = ksocknal_find_conn_locked (tx, peer);
                /* Connection exists; queue message on it */
                ksocknal_queue_tx_locked (tx, conn);
                write_unlock_irqrestore (g_lock, flags);

        route = ksocknal_find_connecting_route_locked (peer);
        if (route != NULL) {
                /* At least 1 connection is being established; queue the
                list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
                write_unlock_irqrestore (g_lock, flags);

        write_unlock_irqrestore (g_lock, flags);
        return (-EHOSTUNREACH);
ksocknal_sendmsg(nal_cb_t *nal,
                 unsigned int payload_niov,
                 struct iovec *payload_iov,
                 ptl_kiov_t *payload_kiov,
                 size_t payload_offset,

        /* NB 'private' is different depending on what we're sending.
         * Just ignore it... */

        CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid:"LPX64
               " pid %d\n", payload_nob, payload_niov, nid, pid);

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= PTL_MD_MAX_IOV);

        /* It must be OK to kmap() if required */
        LASSERT (payload_kiov == NULL || !in_interrupt ());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        if (payload_iov != NULL)
                desc_size = offsetof(ksock_ltx_t, ltx_iov[1 + payload_niov]);
        else
                desc_size = offsetof(ksock_ltx_t, ltx_kiov[payload_niov]);

        if (in_interrupt() ||
            type == PTL_MSG_ACK ||
            type == PTL_MSG_REPLY) {
                /* Can't block if in interrupt or responding to an incoming
                PORTAL_ALLOC_ATOMIC(ltx, desc_size);
        } else {
                PORTAL_ALLOC(ltx, desc_size);

                CERROR("Can't allocate tx desc type %d size %d %s\n",
                       type, desc_size, in_interrupt() ? "(intr)" : "");
                return (PTL_NOSPACE);
        atomic_inc(&ksocknal_data.ksnd_nactive_ltxs);

        ltx->ltx_desc_size = desc_size;

        /* We always have 1 mapped frag for the header */
        ltx->ltx_tx.tx_iov = ltx->ltx_iov;
        ltx->ltx_iov[0].iov_base = &ltx->ltx_hdr;
        ltx->ltx_iov[0].iov_len = sizeof(*hdr);
        ltx->ltx_hdr = *hdr;

        ltx->ltx_private = private;
        ltx->ltx_cookie = cookie;

        ltx->ltx_tx.tx_isfwd = 0;
        ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_nob;

        if (payload_iov != NULL) {
                /* payload is all mapped */
                ltx->ltx_tx.tx_kiov  = NULL;
                ltx->ltx_tx.tx_nkiov = 0;

                ltx->ltx_tx.tx_niov =
                        1 + lib_extract_iov(payload_niov, &ltx->ltx_iov[1],
                                            payload_niov, payload_iov,
                                            payload_offset, payload_nob);
        } else {
                /* payload is all pages */
                ltx->ltx_tx.tx_niov = 1;

                ltx->ltx_tx.tx_kiov = ltx->ltx_kiov;
                ltx->ltx_tx.tx_nkiov =
                        lib_extract_kiov(payload_niov, ltx->ltx_kiov,
                                         payload_niov, payload_kiov,
                                         payload_offset, payload_nob);

        rc = ksocknal_launch_packet(&ltx->ltx_tx, nid);

        ksocknal_free_ltx(ltx);
ksocknal_send (nal_cb_t *nal, void *private, lib_msg_t *cookie,
               ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
               unsigned int payload_niov, struct iovec *payload_iov,
               size_t payload_offset, size_t payload_len)
        return (ksocknal_sendmsg(nal, private, cookie,
                                 hdr, type, nid, pid,
                                 payload_niov, payload_iov, NULL,
                                 payload_offset, payload_len));

ksocknal_send_pages (nal_cb_t *nal, void *private, lib_msg_t *cookie,
                     ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
                     unsigned int payload_niov, ptl_kiov_t *payload_kiov,
                     size_t payload_offset, size_t payload_len)
        return (ksocknal_sendmsg(nal, private, cookie,
                                 hdr, type, nid, pid,
                                 payload_niov, NULL, payload_kiov,
                                 payload_offset, payload_len));
ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
        ptl_nid_t     nid = fwd->kprfd_gateway_nid;
        ksock_ftx_t  *ftx = (ksock_ftx_t *)&fwd->kprfd_scratch;

        CDEBUG (D_NET, "Forwarding [%p] -> "LPX64" ("LPX64")\n", fwd,
                fwd->kprfd_gateway_nid, fwd->kprfd_target_nid);

        /* I'm the gateway; must be the last hop */
        if (nid == ksocknal_lib.ni.nid)
                nid = fwd->kprfd_target_nid;

        /* setup iov for hdr */
        ftx->ftx_iov.iov_base = fwd->kprfd_hdr;
        ftx->ftx_iov.iov_len = sizeof(ptl_hdr_t);

        ftx->ftx_tx.tx_isfwd = 1;       /* This is a forwarding packet */
        ftx->ftx_tx.tx_nob   = sizeof(ptl_hdr_t) + fwd->kprfd_nob;
        ftx->ftx_tx.tx_niov  = 1;
        ftx->ftx_tx.tx_iov   = &ftx->ftx_iov;
        ftx->ftx_tx.tx_nkiov = fwd->kprfd_niov;
        ftx->ftx_tx.tx_kiov  = fwd->kprfd_kiov;

        rc = ksocknal_launch_packet (&ftx->ftx_tx, nid);
                kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, rc);

ksocknal_thread_start (int (*fn)(void *arg), void *arg)
        long pid = kernel_thread (fn, arg, 0);

        atomic_inc (&ksocknal_data.ksnd_nthreads);

ksocknal_thread_fini (void)
        atomic_dec (&ksocknal_data.ksnd_nthreads);
ksocknal_fmb_callback (void *arg, int error)
        ksock_fmb_t      *fmb = (ksock_fmb_t *)arg;
        ksock_fmb_pool_t *fmp = fmb->fmb_pool;
        ptl_hdr_t        *hdr = &fmb->fmb_hdr;
        ksock_conn_t     *conn = NULL;
        ksock_sched_t    *sched;
        unsigned long     flags;
        char              ipbuf[PTL_NALFMT_SIZE];
        char              ipbuf2[PTL_NALFMT_SIZE];

        CERROR("Failed to route packet from "
               LPX64" %s to "LPX64" %s: %d\n",
               NTOH__u64(hdr->src_nid),
               portals_nid2str(SOCKNAL, NTOH__u64(hdr->src_nid), ipbuf),
               NTOH__u64(hdr->dest_nid),
               portals_nid2str(SOCKNAL, NTOH__u64(hdr->dest_nid), ipbuf2),

        CDEBUG (D_NET, "routed packet from "LPX64" to "LPX64": OK\n",
                NTOH__u64 (hdr->src_nid), NTOH__u64 (hdr->dest_nid));

        /* drop peer ref taken on init */
        ksocknal_put_peer (fmb->fmb_peer);

        spin_lock_irqsave (&fmp->fmp_lock, flags);

        list_add (&fmb->fmb_list, &fmp->fmp_idle_fmbs);
        fmp->fmp_nactive_fmbs--;

        if (!list_empty (&fmp->fmp_blocked_conns)) {
                conn = list_entry (fmb->fmb_pool->fmp_blocked_conns.next,
                                   ksock_conn_t, ksnc_rx_list);
                list_del (&conn->ksnc_rx_list);

        spin_unlock_irqrestore (&fmp->fmp_lock, flags);

        CDEBUG (D_NET, "Scheduling conn %p\n", conn);
        LASSERT (conn->ksnc_rx_scheduled);
        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP);

        conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB;

        sched = conn->ksnc_scheduler;

        spin_lock_irqsave (&sched->kss_lock, flags);

        list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns);
        wake_up (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);
ksocknal_get_idle_fmb (ksock_conn_t *conn)
        int               payload_nob = conn->ksnc_rx_nob_left;
        unsigned long     flags;
        ksock_fmb_pool_t *pool;

        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
        LASSERT (kpr_routing(&ksocknal_data.ksnd_router));

        if (payload_nob <= SOCKNAL_SMALL_FWD_PAGES * PAGE_SIZE)
                pool = &ksocknal_data.ksnd_small_fmp;
        else
                pool = &ksocknal_data.ksnd_large_fmp;

        spin_lock_irqsave (&pool->fmp_lock, flags);

        if (!list_empty (&pool->fmp_idle_fmbs)) {
                fmb = list_entry(pool->fmp_idle_fmbs.next,
                                 ksock_fmb_t, fmb_list);
                list_del (&fmb->fmb_list);
                pool->fmp_nactive_fmbs++;
                spin_unlock_irqrestore (&pool->fmp_lock, flags);

        /* deschedule until fmb free */

        conn->ksnc_rx_state = SOCKNAL_RX_FMB_SLEEP;

        list_add_tail (&conn->ksnc_rx_list,
                       &pool->fmp_blocked_conns);

        spin_unlock_irqrestore (&pool->fmp_lock, flags);
ksocknal_init_fmb (ksock_conn_t *conn, ksock_fmb_t *fmb)
        int            payload_nob = conn->ksnc_rx_nob_left;
        ptl_nid_t      dest_nid = NTOH__u64 (conn->ksnc_hdr.dest_nid);
        int            nob = payload_nob;

        LASSERT (conn->ksnc_rx_scheduled);
        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
        LASSERT (conn->ksnc_rx_nob_wanted == conn->ksnc_rx_nob_left);
        LASSERT (payload_nob >= 0);
        LASSERT (payload_nob <= fmb->fmb_pool->fmp_buff_pages * PAGE_SIZE);
        LASSERT (sizeof (ptl_hdr_t) < PAGE_SIZE);
        LASSERT (fmb->fmb_kiov[0].kiov_offset == 0);

        /* Take a ref on the conn's peer to prevent module unload before
         * forwarding completes. */
        fmb->fmb_peer = conn->ksnc_peer;
        atomic_inc (&conn->ksnc_peer->ksnp_refcount);

        /* Copy the header we just read into the forwarding buffer.  If
         * there's payload, start reading it into the buffer,
         * otherwise the forwarding buffer can be kicked off
        fmb->fmb_hdr = conn->ksnc_hdr;

        LASSERT (niov < fmb->fmb_pool->fmp_buff_pages);
        LASSERT (fmb->fmb_kiov[niov].kiov_offset == 0);
        fmb->fmb_kiov[niov].kiov_len = MIN (PAGE_SIZE, nob);

        kpr_fwd_init(&fmb->fmb_fwd, dest_nid, &fmb->fmb_hdr,
                     payload_nob, niov, fmb->fmb_kiov,
                     ksocknal_fmb_callback, fmb);

        if (payload_nob == 0) {         /* got complete packet already */
                CDEBUG (D_NET, "%p "LPX64"->"LPX64" fwd_start (immediate)\n",
                        conn, NTOH__u64 (conn->ksnc_hdr.src_nid), dest_nid);

                kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);

                ksocknal_new_packet (conn, 0);  /* on to next packet */

        conn->ksnc_cookie = fmb;                /* stash fmb for later */
        conn->ksnc_rx_state = SOCKNAL_RX_BODY_FWD; /* read in the payload */

        /* Set up conn->ksnc_rx_kiov to read the payload into fmb's kiov-ed
        LASSERT (niov <= sizeof(conn->ksnc_rx_iov_space)/sizeof(ptl_kiov_t));

        conn->ksnc_rx_niov = 0;
        conn->ksnc_rx_nkiov = niov;
        conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
        memcpy(conn->ksnc_rx_kiov, fmb->fmb_kiov, niov * sizeof(ptl_kiov_t));

        CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d reading body\n", conn,
                NTOH__u64 (conn->ksnc_hdr.src_nid), dest_nid, payload_nob);
ksocknal_fwd_parse (ksock_conn_t *conn)
        ptl_nid_t dest_nid = NTOH__u64 (conn->ksnc_hdr.dest_nid);
        ptl_nid_t src_nid = NTOH__u64 (conn->ksnc_hdr.src_nid);
        int       body_len = NTOH__u32 (conn->ksnc_hdr.payload_length);
        char      str[PTL_NALFMT_SIZE];
        char      str2[PTL_NALFMT_SIZE];

        CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn,
                src_nid, dest_nid, conn->ksnc_rx_nob_left);

        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER);
        LASSERT (conn->ksnc_rx_scheduled);

        if (body_len < 0) {             /* length corrupt (overflow) */
                CERROR("dropping packet from "LPX64" (%s) for "LPX64" (%s): "
                       "packet size %d illegal\n",
                       src_nid, portals_nid2str(TCPNAL, src_nid, str),
                       dest_nid, portals_nid2str(TCPNAL, dest_nid, str2),

                ksocknal_new_packet (conn, 0);  /* on to new packet */

        if (!kpr_routing(&ksocknal_data.ksnd_router)) { /* not forwarding */
                CERROR("dropping packet from "LPX64" (%s) for "LPX64
                       " (%s): not forwarding\n",
                       src_nid, portals_nid2str(TCPNAL, src_nid, str),
                       dest_nid, portals_nid2str(TCPNAL, dest_nid, str2));
                /* on to new packet (skip this one's body) */
                ksocknal_new_packet (conn, body_len);

        if (body_len > PTL_MTU) {       /* too big to forward */
                CERROR ("dropping packet from "LPX64" (%s) for "LPX64
                        " (%s): packet size %d too big\n",
                        src_nid, portals_nid2str(TCPNAL, src_nid, str),
                        dest_nid, portals_nid2str(TCPNAL, dest_nid, str2),
                /* on to new packet (skip this one's body) */
                ksocknal_new_packet (conn, body_len);

        /* should have gone direct */
        peer = ksocknal_get_peer (conn->ksnc_hdr.dest_nid);
                CERROR ("dropping packet from "LPX64" (%s) for "LPX64
                        " (%s): target is a peer\n",
                        src_nid, portals_nid2str(TCPNAL, src_nid, str),
                        dest_nid, portals_nid2str(TCPNAL, dest_nid, str2));
                ksocknal_put_peer (peer);       /* drop ref from get above */

                /* on to next packet (skip this one's body) */
                ksocknal_new_packet (conn, body_len);

        conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB;       /* Getting FMB now */
        conn->ksnc_rx_nob_left = body_len;              /* stash packet size */
        conn->ksnc_rx_nob_wanted = body_len;            /* (no slop) */
ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
        static char ksocknal_slop_buffer[4096];

        if (nob_to_skip == 0) {         /* right at next packet boundary now */
                conn->ksnc_rx_started = 0;
                mb ();                  /* racing with timeout thread */

                conn->ksnc_rx_state = SOCKNAL_RX_HEADER;
                conn->ksnc_rx_nob_wanted = sizeof (ptl_hdr_t);
                conn->ksnc_rx_nob_left = sizeof (ptl_hdr_t);

                conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
                conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_hdr;
                conn->ksnc_rx_iov[0].iov_len = sizeof (ptl_hdr_t);
                conn->ksnc_rx_niov = 1;

                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_nkiov = 0;

        /* Set up to skip as much as possible now.  If there's more left
         * (ran out of iov entries) we'll get called again */

        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
        conn->ksnc_rx_nob_left = nob_to_skip;
        conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
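        /* Every iov entry below points at the same static slop buffer: the
         * bytes are being discarded, so it doesn't matter that successive
         * reads land on top of each other.  One call can therefore skip up
         * to (number of iov slots) * sizeof (ksocknal_slop_buffer) bytes
         * before we have to come round again. */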
        do {
                nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));

                conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
                conn->ksnc_rx_iov[niov].iov_len = nob;

        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));

        conn->ksnc_rx_niov = niov;
        conn->ksnc_rx_kiov = NULL;
        conn->ksnc_rx_nkiov = 0;
        conn->ksnc_rx_nob_wanted = skipped;
ksocknal_process_receive (ksock_conn_t *conn)
        LASSERT (atomic_read (&conn->ksnc_refcount) > 0);

        /* doesn't need a forwarding buffer */
        if (conn->ksnc_rx_state != SOCKNAL_RX_GET_FMB)

        fmb = ksocknal_get_idle_fmb (conn);
                /* conn descheduled waiting for idle fmb */

        if (ksocknal_init_fmb (conn, fmb)) {
                /* packet forwarded */

        /* NB: sched lock NOT held */
        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER ||
                 conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
                 conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD ||
                 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);

        LASSERT (conn->ksnc_rx_nob_wanted > 0);

        rc = ksocknal_receive(conn);

                LASSERT (rc != -EAGAIN);

                CWARN ("[%p] EOF from "LPX64" ip %d.%d.%d.%d:%d\n",
                       conn, conn->ksnc_peer->ksnp_nid,
                       HIPQUAD(conn->ksnc_ipaddr),
                else if (!conn->ksnc_closing)
                        CERROR ("[%p] Error %d on read from "LPX64
                                " ip %d.%d.%d.%d:%d\n",
                                conn, rc, conn->ksnc_peer->ksnp_nid,
                                HIPQUAD(conn->ksnc_ipaddr),

                ksocknal_close_conn_and_siblings (conn, rc);
                return (rc == 0 ? -ESHUTDOWN : rc);

        if (conn->ksnc_rx_nob_wanted != 0) {

        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_HEADER:
                if (conn->ksnc_hdr.type != HTON__u32(PTL_MSG_HELLO) &&
                    NTOH__u64(conn->ksnc_hdr.dest_nid) != ksocknal_lib.ni.nid) {
                        /* This packet isn't for me */
                        ksocknal_fwd_parse (conn);
                        switch (conn->ksnc_rx_state) {
                        case SOCKNAL_RX_HEADER:  /* skipped (zero payload) */
                                return (0);      /* => come back later */
                        case SOCKNAL_RX_SLOP:    /* skipping packet's body */
                                goto try_read;   /* => go read it */
                        case SOCKNAL_RX_GET_FMB: /* forwarding */
                                goto get_fmb;    /* => go get a fwd msg buffer */

                /* sets wanted_len, iovs etc */
                lib_parse(&ksocknal_lib, &conn->ksnc_hdr, conn);

                if (conn->ksnc_rx_nob_wanted != 0) { /* need to get payload? */
                        conn->ksnc_rx_state = SOCKNAL_RX_BODY;
                        goto try_read;          /* go read the payload */

                /* Fall through (completed packet for me) */

        case SOCKNAL_RX_BODY:
                /* payload all received */
                lib_finalize(&ksocknal_lib, NULL, conn->ksnc_cookie, PTL_OK);

        case SOCKNAL_RX_SLOP:
                /* starting new packet? */
                if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
                        return (0);     /* come back later */
                goto try_read;          /* try to finish reading slop now */

        case SOCKNAL_RX_BODY_FWD:
                /* payload all received */
                CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d fwd_start (got body)\n",
                        conn, NTOH__u64 (conn->ksnc_hdr.src_nid),
                        NTOH__u64 (conn->ksnc_hdr.dest_nid),
                        conn->ksnc_rx_nob_left);

                /* forward the packet.  NB ksocknal_init_fmb() put fmb into
                 * conn->ksnc_cookie */
                fmb = (ksock_fmb_t *)conn->ksnc_cookie;
                kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);

                /* no slop in forwarded packets */
                LASSERT (conn->ksnc_rx_nob_left == 0);

                ksocknal_new_packet (conn, 0);  /* on to next packet */
                return (0);                     /* (later) */

        return (-EINVAL);                       /* keep gcc happy */
ksocknal_recv (nal_cb_t *nal, void *private, lib_msg_t *msg,
               unsigned int niov, struct iovec *iov,
               size_t offset, size_t mlen, size_t rlen)
        ksock_conn_t *conn = (ksock_conn_t *)private;

        LASSERT (mlen <= rlen);
        LASSERT (niov <= PTL_MD_MAX_IOV);

        conn->ksnc_cookie = msg;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;

        conn->ksnc_rx_nkiov = 0;
        conn->ksnc_rx_kiov = NULL;
        conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
        conn->ksnc_rx_niov =
                lib_extract_iov(PTL_MD_MAX_IOV, conn->ksnc_rx_iov,
                                niov, iov, offset, mlen);

                lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
                lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

ksocknal_recv_pages (nal_cb_t *nal, void *private, lib_msg_t *msg,
                     unsigned int niov, ptl_kiov_t *kiov,
                     size_t offset, size_t mlen, size_t rlen)
        ksock_conn_t *conn = (ksock_conn_t *)private;

        LASSERT (mlen <= rlen);
        LASSERT (niov <= PTL_MD_MAX_IOV);

        conn->ksnc_cookie = msg;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;

        conn->ksnc_rx_niov = 0;
        conn->ksnc_rx_iov = NULL;
        conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
        conn->ksnc_rx_nkiov =
                lib_extract_kiov(PTL_MD_MAX_IOV, conn->ksnc_rx_kiov,
                                 niov, kiov, offset, mlen);

                lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
                lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
int ksocknal_scheduler (void *arg)
        ksock_sched_t *sched = (ksock_sched_t *)arg;
        unsigned long  flags;
        int            id = sched - ksocknal_data.ksnd_schedulers;

        snprintf (name, sizeof (name), "ksocknald_%02d", id);
        kportal_daemonize (name);
        kportal_blockallsigs ();

#if (CONFIG_SMP && CPU_AFFINITY)
        if (cpu_online(id)) {
                set_cpus_allowed(current, m);
                CERROR ("Can't set CPU affinity for %s\n", name);
#endif /* CONFIG_SMP && CPU_AFFINITY */
        spin_lock_irqsave (&sched->kss_lock, flags);

        while (!ksocknal_data.ksnd_shuttingdown) {
                int did_something = 0;

                /* Ensure I progress everything semi-fairly */

                if (!list_empty (&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          ksock_conn_t, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);

                        LASSERT(conn->ksnc_rx_scheduled);
                        LASSERT(conn->ksnc_rx_ready);

                        /* clear rx_ready in case receive isn't complete.
                         * Do it BEFORE we call process_recv, since
                         * data_ready can set it any time after we release
                        conn->ksnc_rx_ready = 0;
                        spin_unlock_irqrestore(&sched->kss_lock, flags);

                        rc = ksocknal_process_receive(conn);

                        spin_lock_irqsave(&sched->kss_lock, flags);

                        /* I'm the only one that can clear this flag */
                        LASSERT(conn->ksnc_rx_scheduled);

                        /* Did process_receive get everything it wanted? */
                                conn->ksnc_rx_ready = 1;

                        if (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP ||
                            conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB) {
                                /* Conn blocked for a forwarding buffer.
                                 * It will get queued for my attention when
                                 * one becomes available (and it might just
                                 * already have been!).  Meanwhile my ref
                                 * on it stays put. */
                        } else if (conn->ksnc_rx_ready) {
                                /* reschedule for rx */
                                list_add_tail (&conn->ksnc_rx_list,
                                               &sched->kss_rx_conns);
                                conn->ksnc_rx_scheduled = 0;
                                ksocknal_put_conn(conn);

                if (!list_empty (&sched->kss_tx_conns)) {
                        conn = list_entry(sched->kss_tx_conns.next,
                                          ksock_conn_t, ksnc_tx_list);
                        list_del (&conn->ksnc_tx_list);

                        LASSERT(conn->ksnc_tx_scheduled);
                        LASSERT(conn->ksnc_tx_ready);
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));

                        tx = list_entry(conn->ksnc_tx_queue.next,
                                        ksock_tx_t, tx_list);
                        /* dequeue now so empty list => more to send */
                        list_del(&tx->tx_list);

                        /* Clear tx_ready in case send isn't complete.  Do
                         * it BEFORE we call process_transmit, since
                         * write_space can set it any time after we release
                        conn->ksnc_tx_ready = 0;
                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                        rc = ksocknal_process_transmit(conn, tx);

                        spin_lock_irqsave (&sched->kss_lock, flags);

                        if (rc == -ENOMEM || rc == -EAGAIN) {
                                /* Incomplete send: replace tx on HEAD of tx_queue */
                                list_add (&tx->tx_list, &conn->ksnc_tx_queue);
                        } else {
                                /* Complete send; assume space for more */
                                conn->ksnc_tx_ready = 1;

                        if (rc == -ENOMEM) {
                                /* Do nothing; after a short timeout, this
                                 * conn will be reposted on kss_tx_conns. */
                        } else if (conn->ksnc_tx_ready &&
                                   !list_empty (&conn->ksnc_tx_queue)) {
                                /* reschedule for tx */
                                list_add_tail (&conn->ksnc_tx_list,
                                               &sched->kss_tx_conns);
                                conn->ksnc_tx_scheduled = 0;
                                ksocknal_put_conn (conn);

                if (!list_empty (&sched->kss_zctxdone_list)) {
                                list_entry(sched->kss_zctxdone_list.next,
                                           ksock_tx_t, tx_list);

                        list_del (&tx->tx_list);
                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                        ksocknal_tx_done (tx, 1);

                        spin_lock_irqsave (&sched->kss_lock, flags);

                if (!did_something ||           /* nothing to do */
                    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                        if (!did_something) {   /* wait for something to do */
                                rc = wait_event_interruptible (sched->kss_waitq,
                                        ksocknal_data.ksnd_shuttingdown ||
                                        !list_empty(&sched->kss_rx_conns) ||
                                        !list_empty(&sched->kss_tx_conns) ||
                                        !list_empty(&sched->kss_zctxdone_list));
                                rc = wait_event_interruptible (sched->kss_waitq,
                                        ksocknal_data.ksnd_shuttingdown ||
                                        !list_empty(&sched->kss_rx_conns) ||
                                        !list_empty(&sched->kss_tx_conns));

                        spin_lock_irqsave (&sched->kss_lock, flags);

        spin_unlock_irqrestore (&sched->kss_lock, flags);
        ksocknal_thread_fini ();
ksocknal_data_ready (struct sock *sk, int n)
        unsigned long  flags;
        ksock_sched_t *sched;

        /* interleave correctly with closing sockets... */
        read_lock (&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        if (conn == NULL) {     /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
                sk->sk_data_ready (sk, n);
        } else {
                sched = conn->ksnc_scheduler;

                spin_lock_irqsave (&sched->kss_lock, flags);

                conn->ksnc_rx_ready = 1;

                if (!conn->ksnc_rx_scheduled) { /* not being progressed */
                        list_add_tail(&conn->ksnc_rx_list,
                                      &sched->kss_rx_conns);
                        conn->ksnc_rx_scheduled = 1;
                        /* extra ref for scheduler */
                        atomic_inc (&conn->ksnc_refcount);

                        wake_up (&sched->kss_waitq);

                spin_unlock_irqrestore (&sched->kss_lock, flags);

        read_unlock (&ksocknal_data.ksnd_global_lock);
ksocknal_write_space (struct sock *sk)
        unsigned long  flags;
        ksock_sched_t *sched;

        /* interleave correctly with closing sockets... */
        read_lock (&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, tcp_wspace(sk), SOCKNAL_TX_LOW_WATER(sk), conn,
               (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
                                      " ready" : " blocked"),
               (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                      " scheduled" : " idle"),
               (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
                                      " empty" : " queued"));

        if (conn == NULL) {     /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space (sk);

                read_unlock (&ksocknal_data.ksnd_global_lock);

        if (tcp_wspace(sk) >= SOCKNAL_TX_LOW_WATER(sk)) { /* got enough space */
                clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);

                sched = conn->ksnc_scheduler;

                spin_lock_irqsave (&sched->kss_lock, flags);

                conn->ksnc_tx_ready = 1;

                if (!conn->ksnc_tx_scheduled &&         /* not being progressed */
                    !list_empty(&conn->ksnc_tx_queue)) {/* packets to send */
                        list_add_tail (&conn->ksnc_tx_list,
                                       &sched->kss_tx_conns);
                        conn->ksnc_tx_scheduled = 1;
                        /* extra ref for scheduler */
                        atomic_inc (&conn->ksnc_refcount);

                        wake_up (&sched->kss_waitq);

                spin_unlock_irqrestore (&sched->kss_lock, flags);

        read_unlock (&ksocknal_data.ksnd_global_lock);
ksocknal_sock_write (struct socket *sock, void *buffer, int nob)
        mm_segment_t oldmm = get_fs();

        struct iovec  iov = {
        struct msghdr msg = {
                .msg_control    = NULL,
                .msg_controllen = 0,

        rc = sock_sendmsg (sock, &msg, iov.iov_len);

        CERROR ("Unexpected zero rc\n");
        return (-ECONNABORTED);

        buffer = ((char *)buffer) + rc;

ksocknal_sock_read (struct socket *sock, void *buffer, int nob)
        mm_segment_t oldmm = get_fs();

        struct iovec  iov = {
        struct msghdr msg = {
                .msg_control    = NULL,
                .msg_controllen = 0,

        rc = sock_recvmsg (sock, &msg, iov.iov_len, 0);

        return (-ECONNABORTED);

        buffer = ((char *)buffer) + rc;
ksocknal_hello (struct socket *sock, ptl_nid_t *nid, int *type,
        ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid;
        char                ipbuf[PTL_NALFMT_SIZE];
        char                ipbuf2[PTL_NALFMT_SIZE];

        LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));

        memset (&hdr, 0, sizeof (hdr));
        hmv->magic         = __cpu_to_le32 (PORTALS_PROTO_MAGIC);
        hmv->version_major = __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR);
        hmv->version_minor = __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR);

        hdr.src_nid = __cpu_to_le64 (ksocknal_lib.ni.nid);
        hdr.type    = __cpu_to_le32 (PTL_MSG_HELLO);

        hdr.msg.hello.type = __cpu_to_le32 (*type);
        hdr.msg.hello.incarnation =
                __cpu_to_le64 (ksocknal_data.ksnd_incarnation);

        /* Assume sufficient socket buffering for this message */
        rc = ksocknal_sock_write (sock, &hdr, sizeof (hdr));
                CERROR ("Error %d sending HELLO to "LPX64" %s\n",
                        rc, *nid, portals_nid2str(SOCKNAL, *nid, ipbuf));

        rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv));
                CERROR ("Error %d reading HELLO from "LPX64" %s\n",
                        rc, *nid, portals_nid2str(SOCKNAL, *nid, ipbuf));

        if (hmv->magic != __le32_to_cpu (PORTALS_PROTO_MAGIC)) {
                CERROR ("Bad magic %#08x (%#08x expected) from "LPX64" %s\n",
                        __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, *nid,
                        portals_nid2str(SOCKNAL, *nid, ipbuf));

        if (hmv->version_major != __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) ||
            hmv->version_minor != __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) {
                CERROR ("Incompatible protocol version %d.%d (%d.%d expected)"
                        " from "LPX64" %s\n",
                        __le16_to_cpu (hmv->version_major),
                        __le16_to_cpu (hmv->version_minor),
                        PORTALS_PROTO_VERSION_MAJOR,
                        PORTALS_PROTO_VERSION_MINOR,
                        *nid, portals_nid2str(SOCKNAL, *nid, ipbuf));

#if (PORTALS_PROTO_VERSION_MAJOR != 0)
# error "This code only understands protocol version 0.x"
#endif
        /* version 0 sends magic/version as the dest_nid of a 'hello' header,
         * so read the rest of it in now... */

        rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv));
                CERROR ("Error %d reading rest of HELLO hdr from "LPX64" %s\n",
                        rc, *nid, portals_nid2str(SOCKNAL, *nid, ipbuf));

        /* ...and check we got what we expected */
        if (hdr.type != __cpu_to_le32 (PTL_MSG_HELLO) ||
            hdr.payload_length != __cpu_to_le32 (0)) {
                CERROR ("Expecting a HELLO hdr with 0 payload,"
                        " but got type %d with %d payload from "LPX64" %s\n",
                        __le32_to_cpu (hdr.type),
                        __le32_to_cpu (hdr.payload_length), *nid,
                        portals_nid2str(SOCKNAL, *nid, ipbuf));

        if (__le64_to_cpu(hdr.src_nid) == PTL_NID_ANY) {
                CERROR("Expecting a HELLO hdr with a NID, but got PTL_NID_ANY\n");

        if (*nid == PTL_NID_ANY) {      /* don't know peer's nid yet */
                *nid = __le64_to_cpu(hdr.src_nid);
        } else if (*nid != __le64_to_cpu (hdr.src_nid)) {
                CERROR ("Connected to nid "LPX64" %s, but expecting "LPX64" %s\n",
                        __le64_to_cpu (hdr.src_nid),
                        portals_nid2str(SOCKNAL,
                                        __le64_to_cpu(hdr.src_nid),
                        *nid, portals_nid2str(SOCKNAL, *nid, ipbuf2));

        if (*type == SOCKNAL_CONN_NONE) {
                /* I've accepted this connection; peer determines type */
                *type = __le32_to_cpu(hdr.msg.hello.type);
                switch (*type) {
                case SOCKNAL_CONN_ANY:
                case SOCKNAL_CONN_CONTROL:
                case SOCKNAL_CONN_BULK_IN:
                        *type = SOCKNAL_CONN_BULK_OUT;
                case SOCKNAL_CONN_BULK_OUT:
                        *type = SOCKNAL_CONN_BULK_IN;
                default:
                        CERROR ("Unexpected type %d from "LPX64" %s\n",
                                portals_nid2str(SOCKNAL, *nid, ipbuf));
        } else if (__le32_to_cpu(hdr.msg.hello.type) != SOCKNAL_CONN_NONE) {
                CERROR ("Mismatched types: me %d "LPX64" %s %d\n",
                        *type, *nid, portals_nid2str(SOCKNAL, *nid, ipbuf),
                        __le32_to_cpu(hdr.msg.hello.type));

        *incarnation = __le64_to_cpu(hdr.msg.hello.incarnation);
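        /* Handshake summary: both sides exchange a ptl_hdr_t of type
         * PTL_MSG_HELLO whose dest_nid field carries the magic/version
         * stamp and whose hello body carries the connection type and
         * incarnation.  The passive side sends SOCKNAL_CONN_NONE and
         * mirrors bulk types (BULK_IN <-> BULK_OUT) so each end labels
         * the flow from its own point of view. */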
ksocknal_setup_sock (struct socket *sock)
        mm_segment_t oldmm = get_fs ();
        struct linger linger;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        sock->sk->sk_allocation = GFP_NOFS;
#else
        sock->sk->allocation = GFP_NOFS;
#endif

        /* Ensure this socket aborts active sends immediately when we close
        linger.l_linger = 0;

        rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
                              (char *)&linger, sizeof (linger));
                CERROR ("Can't set SO_LINGER: %d\n", rc);

        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
                                    (char *)&option, sizeof (option));
                CERROR ("Can't set TCP_LINGER2: %d\n", rc);

#if SOCKNAL_USE_KEEPALIVES
        /* Keepalives: If 3/4 of the timeout elapses, start probing every
         * second until the timeout elapses. */
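        /* Worked example (assuming ksnd_io_timeout = 60s): TCP_KEEPIDLE
         * = 45s, a 1s probe interval, and TCP_KEEPCNT = 15 probes, so a
         * silent peer is declared dead roughly one io_timeout after its
         * last traffic. */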
        option = (ksocknal_data.ksnd_io_timeout * 3) / 4;

        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
                                    (char *)&option, sizeof (option));
                CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);

        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
                                    (char *)&option, sizeof (option));
                CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);

        option = ksocknal_data.ksnd_io_timeout / 4;

        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
                                    (char *)&option, sizeof (option));
                CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);

        rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
                              (char *)&option, sizeof (option));
                CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
ksocknal_connect_sock(struct socket **sockp, int *may_retry,
                      ksock_route_t *route, int local_port)
        struct sockaddr_in locaddr;
        struct sockaddr_in srvaddr;
        struct socket     *sock;
        mm_segment_t       oldmm = get_fs();

        memset(&locaddr, 0, sizeof(locaddr));
        locaddr.sin_family = AF_INET;
        locaddr.sin_port = htons(local_port);
        locaddr.sin_addr.s_addr = INADDR_ANY;

        memset (&srvaddr, 0, sizeof (srvaddr));
        srvaddr.sin_family = AF_INET;
        srvaddr.sin_port = htons (route->ksnr_port);
        srvaddr.sin_addr.s_addr = htonl (route->ksnr_ipaddr);

        rc = sock_create (PF_INET, SOCK_STREAM, 0, &sock);
                CERROR ("Can't create autoconnect socket: %d\n", rc);

        /* Ugh; have to map_fd for compatibility with sockets passed in
         * from userspace.  And we actually need the sock->file refcounting
         * that this gives you :) */

        rc = sock_map_fd (sock);
                sock_release (sock);
                CERROR ("sock_map_fd error %d\n", rc);

        /* NB the file descriptor (rc) now owns the ref on sock->file */
        LASSERT (sock->file != NULL);
        LASSERT (file_count(sock->file) == 1);

        get_file(sock->file);           /* extra ref makes sock->file */
        sys_close(rc);                  /* survive this close */

        /* Still got a single ref on sock->file */
        LASSERT (file_count(sock->file) == 1);
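        /* Net effect of the get_file/sys_close pair: the fd is gone (so
         * nothing in userspace can see or close it) while sock->file keeps
         * exactly one ref that we own until we fput() it on teardown. */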
        /* Set the socket timeouts, so our connection attempt completes in
        tv.tv_sec = ksocknal_data.ksnd_io_timeout;

        rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDTIMEO,
                              (char *)&tv, sizeof (tv));
                CERROR ("Can't set send timeout %d: %d\n",
                        ksocknal_data.ksnd_io_timeout, rc);

        rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVTIMEO,
                              (char *)&tv, sizeof (tv));
                CERROR ("Can't set receive timeout %d: %d\n",
                        ksocknal_data.ksnd_io_timeout, rc);

        rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                             (char *)&option, sizeof (option));
                CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);

        if (route->ksnr_buffer_size != 0) {
                option = route->ksnr_buffer_size;

                rc = sock_setsockopt (sock, SOL_SOCKET, SO_SNDBUF,
                                      (char *)&option, sizeof (option));
                        CERROR ("Can't set send buffer %d: %d\n",
                                route->ksnr_buffer_size, rc);

                rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVBUF,
                                      (char *)&option, sizeof (option));
                        CERROR ("Can't set receive buffer %d: %d\n",
                                route->ksnr_buffer_size, rc);

        rc = sock->ops->bind(sock,
                             (struct sockaddr *)&locaddr, sizeof(locaddr));
        if (rc == -EADDRINUSE) {
                CDEBUG(D_NET, "Port %d already in use\n", local_port);

        CERROR("Error trying to bind to reserved port %d: %d\n",

        rc = sock->ops->connect(sock,
                                (struct sockaddr *)&srvaddr, sizeof(srvaddr),
                                sock->file->f_flags);

        /* EADDRNOTAVAIL probably means we're already connected to the same
         * peer/port on the same local port on a differently typed
         * connection.  Let our caller retry with a different local
        *may_retry = (rc == -EADDRNOTAVAIL);

        CDEBUG(*may_retry ? D_NET : D_ERROR,
               "Error %d connecting to %u.%u.%u.%u/%d\n", rc,
               HIPQUAD(route->ksnr_ipaddr), route->ksnr_port);
ksocknal_connect_peer (ksock_route_t *route, int type)
        struct socket *sock;

        /* Iterate through reserved ports.  When typed connections are
         * used, we will need to bind to multiple ports, but we only know
         * this at connect time.  But, by that time we've already called
         * bind() so we need a new socket. */
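        /* Reserved ports (< 1024) can only be bound by a privileged
         * process, which the accepting peer can use as a weak sanity
         * check that the connection comes from another kernel rather
         * than an arbitrary user process. */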
        for (port = 1023; port > 512; --port) {
                rc = ksocknal_connect_sock(&sock, &may_retry, route, port);

                rc = ksocknal_create_conn(route, sock,
                                          route->ksnr_irq_affinity, type);

        CERROR("Out of ports trying to bind to a reserved port\n");
        return (-EADDRINUSE);
ksocknal_autoconnect (ksock_route_t *route)
        LIST_HEAD (zombies);
        unsigned long flags;

        for (type = 0; type < SOCKNAL_CONN_NTYPES; type++)
                if ((route->ksnr_connecting & (1 << type)) != 0)
        LASSERT (type < SOCKNAL_CONN_NTYPES);

        rc = ksocknal_connect_peer (route, type);

        /* successfully autoconnected: create_conn did the
         * route/conn binding and scheduled any blocked packets */

        if (route->ksnr_connecting == 0) {
                /* No more connections required */

        /* Connection attempt failed */

        write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);

        peer = route->ksnr_peer;
        route->ksnr_connecting = 0;

        /* This is a retry rather than a new connection */
        LASSERT (route->ksnr_retry_interval != 0);
        route->ksnr_timeout = jiffies + route->ksnr_retry_interval;
        route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2,
                                          SOCKNAL_MAX_RECONNECT_INTERVAL);
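        /* Classic exponential backoff: each failed attempt doubles the
         * retry interval, capped at SOCKNAL_MAX_RECONNECT_INTERVAL, so a
         * dead peer costs ever fewer connection attempts over time. */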
        if (!list_empty (&peer->ksnp_tx_queue) &&
            ksocknal_find_connecting_route_locked (peer) == NULL) {
                LASSERT (list_empty (&peer->ksnp_conns));

                /* None of the connections that the blocked packets are
                 * waiting for have been successful.  Complete them now... */
                do {
                        tx = list_entry (peer->ksnp_tx_queue.next,
                                         ksock_tx_t, tx_list);
                        list_del (&tx->tx_list);
                        list_add_tail (&tx->tx_list, &zombies);
                } while (!list_empty (&peer->ksnp_tx_queue));

        /* make this route least-favourite for re-selection */
        if (!route->ksnr_deleted) {
                list_del(&route->ksnr_list);
                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);

        write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);

        while (!list_empty (&zombies)) {
                char ipbuf[PTL_NALFMT_SIZE];
                char ipbuf2[PTL_NALFMT_SIZE];

                tx = list_entry (zombies.next, ksock_tx_t, tx_list);

                CERROR ("Deleting packet type %d len %d ("LPX64" %s->"LPX64" %s)\n",
                        NTOH__u32 (tx->tx_hdr->type),
                        NTOH__u32 (tx->tx_hdr->payload_length),
                        NTOH__u64 (tx->tx_hdr->src_nid),
                        portals_nid2str(SOCKNAL,
                                        NTOH__u64(tx->tx_hdr->src_nid),
                        NTOH__u64 (tx->tx_hdr->dest_nid),
                        portals_nid2str(SOCKNAL,
                                        NTOH__u64(tx->tx_hdr->dest_nid),

                list_del (&tx->tx_list);

                ksocknal_tx_done (tx, 0);

int
ksocknal_autoconnectd (void *arg)
{
        long            id = (long)arg;
        char            name[16];
        unsigned long   flags;
        ksock_route_t  *route;
        int             rc;

        snprintf (name, sizeof (name), "ksocknal_ad%02ld", id);
        kportal_daemonize (name);
        kportal_blockallsigs ();

        spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);

        while (!ksocknal_data.ksnd_shuttingdown) {

                if (!list_empty (&ksocknal_data.ksnd_autoconnectd_routes)) {
                        route = list_entry (ksocknal_data.ksnd_autoconnectd_routes.next,
                                            ksock_route_t, ksnr_connect_list);

                        list_del (&route->ksnr_connect_list);
                        spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);

                        ksocknal_autoconnect (route);
                        ksocknal_put_route (route);

                        spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
                        continue;
                }

                spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);

                rc = wait_event_interruptible (ksocknal_data.ksnd_autoconnectd_waitq,
                                               ksocknal_data.ksnd_shuttingdown ||
                                               !list_empty (&ksocknal_data.ksnd_autoconnectd_routes));

                spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
        }

        spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);

        ksocknal_thread_fini ();
        return (0);
}
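
#if 0
/* Minimal sketch (illustrative, not part of the original source) of the
 * queue discipline ksocknal_autoconnectd() follows: hold the spinlock
 * only to pop an item, never across the blocking work itself.
 * 'work_lock', 'work_queue', 'item_t' and 'do_work' are hypothetical. */
static void
drain_work_queue_example (void)
{
        unsigned long  flags;
        item_t        *item;

        spin_lock_irqsave (&work_lock, flags);
        while (!list_empty (&work_queue)) {
                item = list_entry (work_queue.next, item_t, it_list);
                list_del (&item->it_list);

                spin_unlock_irqrestore (&work_lock, flags);
                do_work (item);                 /* may block; lock dropped */
                spin_lock_irqsave (&work_lock, flags);
        }
        spin_unlock_irqrestore (&work_lock, flags);
}
#endif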

ksock_conn_t *
ksocknal_find_timed_out_conn (ksock_peer_t *peer)
{
        /* We're called with a shared lock on ksnd_global_lock */
        ksock_conn_t      *conn;
        struct list_head  *ctmp;
        ksock_sched_t     *sched;

        list_for_each (ctmp, &peer->ksnp_conns) {
                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
                sched = conn->ksnc_scheduler;

                /* Don't need the {get,put}connsock dance to deref ksnc_sock... */
                LASSERT (!conn->ksnc_closing);

                if (conn->ksnc_rx_started &&
                    time_after_eq (jiffies, conn->ksnc_rx_deadline)) {
                        /* Timed out incomplete incoming message */
                        atomic_inc (&conn->ksnc_refcount);
                        CERROR ("Timed out RX from "LPX64" %p %d.%d.%d.%d\n",
                                peer->ksnp_nid, conn, HIPQUAD(conn->ksnc_ipaddr));
                        return (conn);
                }

                if ((!list_empty (&conn->ksnc_tx_queue) ||
                     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
                    time_after_eq (jiffies, conn->ksnc_tx_deadline)) {
                        /* Timed out messages queued for sending, or
                         * messages buffered in the socket's send buffer */
                        atomic_inc (&conn->ksnc_refcount);
                        CERROR ("Timed out TX to "LPX64" %s%d %p %d.%d.%d.%d\n",
                                peer->ksnp_nid,
                                list_empty (&conn->ksnc_tx_queue) ? "" : "Q ",
                                conn->ksnc_sock->sk->sk_wmem_queued, conn,
                                HIPQUAD(conn->ksnc_ipaddr));
                        return (conn);
                }
        }

        return (NULL);
}

void
ksocknal_check_peer_timeouts (int idx)
{
        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
        struct list_head *ptmp;
        ksock_peer_t     *peer;
        ksock_conn_t     *conn;

 again:
        /* NB. We expect to have a look at all the peers and not find any
         * connections to time out, so we just use a shared lock while we
         * take a look... */
        read_lock (&ksocknal_data.ksnd_global_lock);

        list_for_each (ptmp, peers) {
                peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
                conn = ksocknal_find_timed_out_conn (peer);

                if (conn != NULL) {
                        read_unlock (&ksocknal_data.ksnd_global_lock);

                        CERROR ("Timed out conn "LPX64" ip %d.%d.%d.%d:%d\n",
                                peer->ksnp_nid,
                                HIPQUAD(conn->ksnc_ipaddr),
                                conn->ksnc_port);
                        ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);

                        /* NB we won't find this one again, but we can't
                         * just proceed with the next peer, since we dropped
                         * ksnd_global_lock and it might be dead already! */
                        ksocknal_put_conn (conn);
                        goto again;
                }
        }

        read_unlock (&ksocknal_data.ksnd_global_lock);
}
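
#if 0
/* The rescan idiom above in isolation (illustrative, not part of the
 * original source; 'table_lock', 'table', 'needs_action' and 'act' are
 * hypothetical).  Once the lock protecting a list is dropped mid-walk,
 * the saved cursor may point at freed memory, so restart from the head. */
static void
scan_with_restart_example (void)
{
        struct list_head *pos;

 again:
        read_lock (&table_lock);
        list_for_each (pos, &table) {
                if (needs_action (pos)) {
                        read_unlock (&table_lock);
                        act (pos);              /* may free list entries */
                        goto again;             /* cursor no longer valid */
                }
        }
        read_unlock (&table_lock);
}
#endif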

int
ksocknal_reaper (void *arg)
{
        wait_queue_t       wait;
        unsigned long      flags;
        ksock_conn_t      *conn;
        ksock_sched_t     *sched;
        struct list_head   enomem_conns;
        int                nenomem_conns;
        int                timeout;
        int                i;
        int                peer_index = 0;
        unsigned long      deadline = jiffies;

        kportal_daemonize ("ksocknal_reaper");
        kportal_blockallsigs ();

        INIT_LIST_HEAD(&enomem_conns);
        init_waitqueue_entry (&wait, current);

        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);

        while (!ksocknal_data.ksnd_shuttingdown) {

                if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
                        conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
                                           ksock_conn_t, ksnc_list);
                        list_del (&conn->ksnc_list);

                        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

                        ksocknal_terminate_conn (conn);
                        ksocknal_put_conn (conn);

                        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
                        continue;
                }

                if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
                        conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
                                           ksock_conn_t, ksnc_list);
                        list_del (&conn->ksnc_list);

                        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

                        ksocknal_destroy_conn (conn);

                        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
                        continue;
                }

                if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
                        list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
                        list_del_init(&ksocknal_data.ksnd_enomem_conns);
                }

                spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

                /* reschedule all the connections that stalled with ENOMEM... */
                nenomem_conns = 0;
                while (!list_empty (&enomem_conns)) {
                        conn = list_entry (enomem_conns.next,
                                           ksock_conn_t, ksnc_tx_list);
                        list_del (&conn->ksnc_tx_list);

                        sched = conn->ksnc_scheduler;

                        spin_lock_irqsave (&sched->kss_lock, flags);

                        LASSERT (conn->ksnc_tx_scheduled);
                        conn->ksnc_tx_ready = 1;
                        list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns);
                        wake_up (&sched->kss_waitq);

                        spin_unlock_irqrestore (&sched->kss_lock, flags);
                        nenomem_conns++;
                }

                /* careful with the jiffy wrap: the signed difference
                 * stays correct even when jiffies overflows */
                while ((timeout = (int)(deadline - jiffies)) <= 0) {
                        const int n = 4;
                        const int p = 1;
                        int       chunk = ksocknal_data.ksnd_peer_hash_size;

                        /* Time to check for timeouts on a few more peers: I do
                         * checks every 'p' seconds on a proportion of the peer
                         * table and I need to check every connection 'n' times
                         * within a timeout interval, to ensure I detect a
                         * timeout on any connection within (n+1)/n times the
                         * timeout interval. */
                        if (ksocknal_data.ksnd_io_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        ksocknal_data.ksnd_io_timeout;
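
                        /* Illustrative arithmetic (hypothetical values,
                         * not from the original source): with a 101-bucket
                         * peer hash, ksnd_io_timeout = 60s, n = 4 and
                         * p = 1, chunk = 101*4*1/60 = 6 buckets scanned
                         * per second, so the whole table is swept roughly
                         * every 17s; each conn is checked ~n times per
                         * timeout interval, and a dead conn is caught
                         * within about (n+1)/n * 60s = 75s. */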
                        if (chunk == 0)
                                chunk = 1;

                        for (i = 0; i < chunk; i++) {
                                ksocknal_check_peer_timeouts (peer_index);
                                peer_index = (peer_index + 1) %
                                             ksocknal_data.ksnd_peer_hash_size;
                        }

                        deadline += p * HZ;
                }

                if (nenomem_conns != 0) {
                        /* Reduce my timeout if I rescheduled ENOMEM conns.
                         * This also prevents me getting woken immediately
                         * if any go back on my enomem list. */
                        timeout = SOCKNAL_ENOMEM_RETRY;
                }
                ksocknal_data.ksnd_reaper_waketime = jiffies + timeout;

                add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
                set_current_state (TASK_INTERRUPTIBLE);

                if (!ksocknal_data.ksnd_shuttingdown &&
                    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
                    list_empty (&ksocknal_data.ksnd_zombie_conns))
                        schedule_timeout (timeout);

                set_current_state (TASK_RUNNING);
                remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);

                spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
        }

        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

        ksocknal_thread_fini ();
        return (0);
}

nal_cb_t ksocknal_lib = {
        nal_data:      &ksocknal_data,          /* NAL private data */
        cb_send:       ksocknal_send,
        cb_send_pages: ksocknal_send_pages,
        cb_recv:       ksocknal_recv,
        cb_recv_pages: ksocknal_recv_pages,
        cb_read:       ksocknal_read,
        cb_write:      ksocknal_write,
        cb_malloc:     ksocknal_malloc,
        cb_free:       ksocknal_free,
        cb_printf:     ksocknal_printf,
        cb_cli:        ksocknal_cli,
        cb_sti:        ksocknal_sti,
        cb_dist:       ksocknal_dist
};
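
#if 0
/* Illustrative only (not part of the original source): callers in the
 * lib layer dispatch through the table above rather than calling the
 * ksocknal_* functions directly, along these lines: */
static void
nal_cb_usage_example (nal_cb_t *nal)
{
        void *buf = nal->cb_malloc (nal, 256);          /* PORTAL_ALLOC */

        if (buf != NULL) {
                nal->cb_printf (nal, "allocated %d bytes\n", 256);
                nal->cb_free (nal, buf, 256);           /* PORTAL_FREE */
        }
}
#endif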