/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * LIB functions follow
 */

ksocknal_dist(lib_nal_t *nal, ptl_nid_t nid, unsigned long *dist)
        /* I would guess that if ksocknal_get_peer (nid) == NULL,
           and we're not routing, then 'nid' is very distant :) */
        if (nal->libnal_ni.ni_pid.nid == nid) {

ksocknal_free_ltx (ksock_ltx_t *ltx)
        atomic_dec(&ksocknal_data.ksnd_nactive_ltxs);
        PORTAL_FREE(ltx, ltx->ltx_desc_size);

ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
        struct iovec *iov = tx->tx_iov;

        LASSERT (tx->tx_niov > 0);

        /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
        rc = ksocknal_lib_send_iov(conn, tx);

        if (rc <= 0) /* sent nothing? */

        LASSERT (nob <= tx->tx_resid);

        LASSERT (tx->tx_niov > 0);

        if (nob < iov->iov_len) {
                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);

ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
        ptl_kiov_t *kiov = tx->tx_kiov;

        LASSERT (tx->tx_niov == 0);
        LASSERT (tx->tx_nkiov > 0);

        /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
        rc = ksocknal_lib_send_kiov(conn, tx);

        if (rc <= 0) /* sent nothing? */

        LASSERT (nob <= tx->tx_resid);

        LASSERT(tx->tx_nkiov > 0);

        if (nob < kiov->kiov_len) {
                kiov->kiov_offset += nob;
                kiov->kiov_len -= nob;

        nob -= kiov->kiov_len;
        tx->tx_kiov = ++kiov;

ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
        if (ksocknal_data.ksnd_stall_tx != 0) {
                set_current_state (TASK_UNINTERRUPTIBLE);
                schedule_timeout (cfs_time_seconds(ksocknal_data.ksnd_stall_tx));

        LASSERT (tx->tx_resid != 0);

        rc = ksocknal_getconnsock (conn);
                LASSERT (conn->ksnc_closing);

        if (ksocknal_data.ksnd_enomem_tx > 0) {
                ksocknal_data.ksnd_enomem_tx--;
        } else if (tx->tx_niov != 0) {
                rc = ksocknal_send_iov (conn, tx);
                rc = ksocknal_send_kiov (conn, tx);

        bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
        if (rc > 0) /* sent something? */
                conn->ksnc_tx_bufnob += rc; /* account it */

        if (bufnob < conn->ksnc_tx_bufnob) {
                /* allocated send buffer bytes < computed; infer
                 * something got ACKed */
                conn->ksnc_tx_deadline = cfs_time_shift(ksocknal_tunables.ksnd_io_timeout);
                conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                conn->ksnc_tx_bufnob = bufnob;

        if (rc <= 0) { /* Didn't write anything? */
                ksock_sched_t *sched;

                if (rc == 0) /* some stacks return 0 instead of -EAGAIN */

                /* Check if EAGAIN is due to memory pressure */

                sched = conn->ksnc_scheduler;
                spin_lock_irqsave(&sched->kss_lock, flags);

                if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
                    !conn->ksnc_tx_ready) {
                        /* SOCK_NOSPACE is set when the socket fills
                         * and cleared in the write_space callback
                         * (which also sets ksnc_tx_ready).  If
                         * SOCK_NOSPACE and ksnc_tx_ready are BOTH
                         * zero, I didn't fill the socket and
                         * write_space won't reschedule me, so I
                         * return -ENOMEM to get my caller to retry

                spin_unlock_irqrestore(&sched->kss_lock, flags);

        /* socket's wmem_queued now includes 'rc' bytes */
        atomic_sub (rc, &conn->ksnc_tx_nob);

        } while (tx->tx_resid != 0);

        ksocknal_putconnsock (conn);

ksocknal_recv_iov (ksock_conn_t *conn)
        struct iovec *iov = conn->ksnc_rx_iov;

        LASSERT (conn->ksnc_rx_niov > 0);

        /* Never touch conn->ksnc_rx_iov or change connection
         * status inside ksocknal_lib_recv_iov */
        rc = ksocknal_lib_recv_iov(conn);

        /* received something... */

        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
        conn->ksnc_rx_deadline = cfs_time_shift (ksocknal_tunables.ksnd_io_timeout);
        mb(); /* order with setting rx_started */
        conn->ksnc_rx_started = 1;
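        /* NB ksnc_rx_started is read by the reaper when it checks for
         * receive timeouts (see ksocknal_find_timed_out_conn); the
         * barrier above orders the deadline update before the flag
         * becomes visible to it. */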
        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;

        LASSERT (conn->ksnc_rx_niov > 0);

        if (nob < iov->iov_len) {
                iov->iov_base = (void *)(((unsigned long)iov->iov_base) + nob);

        conn->ksnc_rx_iov = ++iov;
        conn->ksnc_rx_niov--;

ksocknal_recv_kiov (ksock_conn_t *conn)
        ptl_kiov_t *kiov = conn->ksnc_rx_kiov;

        LASSERT (conn->ksnc_rx_nkiov > 0);

        /* Never touch conn->ksnc_rx_kiov or change connection
         * status inside ksocknal_lib_recv_kiov */
        rc = ksocknal_lib_recv_kiov(conn);

        /* received something... */

        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
        conn->ksnc_rx_deadline = cfs_time_shift (ksocknal_tunables.ksnd_io_timeout);
        mb(); /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;

        LASSERT (conn->ksnc_rx_nkiov > 0);

        if (nob < kiov->kiov_len) {
                kiov->kiov_offset += nob;
                kiov->kiov_len -= nob;

        nob -= kiov->kiov_len;
        conn->ksnc_rx_kiov = ++kiov;
        conn->ksnc_rx_nkiov--;

ksocknal_receive (ksock_conn_t *conn)
        /* Return 1 on success, 0 on EOF, < 0 on error.
         * Caller checks ksnc_rx_nob_wanted to determine
         * progress/completion. */

        if (ksocknal_data.ksnd_stall_rx != 0) {
                set_current_state (TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));

        rc = ksocknal_getconnsock (conn);
                LASSERT (conn->ksnc_closing);

        if (conn->ksnc_rx_niov != 0)
                rc = ksocknal_recv_iov (conn);
                rc = ksocknal_recv_kiov (conn);

        /* error/EOF or partial receive */

        } else if (rc == 0 && conn->ksnc_rx_started) {
                /* EOF in the middle of a message */

        /* Completed a fragment */

        if (conn->ksnc_rx_nob_wanted == 0) {
                /* Completed a message segment (header or payload) */
                if ((ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0 &&
                    (conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
                     conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) {
                        /* Remind the socket to ack eagerly... */
                        ksocknal_lib_eager_ack(conn);
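                        /* NB ksnd_eager_ack is a bitmask over connection
                         * types, so prompt ACKs can be requested per
                         * type; the sender at the other end infers ACKs
                         * from its send buffer draining (see
                         * ksocknal_transmit above). */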
        ksocknal_putconnsock (conn);

ksocknal_zc_callback (zccd_t *zcd)
        ksock_tx_t *tx = KSOCK_ZCCD_2_TX(zcd);
        ksock_sched_t *sched = tx->tx_conn->ksnc_scheduler;

        /* Schedule tx for cleanup (can't do it now due to lock conflicts) */

        spin_lock_irqsave (&sched->kss_lock, flags);

        list_add_tail (&tx->tx_list, &sched->kss_zctxdone_list);
        cfs_waitq_signal (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);

ksocknal_tx_done (ksock_tx_t *tx, int asynch)
        if (tx->tx_conn != NULL) {
                /* zero copy completion isn't always from
                 * process_transmit() so it needs to keep a ref on

                ksocknal_put_conn (tx->tx_conn);

        if (tx->tx_isfwd) { /* was a forwarded packet? */
                kpr_fwd_done (&ksocknal_data.ksnd_router,
                              KSOCK_TX_2_KPR_FWD_DESC (tx),
                              (tx->tx_resid == 0) ? 0 : -ECONNABORTED);

        ltx = KSOCK_TX_2_KSOCK_LTX (tx);

        lib_finalize (&ksocknal_lib, ltx->ltx_private, ltx->ltx_cookie,
                      (tx->tx_resid == 0) ? PTL_OK : PTL_FAIL);

        ksocknal_free_ltx (ltx);

ksocknal_tx_launched (ksock_tx_t *tx)
        if (atomic_read (&tx->tx_zccd.zccd_count) != 1) {
                ksock_conn_t *conn = tx->tx_conn;

                /* zccd skbufs are still in-flight.  First take a ref on
                 * conn, so it hangs about for ksocknal_tx_done... */
                atomic_inc (&conn->ksnc_refcount);

                /* ...then drop the initial ref on zccd, so the zero copy
                 * callback can occur */
                zccd_put (&tx->tx_zccd);

        /* Any zero-copy-ness (if any) has completed; I can complete the
         * transmit now, avoiding an extra schedule */
        ksocknal_tx_done (tx, 0);

ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
        rc = ksocknal_transmit (conn, tx);

        CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);

        if (tx->tx_resid == 0) {
                /* Sent everything OK */

                ksocknal_tx_launched (tx);

        counter++; /* exponential backoff warnings */
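        /* NB (counter & -counter) == counter only when counter is a
         * power of two, so the warning below fires at 1, 2, 4, 8, ... */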
        if ((counter & (-counter)) == counter)
                CWARN("%d ENOMEM tx %p\n", counter, conn);

        /* Queue on ksnd_enomem_conns for retry after a timeout */
        spin_lock_irqsave(&ksocknal_data.ksnd_reaper_lock, flags);

        /* enomem list takes over scheduler's ref... */
        LASSERT (conn->ksnc_tx_scheduled);
        list_add_tail(&conn->ksnc_tx_list,
                      &ksocknal_data.ksnd_enomem_conns);
        if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
                                           SOCKNAL_ENOMEM_RETRY),
                              ksocknal_data.ksnd_reaper_waketime))
                cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);

        spin_unlock_irqrestore(&ksocknal_data.ksnd_reaper_lock, flags);

        if (!conn->ksnc_closing) {
                LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
                              "while we were sending data; it may have "
                              HIPQUAD(conn->ksnc_ipaddr));
                LCONSOLE_WARN("There was an unexpected network error "
                              "while writing to %u.%u.%u.%u: %d.\n",
                              HIPQUAD(conn->ksnc_ipaddr), rc);
                CERROR("[%p] Error %d on write to "LPX64
                       " ip %d.%d.%d.%d:%d\n", conn, rc,
                       conn->ksnc_peer->ksnp_nid,
                       HIPQUAD(conn->ksnc_ipaddr),

        ksocknal_close_conn_and_siblings (conn, rc);
        ksocknal_tx_launched (tx);

ksocknal_launch_autoconnect_locked (ksock_route_t *route)
        /* called holding write lock on ksnd_global_lock */

        LASSERT (!route->ksnr_deleted);
        LASSERT ((route->ksnr_connected & (1 << SOCKNAL_CONN_ANY)) == 0);
        LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES);
        LASSERT (route->ksnr_connecting == 0);

        if (ksocknal_tunables.ksnd_typed_conns)
                route->ksnr_connecting =
                        KSNR_TYPED_ROUTES & ~route->ksnr_connected;
                route->ksnr_connecting = (1 << SOCKNAL_CONN_ANY);
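        /* NB ksnr_connecting is a bitmask of connection types still
         * being established: with typed connections enabled it requests
         * every type not already connected, otherwise just the single
         * 'any' connection (ksocknal_autoconnect walks this mask). */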
        atomic_inc (&route->ksnr_refcount); /* extra ref for asynchd */

        spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);

        list_add_tail (&route->ksnr_connect_list,
                       &ksocknal_data.ksnd_autoconnectd_routes);
        cfs_waitq_signal (&ksocknal_data.ksnd_autoconnectd_waitq);

        spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);

ksocknal_find_target_peer_locked (ksock_tx_t *tx, ptl_nid_t nid)
        char ipbuf[PTL_NALFMT_SIZE];
        ptl_nid_t target_nid;
        ksock_peer_t *peer = ksocknal_find_peer_locked (nid);

        CERROR ("Can't send packet to "LPX64
                " %s: routed target is not a peer\n",
                nid, portals_nid2str(SOCKNAL, nid, ipbuf));

        rc = kpr_lookup (&ksocknal_data.ksnd_router, nid, tx->tx_nob,
                CERROR ("Can't route to "LPX64" %s: router error %d\n",
                        nid, portals_nid2str(SOCKNAL, nid, ipbuf), rc);

        peer = ksocknal_find_peer_locked (target_nid);

        CERROR ("Can't send packet to "LPX64" %s: no peer entry\n",
                target_nid, portals_nid2str(SOCKNAL, target_nid, ipbuf));

ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer)
        struct list_head *tmp;
        ksock_conn_t *typed = NULL;
        ksock_conn_t *fallback = NULL;

        list_for_each (tmp, &peer->ksnp_conns) {
                ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
#if SOCKNAL_ROUND_ROBIN
                int nob = atomic_read(&c->ksnc_tx_nob) +
                          SOCK_WMEM_QUEUED(c->ksnc_sock);

                LASSERT (!c->ksnc_closing);

                if (fallback == NULL || nob < fnob) {

                if (!ksocknal_tunables.ksnd_typed_conns)

                switch (c->ksnc_type) {
                case SOCKNAL_CONN_ANY:
                case SOCKNAL_CONN_BULK_IN:
                case SOCKNAL_CONN_BULK_OUT:
                        if (tx->tx_nob < ksocknal_tunables.ksnd_min_bulk)
                case SOCKNAL_CONN_CONTROL:
                        if (tx->tx_nob >= ksocknal_tunables.ksnd_min_bulk)

                if (typed == NULL || nob < tnob) {

        /* prefer the typed selection */
        conn = (typed != NULL) ? typed : fallback;

#if SOCKNAL_ROUND_ROBIN
        /* round-robin all else being equal */
        list_del (&conn->ksnc_list);
        list_add_tail (&conn->ksnc_list, &peer->ksnp_conns);

ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
        ksock_sched_t *sched = conn->ksnc_scheduler;

        /* called holding global lock (read or irq-write) and caller may
         * not have dropped this lock between finding conn and calling me,
         * so we don't need the {get,put}connsock dance to deref

        LASSERT(!conn->ksnc_closing);
        LASSERT(tx->tx_resid == tx->tx_nob);

        CDEBUG (D_NET, "Sending to "LPX64" ip %d.%d.%d.%d:%d\n",
                conn->ksnc_peer->ksnp_nid,
                HIPQUAD(conn->ksnc_ipaddr),

        atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);

        zccd_init (&tx->tx_zccd, ksocknal_zc_callback);
        /* NB this sets 1 ref on zccd, so the callback can only occur after
         * I've released this ref. */

        spin_lock_irqsave (&sched->kss_lock, flags);

        if (list_empty(&conn->ksnc_tx_queue) &&
            SOCK_WMEM_QUEUED(conn->ksnc_sock) == 0) {
                /* First packet starts the timeout */
                conn->ksnc_tx_deadline = cfs_time_shift(ksocknal_tunables.ksnd_io_timeout);
                conn->ksnc_tx_bufnob = 0;
                mb(); /* order with adding to tx_queue */

        list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);

        if (conn->ksnc_tx_ready &&      /* able to send */
            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
                /* +1 ref for scheduler */
                atomic_inc (&conn->ksnc_refcount);
                list_add_tail (&conn->ksnc_tx_list,
                               &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                cfs_waitq_signal (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);

ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
        struct list_head *tmp;
        ksock_route_t *route;

        list_for_each (tmp, &peer->ksnp_routes) {
                route = list_entry (tmp, ksock_route_t, ksnr_list);
                bits = route->ksnr_connected;

                /* All typed connections established? */
                if ((bits & KSNR_TYPED_ROUTES) == KSNR_TYPED_ROUTES)

                /* Untyped connection established? */
                if ((bits & (1 << SOCKNAL_CONN_ANY)) != 0)

                /* connection being established? */
                if (route->ksnr_connecting != 0)

                /* too soon to retry this guy? */
                if (!cfs_time_aftereq (cfs_time_current(), route->ksnr_timeout))

ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
        struct list_head *tmp;
        ksock_route_t *route;

        list_for_each (tmp, &peer->ksnp_routes) {
                route = list_entry (tmp, ksock_route_t, ksnr_list);

                if (route->ksnr_connecting != 0)

ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid)
        ksock_route_t *route;

        /* Ensure the frags we've been given EXACTLY match the number of
         * bytes we want to send.  Many TCP/IP stacks disregard any total
         * size parameters passed to them and just look at the frags.
         *
         * We always expect at least 1 mapped fragment containing the
         * complete portals header. */
        LASSERT (lib_iov_nob (tx->tx_niov, tx->tx_iov) +
                 lib_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob);
        LASSERT (tx->tx_niov >= 1);
        LASSERT (tx->tx_iov[0].iov_len >= sizeof (ptl_hdr_t));

        CDEBUG (D_NET, "packet %p type %d, nob %d niov %d nkiov %d\n",
                tx, ((ptl_hdr_t *)tx->tx_iov[0].iov_base)->type,
                tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

        tx->tx_conn = NULL; /* only set when assigned a conn */
        tx->tx_resid = tx->tx_nob;
        tx->tx_hdr = (ptl_hdr_t *)tx->tx_iov[0].iov_base;

        g_lock = &ksocknal_data.ksnd_global_lock;
#if !SOCKNAL_ROUND_ROBIN
        peer = ksocknal_find_target_peer_locked (tx, nid);
                read_unlock (g_lock);
                return (-EHOSTUNREACH);

        if (ksocknal_find_connectable_route_locked(peer) == NULL) {
                conn = ksocknal_find_conn_locked (tx, peer);
                /* I've got no autoconnect routes that need to be
                 * connecting and I do have an actual connection... */
                ksocknal_queue_tx_locked (tx, conn);
                read_unlock (g_lock);

        /* I'll need a write lock... */
        read_unlock (g_lock);

        write_lock_irqsave(g_lock, flags);

        peer = ksocknal_find_target_peer_locked (tx, nid);
                write_unlock_irqrestore(g_lock, flags);
                return (-EHOSTUNREACH);

        /* launch any/all autoconnections that need it */
        route = ksocknal_find_connectable_route_locked (peer);
                ksocknal_launch_autoconnect_locked (route);

        conn = ksocknal_find_conn_locked (tx, peer);
                /* Connection exists; queue message on it */
                ksocknal_queue_tx_locked (tx, conn);
                write_unlock_irqrestore (g_lock, flags);

        route = ksocknal_find_connecting_route_locked (peer);
                /* At least 1 connection is being established; queue the
                list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
                write_unlock_irqrestore (g_lock, flags);

        write_unlock_irqrestore (g_lock, flags);
        return (-EHOSTUNREACH);

ksocknal_sendmsg(lib_nal_t *nal,
                 unsigned int payload_niov,
                 struct iovec *payload_iov,
                 ptl_kiov_t *payload_kiov,
                 size_t payload_offset,

        /* NB 'private' is different depending on what we're sending.
         * Just ignore it... */

        CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid:"LPX64
               " pid %d\n", payload_nob, payload_niov, nid, pid);

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= PTL_MD_MAX_IOV);

        /* It must be OK to kmap() if required */
        LASSERT (payload_kiov == NULL || !in_interrupt ());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        if (payload_iov != NULL)
                desc_size = offsetof(ksock_ltx_t, ltx_iov[1 + payload_niov]);
                desc_size = offsetof(ksock_ltx_t, ltx_kiov[payload_niov]);
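        /* NB ltx_iov[0] always carries the portals header, so a mapped
         * payload needs payload_niov + 1 iovs; a paged payload keeps the
         * single header iov and sizes the kiov array instead. */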
        if (in_interrupt() ||
            type == PTL_MSG_ACK ||
            type == PTL_MSG_REPLY) {
                /* Can't block if in interrupt or responding to an incoming
                PORTAL_ALLOC_ATOMIC(ltx, desc_size);
                PORTAL_ALLOC(ltx, desc_size);

                CERROR("Can't allocate tx desc type %d size %d %s\n",
                       type, desc_size, in_interrupt() ? "(intr)" : "");
                return (PTL_NO_SPACE);

        atomic_inc(&ksocknal_data.ksnd_nactive_ltxs);

        ltx->ltx_desc_size = desc_size;

        /* We always have 1 mapped frag for the header */
        ltx->ltx_tx.tx_iov = ltx->ltx_iov;
        ltx->ltx_iov[0].iov_base = &ltx->ltx_hdr;
        ltx->ltx_iov[0].iov_len = sizeof(*hdr);

        ltx->ltx_private = private;
        ltx->ltx_cookie = cookie;

        ltx->ltx_tx.tx_isfwd = 0;
        ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_nob;

        if (payload_iov != NULL) {
                /* payload is all mapped */
                ltx->ltx_tx.tx_kiov = NULL;
                ltx->ltx_tx.tx_nkiov = 0;

                ltx->ltx_tx.tx_niov =
                        1 + lib_extract_iov(payload_niov, &ltx->ltx_iov[1],
                                            payload_niov, payload_iov,
                                            payload_offset, payload_nob);
                /* payload is all pages */
                ltx->ltx_tx.tx_niov = 1;

                ltx->ltx_tx.tx_kiov = ltx->ltx_kiov;
                ltx->ltx_tx.tx_nkiov =
                        lib_extract_kiov(payload_niov, ltx->ltx_kiov,
                                         payload_niov, payload_kiov,
                                         payload_offset, payload_nob);

        rc = ksocknal_launch_packet(&ltx->ltx_tx, nid);

        ksocknal_free_ltx(ltx);

ksocknal_send (lib_nal_t *nal, void *private, lib_msg_t *cookie,
               ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
               unsigned int payload_niov, struct iovec *payload_iov,
               size_t payload_offset, size_t payload_len)
        return (ksocknal_sendmsg(nal, private, cookie,
                                 payload_niov, payload_iov, NULL,
                                 payload_offset, payload_len));

ksocknal_send_pages (lib_nal_t *nal, void *private, lib_msg_t *cookie,
                     ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
                     unsigned int payload_niov, ptl_kiov_t *payload_kiov,
                     size_t payload_offset, size_t payload_len)
        return (ksocknal_sendmsg(nal, private, cookie,
                                 payload_niov, NULL, payload_kiov,
                                 payload_offset, payload_len));

ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
        ptl_nid_t nid = fwd->kprfd_gateway_nid;
        ksock_ftx_t *ftx = (ksock_ftx_t *)&fwd->kprfd_scratch;

        CDEBUG (D_NET, "Forwarding [%p] -> "LPX64" ("LPX64")\n", fwd,
                fwd->kprfd_gateway_nid, fwd->kprfd_target_nid);

        /* I'm the gateway; must be the last hop */
        if (nid == ksocknal_lib.libnal_ni.ni_pid.nid)
                nid = fwd->kprfd_target_nid;

        /* setup iov for hdr */
        ftx->ftx_iov.iov_base = fwd->kprfd_hdr;
        ftx->ftx_iov.iov_len = sizeof(ptl_hdr_t);

        ftx->ftx_tx.tx_isfwd = 1; /* This is a forwarding packet */
        ftx->ftx_tx.tx_nob = sizeof(ptl_hdr_t) + fwd->kprfd_nob;
        ftx->ftx_tx.tx_niov = 1;
        ftx->ftx_tx.tx_iov = &ftx->ftx_iov;
        ftx->ftx_tx.tx_nkiov = fwd->kprfd_niov;
        ftx->ftx_tx.tx_kiov = fwd->kprfd_kiov;
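        /* NB the single mapped iov carries just the portals header; the
         * payload rides in the kiov pages the router handed us, so the
         * forwarding path copies no payload data. */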
        rc = ksocknal_launch_packet (&ftx->ftx_tx, nid);
                kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, rc);

ksocknal_thread_start (int (*fn)(void *arg), void *arg)
        long pid = cfs_kernel_thread (fn, arg, 0);

        write_lock_irqsave(&ksocknal_data.ksnd_global_lock, flags);
        ksocknal_data.ksnd_nthreads++;
        write_unlock_irqrestore(&ksocknal_data.ksnd_global_lock, flags);

ksocknal_thread_fini (void)
        write_lock_irqsave(&ksocknal_data.ksnd_global_lock, flags);
        ksocknal_data.ksnd_nthreads--;
        write_unlock_irqrestore(&ksocknal_data.ksnd_global_lock, flags);

ksocknal_fmb_callback (void *arg, int error)
        ksock_fmb_t *fmb = (ksock_fmb_t *)arg;
        ksock_fmb_pool_t *fmp = fmb->fmb_pool;
        ptl_hdr_t *hdr = &fmb->fmb_hdr;
        ksock_conn_t *conn = NULL;
        ksock_sched_t *sched;
        unsigned long flags;
        char ipbuf[PTL_NALFMT_SIZE];
        char ipbuf2[PTL_NALFMT_SIZE];

                CERROR("Failed to route packet from "
                       LPX64" %s to "LPX64" %s: %d\n",
                       le64_to_cpu(hdr->src_nid),
                       portals_nid2str(SOCKNAL, le64_to_cpu(hdr->src_nid), ipbuf),
                       le64_to_cpu(hdr->dest_nid),
                       portals_nid2str(SOCKNAL, le64_to_cpu(hdr->dest_nid), ipbuf2),

                CDEBUG (D_NET, "routed packet from "LPX64" to "LPX64": OK\n",
                        le64_to_cpu(hdr->src_nid), le64_to_cpu(hdr->dest_nid));

        /* drop peer ref taken on init */
        ksocknal_put_peer (fmb->fmb_peer);

        spin_lock_irqsave (&fmp->fmp_lock, flags);

        list_add (&fmb->fmb_list, &fmp->fmp_idle_fmbs);
        fmp->fmp_nactive_fmbs--;

        if (!list_empty (&fmp->fmp_blocked_conns)) {
                conn = list_entry (fmb->fmb_pool->fmp_blocked_conns.next,
                                   ksock_conn_t, ksnc_rx_list);
                list_del (&conn->ksnc_rx_list);

        spin_unlock_irqrestore (&fmp->fmp_lock, flags);

        CDEBUG (D_NET, "Scheduling conn %p\n", conn);
        LASSERT (conn->ksnc_rx_scheduled);
        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP);

        conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB;

        sched = conn->ksnc_scheduler;

        spin_lock_irqsave (&sched->kss_lock, flags);

        list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns);
        cfs_waitq_signal (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);

ksocknal_get_idle_fmb (ksock_conn_t *conn)
        int payload_nob = conn->ksnc_rx_nob_left;
        unsigned long flags;
        ksock_fmb_pool_t *pool;

        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
        LASSERT (kpr_routing(&ksocknal_data.ksnd_router));

        if (payload_nob <= SOCKNAL_SMALL_FWD_PAGES * CFS_PAGE_SIZE)
                pool = &ksocknal_data.ksnd_small_fmp;
                pool = &ksocknal_data.ksnd_large_fmp;

        spin_lock_irqsave (&pool->fmp_lock, flags);

        if (!list_empty (&pool->fmp_idle_fmbs)) {
                fmb = list_entry(pool->fmp_idle_fmbs.next,
                                 ksock_fmb_t, fmb_list);
                list_del (&fmb->fmb_list);
                pool->fmp_nactive_fmbs++;
                spin_unlock_irqrestore (&pool->fmp_lock, flags);

        /* deschedule until fmb free */

        conn->ksnc_rx_state = SOCKNAL_RX_FMB_SLEEP;

        list_add_tail (&conn->ksnc_rx_list,
                       &pool->fmp_blocked_conns);

        spin_unlock_irqrestore (&pool->fmp_lock, flags);

ksocknal_init_fmb (ksock_conn_t *conn, ksock_fmb_t *fmb)
        int payload_nob = conn->ksnc_rx_nob_left;
        ptl_nid_t dest_nid = le64_to_cpu(conn->ksnc_hdr.dest_nid);
        int nob = payload_nob;

        LASSERT (conn->ksnc_rx_scheduled);
        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
        LASSERT (conn->ksnc_rx_nob_wanted == conn->ksnc_rx_nob_left);
        LASSERT (payload_nob >= 0);
        LASSERT (payload_nob <= fmb->fmb_pool->fmp_buff_pages * CFS_PAGE_SIZE);
        LASSERT (sizeof (ptl_hdr_t) < CFS_PAGE_SIZE);
        LASSERT (fmb->fmb_kiov[0].kiov_offset == 0);

        /* Take a ref on the conn's peer to prevent module unload before
         * forwarding completes. */
        fmb->fmb_peer = conn->ksnc_peer;
        atomic_inc (&conn->ksnc_peer->ksnp_refcount);

        /* Copy the header we just read into the forwarding buffer.  If
         * there's payload, start reading it into the buffer,
         * otherwise the forwarding buffer can be kicked off
        fmb->fmb_hdr = conn->ksnc_hdr;

                LASSERT (niov < fmb->fmb_pool->fmp_buff_pages);
                LASSERT (fmb->fmb_kiov[niov].kiov_offset == 0);
                fmb->fmb_kiov[niov].kiov_len = MIN (CFS_PAGE_SIZE, nob);
                nob -= CFS_PAGE_SIZE;

        kpr_fwd_init(&fmb->fmb_fwd, dest_nid, &fmb->fmb_hdr,
                     payload_nob, niov, fmb->fmb_kiov,
                     ksocknal_fmb_callback, fmb);

        if (payload_nob == 0) { /* got complete packet already */
                CDEBUG (D_NET, "%p "LPX64"->"LPX64" fwd_start (immediate)\n",
                        conn, le64_to_cpu(conn->ksnc_hdr.src_nid), dest_nid);

                kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);

                ksocknal_new_packet (conn, 0); /* on to next packet */

        conn->ksnc_cookie = fmb; /* stash fmb for later */
        conn->ksnc_rx_state = SOCKNAL_RX_BODY_FWD; /* read in the payload */

        /* Set up conn->ksnc_rx_kiov to read the payload into fmb's kiov-ed
        LASSERT (niov <= sizeof(conn->ksnc_rx_iov_space)/sizeof(ptl_kiov_t));

        conn->ksnc_rx_niov = 0;
        conn->ksnc_rx_nkiov = niov;
        conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
        memcpy(conn->ksnc_rx_kiov, fmb->fmb_kiov, niov * sizeof(ptl_kiov_t));

        CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d reading body\n", conn,
                le64_to_cpu(conn->ksnc_hdr.src_nid), dest_nid, payload_nob);

ksocknal_fwd_parse (ksock_conn_t *conn)
        ptl_nid_t dest_nid = le64_to_cpu(conn->ksnc_hdr.dest_nid);
        ptl_nid_t src_nid = le64_to_cpu(conn->ksnc_hdr.src_nid);
        int body_len = le32_to_cpu(conn->ksnc_hdr.payload_length);
        char str[PTL_NALFMT_SIZE];
        char str2[PTL_NALFMT_SIZE];

        CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn,
                src_nid, dest_nid, conn->ksnc_rx_nob_left);

        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER);
        LASSERT (conn->ksnc_rx_scheduled);

        if (body_len < 0) { /* length corrupt (overflow) */
                CERROR("dropping packet from "LPX64" (%s) for "LPX64" (%s): "
                       "packet size %d illegal\n",
                       src_nid, portals_nid2str(TCPNAL, src_nid, str),
                       dest_nid, portals_nid2str(TCPNAL, dest_nid, str2),

                ksocknal_new_packet (conn, 0); /* on to new packet */

        if (!kpr_routing(&ksocknal_data.ksnd_router)) { /* not forwarding */
                CERROR("dropping packet from "LPX64" (%s) for "LPX64
                       " (%s): not forwarding\n",
                       src_nid, portals_nid2str(TCPNAL, src_nid, str),
                       dest_nid, portals_nid2str(TCPNAL, dest_nid, str2));
                /* on to new packet (skip this one's body) */
                ksocknal_new_packet (conn, body_len);

        if (body_len > PTL_MTU) { /* too big to forward */
                CERROR ("dropping packet from "LPX64" (%s) for "LPX64
                        " (%s): packet size %d too big\n",
                        src_nid, portals_nid2str(TCPNAL, src_nid, str),
                        dest_nid, portals_nid2str(TCPNAL, dest_nid, str2),
                /* on to new packet (skip this one's body) */
                ksocknal_new_packet (conn, body_len);

        /* should have gone direct */
        peer = ksocknal_get_peer (conn->ksnc_hdr.dest_nid);
                CERROR ("dropping packet from "LPX64" (%s) for "LPX64
                        " (%s): target is a peer\n",
                        src_nid, portals_nid2str(TCPNAL, src_nid, str),
                        dest_nid, portals_nid2str(TCPNAL, dest_nid, str2));
                ksocknal_put_peer (peer); /* drop ref from get above */

                /* on to next packet (skip this one's body) */
                ksocknal_new_packet (conn, body_len);

        conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB; /* Getting FMB now */
        conn->ksnc_rx_nob_left = body_len; /* stash packet size */
        conn->ksnc_rx_nob_wanted = body_len; /* (no slop) */

ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
        static char ksocknal_slop_buffer[4096];

        if (nob_to_skip == 0) { /* right at next packet boundary now */
                conn->ksnc_rx_started = 0;
                mb (); /* racing with timeout thread */

                conn->ksnc_rx_state = SOCKNAL_RX_HEADER;
                conn->ksnc_rx_nob_wanted = sizeof (ptl_hdr_t);
                conn->ksnc_rx_nob_left = sizeof (ptl_hdr_t);

                conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
                conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_hdr;
                conn->ksnc_rx_iov[0].iov_len = sizeof (ptl_hdr_t);
                conn->ksnc_rx_niov = 1;

                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_nkiov = 0;

        /* Set up to skip as much as possible now.  If there's more left
         * (ran out of iov entries) we'll get called again */

        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
        conn->ksnc_rx_nob_left = nob_to_skip;
        conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
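        /* NB the static slop buffer is shared by every connection that is
         * skipping a body; that's harmless since skipped bytes are read
         * in and then simply discarded. */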
        nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));

        conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
        conn->ksnc_rx_iov[niov].iov_len = nob;

        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));

        conn->ksnc_rx_niov = niov;
        conn->ksnc_rx_kiov = NULL;
        conn->ksnc_rx_nkiov = 0;
        conn->ksnc_rx_nob_wanted = skipped;

ksocknal_process_receive (ksock_conn_t *conn)
        LASSERT (atomic_read (&conn->ksnc_refcount) > 0);

        /* doesn't need a forwarding buffer */
        if (conn->ksnc_rx_state != SOCKNAL_RX_GET_FMB)

        fmb = ksocknal_get_idle_fmb (conn);
                /* conn descheduled waiting for idle fmb */

        if (ksocknal_init_fmb (conn, fmb)) {
                /* packet forwarded */

        /* NB: sched lock NOT held */
        LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER ||
                 conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
                 conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD ||
                 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);

        LASSERT (conn->ksnc_rx_nob_wanted > 0);

        rc = ksocknal_receive(conn);

                LASSERT (rc != -EAGAIN);

                        CWARN ("[%p] EOF from "LPX64" ip %d.%d.%d.%d:%d\n",
                               conn, conn->ksnc_peer->ksnp_nid,
                               HIPQUAD(conn->ksnc_ipaddr),
                else if (!conn->ksnc_closing)
                        CERROR ("[%p] Error %d on read from "LPX64
                                " ip %d.%d.%d.%d:%d\n",
                                conn, rc, conn->ksnc_peer->ksnp_nid,
                                HIPQUAD(conn->ksnc_ipaddr),

                ksocknal_close_conn_and_siblings (conn, rc);
                return (rc == 0 ? -ESHUTDOWN : rc);

        if (conn->ksnc_rx_nob_wanted != 0) {

        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_HEADER:
                if (conn->ksnc_hdr.type != cpu_to_le32(PTL_MSG_HELLO) &&
                    le64_to_cpu(conn->ksnc_hdr.dest_nid) !=
                    ksocknal_lib.libnal_ni.ni_pid.nid) {
                        /* This packet isn't for me */
                        ksocknal_fwd_parse (conn);
                        switch (conn->ksnc_rx_state) {
                        case SOCKNAL_RX_HEADER: /* skipped (zero payload) */
                                return (0);     /* => come back later */
                        case SOCKNAL_RX_SLOP:   /* skipping packet's body */
                                goto try_read;  /* => go read it */
                        case SOCKNAL_RX_GET_FMB: /* forwarding */
                                goto get_fmb;   /* => go get a fwd msg buffer */

                /* sets wanted_len, iovs etc */
                rc = lib_parse(&ksocknal_lib, &conn->ksnc_hdr, conn);

                        /* I just received garbage: give up on this conn */
                        ksocknal_close_conn_and_siblings (conn, rc);

                if (conn->ksnc_rx_nob_wanted != 0) { /* need to get payload? */
                        conn->ksnc_rx_state = SOCKNAL_RX_BODY;
                        goto try_read;          /* go read the payload */

                /* Fall through (completed packet for me) */

        case SOCKNAL_RX_BODY:
                /* payload all received */
                lib_finalize(&ksocknal_lib, NULL, conn->ksnc_cookie, PTL_OK);

        case SOCKNAL_RX_SLOP:
                /* starting new packet? */
                if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
                        return (0);     /* come back later */
                goto try_read;          /* try to finish reading slop now */

        case SOCKNAL_RX_BODY_FWD:
                /* payload all received */
                CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d fwd_start (got body)\n",
                        conn, le64_to_cpu(conn->ksnc_hdr.src_nid),
                        le64_to_cpu(conn->ksnc_hdr.dest_nid),
                        conn->ksnc_rx_nob_left);

                /* forward the packet.  NB ksocknal_init_fmb() put fmb into
                 * conn->ksnc_cookie */
                fmb = (ksock_fmb_t *)conn->ksnc_cookie;
                kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);

                /* no slop in forwarded packets */
                LASSERT (conn->ksnc_rx_nob_left == 0);

                ksocknal_new_packet (conn, 0);  /* on to next packet */
                return (0);                     /* (later) */

        return (-EINVAL);                       /* keep gcc happy */

ksocknal_recv (lib_nal_t *nal, void *private, lib_msg_t *msg,
               unsigned int niov, struct iovec *iov,
               size_t offset, size_t mlen, size_t rlen)
        ksock_conn_t *conn = (ksock_conn_t *)private;

        LASSERT (mlen <= rlen);
        LASSERT (niov <= PTL_MD_MAX_IOV);

        conn->ksnc_cookie = msg;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;

        conn->ksnc_rx_nkiov = 0;
        conn->ksnc_rx_kiov = NULL;
        conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
        conn->ksnc_rx_niov =
                lib_extract_iov(PTL_MD_MAX_IOV, conn->ksnc_rx_iov,
                                niov, iov, offset, mlen);

                 lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
                 lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

ksocknal_recv_pages (lib_nal_t *nal, void *private, lib_msg_t *msg,
                     unsigned int niov, ptl_kiov_t *kiov,
                     size_t offset, size_t mlen, size_t rlen)
        ksock_conn_t *conn = (ksock_conn_t *)private;

        LASSERT (mlen <= rlen);
        LASSERT (niov <= PTL_MD_MAX_IOV);

        conn->ksnc_cookie = msg;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;

        conn->ksnc_rx_niov = 0;
        conn->ksnc_rx_iov = NULL;
        conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
        conn->ksnc_rx_nkiov =
                lib_extract_kiov(PTL_MD_MAX_IOV, conn->ksnc_rx_kiov,
                                 niov, kiov, offset, mlen);

                 lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
                 lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

ksocknal_sched_cansleep(ksock_sched_t *sched)
        unsigned long flags;

        spin_lock_irqsave(&sched->kss_lock, flags);

        rc = (!ksocknal_data.ksnd_shuttingdown &&
              list_empty(&sched->kss_zctxdone_list) &&
              list_empty(&sched->kss_rx_conns) &&
              list_empty(&sched->kss_tx_conns));

        spin_unlock_irqrestore(&sched->kss_lock, flags);

int ksocknal_scheduler (void *arg)
        ksock_sched_t *sched = (ksock_sched_t *)arg;
        unsigned long flags;
        int id = sched - ksocknal_data.ksnd_schedulers;

        snprintf (name, sizeof (name), "ksocknald_%02d", id);
        kportal_daemonize (name);
        kportal_blockallsigs ();

#if (CONFIG_SMP && CPU_AFFINITY)
        id = ksocknal_sched2cpu(id);
        if (cpu_online(id)) {
                set_cpus_allowed(current, m);
                CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
#endif /* CONFIG_SMP && CPU_AFFINITY */

        spin_lock_irqsave (&sched->kss_lock, flags);

        while (!ksocknal_data.ksnd_shuttingdown) {
                int did_something = 0;

                /* Ensure I progress everything semi-fairly */

                if (!list_empty (&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          ksock_conn_t, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);

                        LASSERT(conn->ksnc_rx_scheduled);
                        LASSERT(conn->ksnc_rx_ready);

                        /* clear rx_ready in case receive isn't complete.
                         * Do it BEFORE we call process_recv, since
                         * data_ready can set it any time after we release
                        conn->ksnc_rx_ready = 0;
                        spin_unlock_irqrestore(&sched->kss_lock, flags);

                        rc = ksocknal_process_receive(conn);

                        spin_lock_irqsave(&sched->kss_lock, flags);

                        /* I'm the only one that can clear this flag */
                        LASSERT(conn->ksnc_rx_scheduled);

                        /* Did process_receive get everything it wanted? */
                                conn->ksnc_rx_ready = 1;

                        if (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP ||
                            conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB) {
                                /* Conn blocked for a forwarding buffer.
                                 * It will get queued for my attention when
                                 * one becomes available (and it might just
                                 * already have been!).  Meanwhile my ref
                                 * on it stays put. */
                        } else if (conn->ksnc_rx_ready) {
                                /* reschedule for rx */
                                list_add_tail (&conn->ksnc_rx_list,
                                               &sched->kss_rx_conns);
                                conn->ksnc_rx_scheduled = 0;
                                ksocknal_put_conn(conn);

                if (!list_empty (&sched->kss_tx_conns)) {
                        conn = list_entry(sched->kss_tx_conns.next,
                                          ksock_conn_t, ksnc_tx_list);
                        list_del (&conn->ksnc_tx_list);

                        LASSERT(conn->ksnc_tx_scheduled);
                        LASSERT(conn->ksnc_tx_ready);
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));

                        tx = list_entry(conn->ksnc_tx_queue.next,
                                        ksock_tx_t, tx_list);
                        /* dequeue now so empty list => more to send */
                        list_del(&tx->tx_list);

                        /* Clear tx_ready in case send isn't complete.  Do
                         * it BEFORE we call process_transmit, since
                         * write_space can set it any time after we release
                        conn->ksnc_tx_ready = 0;
                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                        rc = ksocknal_process_transmit(conn, tx);

                        spin_lock_irqsave (&sched->kss_lock, flags);

                        if (rc == -ENOMEM || rc == -EAGAIN) {
                                /* Incomplete send: replace tx on HEAD of tx_queue */
                                list_add (&tx->tx_list, &conn->ksnc_tx_queue);
                                /* Complete send; assume space for more */
                                conn->ksnc_tx_ready = 1;

                        if (rc == -ENOMEM) {
                                /* Do nothing; after a short timeout, this
                                 * conn will be reposted on kss_tx_conns. */
                        } else if (conn->ksnc_tx_ready &&
                                   !list_empty (&conn->ksnc_tx_queue)) {
                                /* reschedule for tx */
                                list_add_tail (&conn->ksnc_tx_list,
                                               &sched->kss_tx_conns);
                                conn->ksnc_tx_scheduled = 0;
                                ksocknal_put_conn (conn);

                if (!list_empty (&sched->kss_zctxdone_list)) {
                                list_entry(sched->kss_zctxdone_list.next,
                                           ksock_tx_t, tx_list);

                        list_del (&tx->tx_list);
                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                        ksocknal_tx_done (tx, 1);

                        spin_lock_irqsave (&sched->kss_lock, flags);

                if (!did_something ||           /* nothing to do */
                    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                        if (!did_something) {   /* wait for something to do */
                                rc = wait_event_interruptible (sched->kss_waitq,
                                                               !ksocknal_sched_cansleep(sched));

                        spin_lock_irqsave (&sched->kss_lock, flags);

        spin_unlock_irqrestore (&sched->kss_lock, flags);
        ksocknal_thread_fini ();

/*
 * Add connection to kss_rx_conns of scheduler
 * and wake up the scheduler.
 */
void ksocknal_read_callback (ksock_conn_t *conn)
        ksock_sched_t *sched;
        unsigned long flags;

        sched = conn->ksnc_scheduler;

        spin_lock_irqsave (&sched->kss_lock, flags);

        conn->ksnc_rx_ready = 1;

        if (!conn->ksnc_rx_scheduled) { /* not being progressed */
                list_add_tail(&conn->ksnc_rx_list,
                              &sched->kss_rx_conns);
                conn->ksnc_rx_scheduled = 1;
                /* extra ref for scheduler */
                atomic_inc (&conn->ksnc_refcount);

                cfs_waitq_signal (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);

/*
 * Add connection to kss_tx_conns of scheduler
 * and wake up the scheduler.
 */
void ksocknal_write_callback (ksock_conn_t *conn)
        ksock_sched_t *sched;
        unsigned long flags;

        sched = conn->ksnc_scheduler;

        spin_lock_irqsave (&sched->kss_lock, flags);

        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled &&          /* not being progressed */
            !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
                list_add_tail (&conn->ksnc_tx_list,
                               &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                atomic_inc (&conn->ksnc_refcount);

                cfs_waitq_signal (&sched->kss_waitq);

        spin_unlock_irqrestore (&sched->kss_lock, flags);

ksocknal_sock_write (struct socket *sock, void *buffer, int nob)
        return ksocknal_lib_sock_write(sock, buffer, nob);

ksocknal_sock_read (struct socket *sock, void *buffer, int nob)
        return ksocknal_lib_sock_read(sock, buffer, nob);

ksocknal_send_hello (ksock_conn_t *conn, __u32 *ipaddrs, int nipaddrs)
        /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
        struct socket *sock = conn->ksnc_sock;
        ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid;

        LASSERT (conn->ksnc_type != SOCKNAL_CONN_NONE);
        LASSERT (nipaddrs <= SOCKNAL_MAX_INTERFACES);

        /* No need for getconnsock/putconnsock */
        LASSERT (!conn->ksnc_closing);

        LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));
        hmv->magic = cpu_to_le32 (PORTALS_PROTO_MAGIC);
        hmv->version_major = cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR);
        hmv->version_minor = cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR);

        hdr.src_nid = cpu_to_le64 (ksocknal_lib.libnal_ni.ni_pid.nid);
        hdr.type = cpu_to_le32 (PTL_MSG_HELLO);
        hdr.payload_length = cpu_to_le32 (nipaddrs * sizeof(*ipaddrs));

        hdr.msg.hello.type = cpu_to_le32 (conn->ksnc_type);
        hdr.msg.hello.incarnation =
                cpu_to_le64 (ksocknal_data.ksnd_incarnation);
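        /* NB everything in the HELLO exchange goes over the wire in
         * little-endian byte order, including the interface list below
         * (hence the CAVEAT EMPTOR above). */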
        /* Receiver is eager */
        rc = ksocknal_sock_write (sock, &hdr, sizeof(hdr));
                CERROR ("Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
                        rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);

        for (i = 0; i < nipaddrs; i++) {
                ipaddrs[i] = __cpu_to_le32 (ipaddrs[i]);

        rc = ksocknal_sock_write (sock, ipaddrs, nipaddrs * sizeof(*ipaddrs));
                CERROR ("Error %d sending HELLO payload (%d)"
                        " to %u.%u.%u.%u/%d\n", rc, nipaddrs,
                        HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);

ksocknal_invert_type(int type)
        case SOCKNAL_CONN_ANY:
        case SOCKNAL_CONN_CONTROL:
        case SOCKNAL_CONN_BULK_IN:
                return SOCKNAL_CONN_BULK_OUT;
        case SOCKNAL_CONN_BULK_OUT:
                return SOCKNAL_CONN_BULK_IN;
                return (SOCKNAL_CONN_NONE);

ksocknal_recv_hello (ksock_conn_t *conn, ptl_nid_t *nid,
                     __u64 *incarnation, __u32 *ipaddrs)
        struct socket *sock = conn->ksnc_sock;
        ptl_magicversion_t *hmv;
        char ipbuf[PTL_NALFMT_SIZE];

        hmv = (ptl_magicversion_t *)&hdr.dest_nid;
        LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));

        rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv));
                CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
                        rc, HIPQUAD(conn->ksnc_ipaddr));

        if (hmv->magic != le32_to_cpu (PORTALS_PROTO_MAGIC)) {
                CERROR ("Bad magic %#08x (%#08x expected) from %u.%u.%u.%u\n",
                        __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC,
                        HIPQUAD(conn->ksnc_ipaddr));

        if (hmv->version_major != cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) ||
            hmv->version_minor != cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) {
                CERROR ("Incompatible protocol version %d.%d (%d.%d expected)"
                        " from %u.%u.%u.%u\n",
                        le16_to_cpu (hmv->version_major),
                        le16_to_cpu (hmv->version_minor),
                        PORTALS_PROTO_VERSION_MAJOR,
                        PORTALS_PROTO_VERSION_MINOR,
                        HIPQUAD(conn->ksnc_ipaddr));

#if (PORTALS_PROTO_VERSION_MAJOR != 1)
# error "This code only understands protocol version 1.x"

        /* version 1 sends magic/version as the dest_nid of a 'hello'
         * header, followed by payload full of interface IP addresses.
         * Read the rest of it in now... */

        rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv));
                CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n",
                        rc, HIPQUAD(conn->ksnc_ipaddr));

        /* ...and check we got what we expected */
        if (hdr.type != cpu_to_le32 (PTL_MSG_HELLO)) {
                CERROR ("Expecting a HELLO hdr,"
                        " but got type %d from %u.%u.%u.%u\n",
                        le32_to_cpu (hdr.type),
                        HIPQUAD(conn->ksnc_ipaddr));

        if (le64_to_cpu(hdr.src_nid) == PTL_NID_ANY) {
                CERROR("Expecting a HELLO hdr with a NID, but got PTL_NID_ANY "
                       "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));

        if (*nid == PTL_NID_ANY) {      /* don't know peer's nid yet */
                *nid = le64_to_cpu(hdr.src_nid);
        } else if (*nid != le64_to_cpu (hdr.src_nid)) {
                LCONSOLE_ERROR("Connected successfully to nid "LPX64" on host "
                               "%u.%u.%u.%u, but they claimed they were nid "
                               LPX64" (%s); please check your Lustre "
                               *nid, HIPQUAD(conn->ksnc_ipaddr),
                               le64_to_cpu(hdr.src_nid),
                               portals_nid2str(SOCKNAL,
                                               le64_to_cpu(hdr.src_nid),
                CERROR ("Connected to nid "LPX64"@%u.%u.%u.%u "
                        "but expecting "LPX64"\n",
                        le64_to_cpu (hdr.src_nid),
                        HIPQUAD(conn->ksnc_ipaddr), *nid);

        type = __le32_to_cpu(hdr.msg.hello.type);

        if (conn->ksnc_type == SOCKNAL_CONN_NONE) {
                /* I've accepted this connection; peer determines type */
                conn->ksnc_type = ksocknal_invert_type(type);
                if (conn->ksnc_type == SOCKNAL_CONN_NONE) {
                        CERROR ("Unexpected type %d from "LPX64"@%u.%u.%u.%u\n",
                                type, *nid, HIPQUAD(conn->ksnc_ipaddr));
        } else if (ksocknal_invert_type(type) != conn->ksnc_type) {
                CERROR ("Mismatched types: me %d, "LPX64"@%u.%u.%u.%u %d\n",
                        conn->ksnc_type, *nid, HIPQUAD(conn->ksnc_ipaddr),
                        le32_to_cpu(hdr.msg.hello.type));

        *incarnation = le64_to_cpu(hdr.msg.hello.incarnation);

        nips = __le32_to_cpu (hdr.payload_length) / sizeof (__u32);

        if (nips > SOCKNAL_MAX_INTERFACES ||
            nips * sizeof(__u32) != __le32_to_cpu (hdr.payload_length)) {
                CERROR("Bad payload length %d from "LPX64"@%u.%u.%u.%u\n",
                       __le32_to_cpu (hdr.payload_length),
                       *nid, HIPQUAD(conn->ksnc_ipaddr));

        rc = ksocknal_sock_read (sock, ipaddrs, nips * sizeof(*ipaddrs));
                CERROR ("Error %d reading IPs from "LPX64"@%u.%u.%u.%u\n",
                        rc, *nid, HIPQUAD(conn->ksnc_ipaddr));

        for (i = 0; i < nips; i++) {
                ipaddrs[i] = __le32_to_cpu(ipaddrs[i]);

                if (ipaddrs[i] == 0) {
                        CERROR("Zero IP[%d] from "LPX64"@%u.%u.%u.%u\n",
                               i, *nid, HIPQUAD(conn->ksnc_ipaddr));

ksocknal_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
        return ksocknal_lib_get_conn_tunables(conn, txmem, rxmem, nagle);

ksocknal_connect_peer (ksock_route_t *route, int type)
        struct socket *sock;

        /* Iterate through reserved ports.  When typed connections are
         * used, we will need to bind to multiple ports, but we only know
         * this at connect time.  But, by that time we've already called
         * bind() so we need a new socket. */
        for (port = 1023; port > 512; --port) {
                rc = ksocknal_lib_connect_sock(&sock, &may_retry, route, port);

                rc = ksocknal_create_conn(route, sock, type);
                cfs_put_file(KSN_SOCK2FILE(sock));

        CERROR("Out of ports trying to bind to a reserved port\n");
        return (-EADDRINUSE);

ksocknal_autoconnect (ksock_route_t *route)
        CFS_LIST_HEAD (zombies);
        unsigned long flags;
        char *err_msg = NULL;

        for (type = 0; type < SOCKNAL_CONN_NTYPES; type++)
                if ((route->ksnr_connecting & (1 << type)) != 0)
        LASSERT (type < SOCKNAL_CONN_NTYPES);

        rc = ksocknal_connect_peer (route, type);

                /* successfully autoconnected: create_conn did the
                 * route/conn binding and scheduled any blocked packets */

                if (route->ksnr_connecting == 0) {
                        /* No more connections required */

        /* "normal" errors */
                LCONSOLE_ERROR("Connection was refused by host %u.%u.%u.%u on "
                               "port %d; check that Lustre is running on that "
                               HIPQUAD(route->ksnr_ipaddr),
                LCONSOLE_ERROR("Host %u.%u.%u.%u was unreachable; the network "
                               "or that node may be down, or Lustre may be "
                               HIPQUAD(route->ksnr_ipaddr));
                LCONSOLE_ERROR("Connecting to host %u.%u.%u.%u on port %d took "
                               "too long; that node may be hung or "
                               "experiencing high load.\n",
                               HIPQUAD(route->ksnr_ipaddr),
        /* errors that should be rare */
                err_msg = "Portals could not negotiate a connection";
                /* -EAGAIN is out of ports, but we specify the ports
                 * manually, so we should never get this */
                err_msg = "no privileged ports were available";
                err_msg = "unknown error";

        LCONSOLE_ERROR("There was an unexpected error connecting to host "
                       "%u.%u.%u.%u on port %d: %s (error code %d).\n",
                       HIPQUAD(route->ksnr_ipaddr),

        /* Connection attempt failed */

        write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);

        peer = route->ksnr_peer;
        route->ksnr_connecting = 0;

        /* This is a retry rather than a new connection */
        LASSERT (route->ksnr_retry_interval != 0);
        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                           route->ksnr_retry_interval);
        route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2,
                                          SOCKNAL_MAX_RECONNECT_INTERVAL);

        if (!list_empty (&peer->ksnp_tx_queue) &&
            ksocknal_find_connecting_route_locked (peer) == NULL) {
                LASSERT (list_empty (&peer->ksnp_conns));

                /* None of the connections that the blocked packets are
                 * waiting for have been successful.  Complete them now... */
                        tx = list_entry (peer->ksnp_tx_queue.next,
                                         ksock_tx_t, tx_list);
                        list_del (&tx->tx_list);
                        list_add_tail (&tx->tx_list, &zombies);
                } while (!list_empty (&peer->ksnp_tx_queue));

#if 0 /* irrelevant with only eager routes */
        if (!route->ksnr_deleted) {
                /* make this route least-favourite for re-selection */
                list_del(&route->ksnr_list);
                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);

        write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);

        while (!list_empty (&zombies)) {
                char ipbuf[PTL_NALFMT_SIZE];
                char ipbuf2[PTL_NALFMT_SIZE];
                tx = list_entry (zombies.next, ksock_tx_t, tx_list);

                CERROR ("Deleting packet type %d len %d ("LPX64" %s->"LPX64" %s)\n",
                        le32_to_cpu (tx->tx_hdr->type),
                        le32_to_cpu (tx->tx_hdr->payload_length),
                        le64_to_cpu (tx->tx_hdr->src_nid),
                        portals_nid2str(SOCKNAL,
                                        le64_to_cpu(tx->tx_hdr->src_nid),
                        le64_to_cpu (tx->tx_hdr->dest_nid),
                        portals_nid2str(SOCKNAL,
                                        le64_to_cpu(tx->tx_hdr->dest_nid),

                list_del (&tx->tx_list);

                ksocknal_tx_done (tx, 0);

ksocknal_autoconnectd (void *arg)
        long id = (long)arg;
        unsigned long flags;
        ksock_route_t *route;

        snprintf (name, sizeof (name), "ksocknal_ad%02ld", id);
        kportal_daemonize (name);
        kportal_blockallsigs ();

        spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);

        while (!ksocknal_data.ksnd_shuttingdown) {

                if (!list_empty (&ksocknal_data.ksnd_autoconnectd_routes)) {
                        route = list_entry (ksocknal_data.ksnd_autoconnectd_routes.next,
                                            ksock_route_t, ksnr_connect_list);

                        list_del (&route->ksnr_connect_list);
                        spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);

                        ksocknal_autoconnect (route);
                        ksocknal_put_route (route);

                        spin_lock_irqsave(&ksocknal_data.ksnd_autoconnectd_lock,

                spin_unlock_irqrestore(&ksocknal_data.ksnd_autoconnectd_lock,

                rc = wait_event_interruptible(ksocknal_data.ksnd_autoconnectd_waitq,
                                              ksocknal_data.ksnd_shuttingdown ||
                                              !list_empty(&ksocknal_data.ksnd_autoconnectd_routes));

                spin_lock_irqsave(&ksocknal_data.ksnd_autoconnectd_lock, flags);

        spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);

        ksocknal_thread_fini ();

ksocknal_find_timed_out_conn (ksock_peer_t *peer)
        /* We're called with a shared lock on ksnd_global_lock */
        struct list_head *ctmp;

        list_for_each (ctmp, &peer->ksnp_conns) {
                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);

                /* Don't need the {get,put}connsock dance to deref ksnc_sock... */
                LASSERT (!conn->ksnc_closing);

                if (SOCK_ERROR(conn->ksnc_sock) != 0) {
                        atomic_inc (&conn->ksnc_refcount);

                        switch (SOCK_ERROR(conn->ksnc_sock)) {
                                LCONSOLE_WARN("A connection with %u.%u.%u.%u "
                                              "was reset; they may have "
                                              HIPQUAD(conn->ksnc_ipaddr));
                                LCONSOLE_WARN("A connection with %u.%u.%u.%u "
                                              "timed out; the network or that "
                                              "node may be down.\n",
                                              HIPQUAD(conn->ksnc_ipaddr));
                                LCONSOLE_WARN("An unexpected network error "
                                              "occurred with %u.%u.%u.%u: %d.\n",
                                              HIPQUAD(conn->ksnc_ipaddr),
                                              SOCK_ERROR(conn->ksnc_sock));

                        /* Something (e.g. failed keepalive) set the socket error */
                        CERROR ("Socket error %d: "LPX64" %p %d.%d.%d.%d\n",
                                SOCK_ERROR(conn->ksnc_sock), peer->ksnp_nid,
                                conn, HIPQUAD(conn->ksnc_ipaddr));

                if (conn->ksnc_rx_started &&
                    cfs_time_aftereq (cfs_time_current(),
                                      conn->ksnc_rx_deadline)) {
                        /* Timed out incomplete incoming message */
                        atomic_inc (&conn->ksnc_refcount);
                        LCONSOLE_ERROR("A timeout occurred receiving data from "
                                       "%u.%u.%u.%u; the network or that node "
                                       HIPQUAD(conn->ksnc_ipaddr));
                        CERROR ("Timed out RX from "LPX64" %p %d.%d.%d.%d\n",
                                peer->ksnp_nid, conn, HIPQUAD(conn->ksnc_ipaddr));

                if ((!list_empty (&conn->ksnc_tx_queue) ||
                     SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
                    cfs_time_aftereq (cfs_time_current(),
                                      conn->ksnc_tx_deadline)) {
                        /* Timed out messages queued for sending or
                         * buffered in the socket's send buffer */
                        atomic_inc (&conn->ksnc_refcount);
                        LCONSOLE_ERROR("A timeout occurred sending data to "
                                       "%u.%u.%u.%u; the network or that node "
                                       HIPQUAD(conn->ksnc_ipaddr));
                        CERROR ("Timed out TX to "LPX64" %s%d %p %d.%d.%d.%d\n",
                                list_empty (&conn->ksnc_tx_queue) ? "" : "Q ",
                                SOCK_WMEM_QUEUED(conn->ksnc_sock), conn,
                                HIPQUAD(conn->ksnc_ipaddr));

ksocknal_check_peer_timeouts (int idx)
        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
        struct list_head *ptmp;

        /* NB. We expect to have a look at all the peers and not find any
         * connections to time out, so we just use a shared lock while we
        read_lock (&ksocknal_data.ksnd_global_lock);

        list_for_each (ptmp, peers) {
                peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
                conn = ksocknal_find_timed_out_conn (peer);

                        read_unlock (&ksocknal_data.ksnd_global_lock);

                        CERROR ("Timed out conn "LPX64" ip %d.%d.%d.%d:%d\n",
                                HIPQUAD(conn->ksnc_ipaddr),

                        ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);

                        /* NB we won't find this one again, but we can't
                         * just proceed with the next peer, since we dropped
                         * ksnd_global_lock and it might be dead already! */
                        ksocknal_put_conn (conn);

        read_unlock (&ksocknal_data.ksnd_global_lock);

ksocknal_reaper (void *arg)
        cfs_waitlink_t wait;
        unsigned long flags;
        ksock_sched_t *sched;
        struct list_head enomem_conns;
        cfs_duration_t timeout;
        cfs_time_t deadline = cfs_time_current();

        kportal_daemonize ("ksocknal_reaper");
        kportal_blockallsigs ();

        CFS_INIT_LIST_HEAD(&enomem_conns);
        cfs_waitlink_init (&wait);

        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);

        while (!ksocknal_data.ksnd_shuttingdown) {

                if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
                        conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
                                           ksock_conn_t, ksnc_list);
                        list_del (&conn->ksnc_list);

                        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

                        ksocknal_terminate_conn (conn);
                        ksocknal_put_conn (conn);

                        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);

                if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
                        conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
                                           ksock_conn_t, ksnc_list);
                        list_del (&conn->ksnc_list);

                        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

                        ksocknal_destroy_conn (conn);

                        spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);

                if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
                        list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
                        list_del_init(&ksocknal_data.ksnd_enomem_conns);

                spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

                /* reschedule all the connections that stalled with ENOMEM... */

                while (!list_empty (&enomem_conns)) {
                        conn = list_entry (enomem_conns.next,
                                           ksock_conn_t, ksnc_tx_list);
                        list_del (&conn->ksnc_tx_list);

                        sched = conn->ksnc_scheduler;

                        spin_lock_irqsave (&sched->kss_lock, flags);

                        LASSERT (conn->ksnc_tx_scheduled);
                        conn->ksnc_tx_ready = 1;
                        list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns);
                        cfs_waitq_signal (&sched->kss_waitq);

                        spin_unlock_irqrestore (&sched->kss_lock, flags);

                /* careful with the jiffy wrap... */
                while ((timeout = cfs_time_sub(deadline,
                                               cfs_time_current())) <= 0) {
                        int chunk = ksocknal_data.ksnd_peer_hash_size;

                        /* Time to check for timeouts on a few more peers: I do
                         * checks every 'p' seconds on a proportion of the peer
                         * table and I need to check every connection 'n' times
                         * within a timeout interval, to ensure I detect a
                         * timeout on any connection within (n+1)/n times the
                         * timeout interval. */

                        if (ksocknal_tunables.ksnd_io_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        ksocknal_tunables.ksnd_io_timeout;
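                        /* e.g. (hypothetical numbers) with io_timeout of
                         * 300s and n * p of 9s, only 3% of the hash table
                         * is scanned per pass, yet every peer is still
                         * visited within one timeout interval. */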
                        for (i = 0; i < chunk; i++) {
                                ksocknal_check_peer_timeouts (peer_index);
                                peer_index = (peer_index + 1) %
                                             ksocknal_data.ksnd_peer_hash_size;

                        deadline = cfs_time_add(deadline, cfs_time_seconds(p));

                if (nenomem_conns != 0) {
                        /* Reduce my timeout if I rescheduled ENOMEM conns.
                         * This also prevents me getting woken immediately
                         * if any go back on my enomem list. */
                        timeout = SOCKNAL_ENOMEM_RETRY;

                ksocknal_data.ksnd_reaper_waketime =
                        cfs_time_add(cfs_time_current(), timeout);

                set_current_state (TASK_INTERRUPTIBLE);
                cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);

                if (!ksocknal_data.ksnd_shuttingdown &&
                    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
                    list_empty (&ksocknal_data.ksnd_zombie_conns))
                        cfs_waitq_timedwait (&wait, timeout);

                set_current_state (TASK_RUNNING);
                cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);

                spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);

        spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);

        ksocknal_thread_fini ();

lib_nal_t ksocknal_lib = {
        libnal_data:       &ksocknal_data, /* NAL private data */
        libnal_send:       ksocknal_send,
        libnal_send_pages: ksocknal_send_pages,
        libnal_recv:       ksocknal_recv,
        libnal_recv_pages: ksocknal_recv_pages,
        libnal_dist:       ksocknal_dist
};