1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@zabbo.net>
6 * Author: Peter J. Braam <braam@clusterfs.com>
7 * Author: Phil Schwan <phil@clusterfs.com>
8 * Author: Eric Barton <eric@bartonsoftware.com>
10 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
12 * Portals is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Portals is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Portals; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 * LIB functions follow
33 ksocknal_dist(lib_nal_t *nal, ptl_nid_t nid, unsigned long *dist)
35 /* I would guess that if ksocknal_get_peer (nid) == NULL,
36 and we're not routing, then 'nid' is very distant :) */
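/* Convention assumed here: *dist is 0 when 'nid' is this node itself
 * and at least 1 for any nid reached over the wire; this is an
 * inference from the check below, not stated explicitly. */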
37 if (nal->libnal_ni.ni_pid.nid == nid) {
47 ksocknal_free_ltx (ksock_ltx_t *ltx)
49 atomic_dec(&ksocknal_data.ksnd_nactive_ltxs);
50 PORTAL_FREE(ltx, ltx->ltx_desc_size);
54 ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
56 struct iovec *iov = tx->tx_iov;
60 LASSERT (tx->tx_niov > 0);
62 /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
63 rc = ksocknal_lib_send_iov(conn, tx);
65 if (rc <= 0) /* sent nothing? */
69 LASSERT (nob <= tx->tx_resid);
74 LASSERT (tx->tx_niov > 0);
76 if (nob < iov->iov_len) {
77 iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
91 ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
93 ptl_kiov_t *kiov = tx->tx_kiov;
97 LASSERT (tx->tx_niov == 0);
98 LASSERT (tx->tx_nkiov > 0);
100 /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
101 rc = ksocknal_lib_send_kiov(conn, tx);
103 if (rc <= 0) /* sent nothing? */
107 LASSERT (nob <= tx->tx_resid);
112 LASSERT(tx->tx_nkiov > 0);
114 if (nob < kiov->kiov_len) {
115 kiov->kiov_offset += nob;
116 kiov->kiov_len -= nob;
120 nob -= kiov->kiov_len;
121 tx->tx_kiov = ++kiov;
129 ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
134 if (ksocknal_data.ksnd_stall_tx != 0) {
135 set_current_state (TASK_UNINTERRUPTIBLE);
136 schedule_timeout (cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
139 LASSERT (tx->tx_resid != 0);
141 rc = ksocknal_getconnsock (conn);
143 LASSERT (conn->ksnc_closing);
148 if (ksocknal_data.ksnd_enomem_tx > 0) {
150 ksocknal_data.ksnd_enomem_tx--;
152 } else if (tx->tx_niov != 0) {
153 rc = ksocknal_send_iov (conn, tx);
155 rc = ksocknal_send_kiov (conn, tx);
158 bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
159 if (rc > 0) /* sent something? */
160 conn->ksnc_tx_bufnob += rc; /* account it */
162 if (bufnob < conn->ksnc_tx_bufnob) {
163 /* allocated send buffer bytes < computed; infer
164 * something got ACKed */
165 conn->ksnc_tx_deadline = cfs_time_shift(ksocknal_tunables.ksnd_io_timeout);
166 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
167 conn->ksnc_tx_bufnob = bufnob;
171 if (rc <= 0) { /* Didn't write anything? */
173 ksock_sched_t *sched;
175 if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
181 /* Check if EAGAIN is due to memory pressure */
183 sched = conn->ksnc_scheduler;
184 spin_lock_irqsave(&sched->kss_lock, flags);
186 if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
187 !conn->ksnc_tx_ready) {
188 /* SOCK_NOSPACE is set when the socket fills
189 * and cleared in the write_space callback
190 * (which also sets ksnc_tx_ready). If
191 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
192 * zero, I didn't fill the socket and
193 * write_space won't reschedule me, so I
194 * return -ENOMEM to get my caller to retry
199 spin_unlock_irqrestore(&sched->kss_lock, flags);
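/* If SOCK_NOSPACE or ksnc_tx_ready is set, the socket genuinely
 * filled: the -EAGAIN stands and the write_space callback will
 * reschedule this conn once buffer space drains. */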
203 /* socket's wmem_queued now includes 'rc' bytes */
204 atomic_sub (rc, &conn->ksnc_tx_nob);
207 } while (tx->tx_resid != 0);
209 ksocknal_putconnsock (conn);
214 ksocknal_recv_iov (ksock_conn_t *conn)
216 struct iovec *iov = conn->ksnc_rx_iov;
220 LASSERT (conn->ksnc_rx_niov > 0);
222 /* Never touch conn->ksnc_rx_iov or change connection
223 * status inside ksocknal_lib_recv_iov */
224 rc = ksocknal_lib_recv_iov(conn);
229 /* received something... */
232 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
233 conn->ksnc_rx_deadline = cfs_time_shift (ksocknal_tunables.ksnd_io_timeout);
234 mb(); /* order with setting rx_started */
235 conn->ksnc_rx_started = 1;
237 conn->ksnc_rx_nob_wanted -= nob;
238 conn->ksnc_rx_nob_left -= nob;
241 LASSERT (conn->ksnc_rx_niov > 0);
243 if (nob < iov->iov_len) {
245 iov->iov_base = (void *)(((unsigned long)iov->iov_base) + nob);
250 conn->ksnc_rx_iov = ++iov;
251 conn->ksnc_rx_niov--;
258 ksocknal_recv_kiov (ksock_conn_t *conn)
260 ptl_kiov_t *kiov = conn->ksnc_rx_kiov;
263 LASSERT (conn->ksnc_rx_nkiov > 0);
265 /* Never touch conn->ksnc_rx_kiov or change connection
266 * status inside ksocknal_lib_recv_iov */
267 rc = ksocknal_lib_recv_kiov(conn);
272 /* received something... */
275 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
276 conn->ksnc_rx_deadline = cfs_time_shift (ksocknal_tunables.ksnd_io_timeout);
277 mb(); /* order with setting rx_started */
278 conn->ksnc_rx_started = 1;
280 conn->ksnc_rx_nob_wanted -= nob;
281 conn->ksnc_rx_nob_left -= nob;
284 LASSERT (conn->ksnc_rx_nkiov > 0);
286 if (nob < kiov->kiov_len) {
287 kiov->kiov_offset += nob;
288 kiov->kiov_len -= nob;
292 nob -= kiov->kiov_len;
293 conn->ksnc_rx_kiov = ++kiov;
294 conn->ksnc_rx_nkiov--;
301 ksocknal_receive (ksock_conn_t *conn)
303 /* Return 1 on success, 0 on EOF, < 0 on error.
304 * Caller checks ksnc_rx_nob_wanted to determine
305 * progress/completion. */
309 if (ksocknal_data.ksnd_stall_rx != 0) {
310 set_current_state (TASK_UNINTERRUPTIBLE);
311 schedule_timeout(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
314 rc = ksocknal_getconnsock (conn);
316 LASSERT (conn->ksnc_closing);
321 if (conn->ksnc_rx_niov != 0)
322 rc = ksocknal_recv_iov (conn);
324 rc = ksocknal_recv_kiov (conn);
327 /* error/EOF or partial receive */
330 } else if (rc == 0 && conn->ksnc_rx_started) {
331 /* EOF in the middle of a message */
337 /* Completed a fragment */
339 if (conn->ksnc_rx_nob_wanted == 0) {
340 /* Completed a message segment (header or payload) */
341 if ((ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0 &&
342 (conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
343 conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) {
344 /* Remind the socket to ack eagerly... */
345 ksocknal_lib_eager_ack(conn);
352 ksocknal_putconnsock (conn);
358 ksocknal_zc_callback (zccd_t *zcd)
360 ksock_tx_t *tx = KSOCK_ZCCD_2_TX(zcd);
361 ksock_sched_t *sched = tx->tx_conn->ksnc_scheduler;
365 /* Schedule tx for cleanup (can't do it now due to lock conflicts) */
367 spin_lock_irqsave (&sched->kss_lock, flags);
369 list_add_tail (&tx->tx_list, &sched->kss_zctxdone_list);
370 cfs_waitq_signal (&sched->kss_waitq);
372 spin_unlock_irqrestore (&sched->kss_lock, flags);
378 ksocknal_tx_done (ksock_tx_t *tx, int asynch)
383 if (tx->tx_conn != NULL) {
385 /* zero copy completion isn't always from
386 * process_transmit() so it needs to keep a ref on
389 ksocknal_put_conn (tx->tx_conn);
395 if (tx->tx_isfwd) { /* was a forwarded packet? */
396 kpr_fwd_done (&ksocknal_data.ksnd_router,
397 KSOCK_TX_2_KPR_FWD_DESC (tx),
398 (tx->tx_resid == 0) ? 0 : -ECONNABORTED);
404 ltx = KSOCK_TX_2_KSOCK_LTX (tx);
406 lib_finalize (&ksocknal_lib, ltx->ltx_private, ltx->ltx_cookie,
407 (tx->tx_resid == 0) ? PTL_OK : PTL_FAIL);
409 ksocknal_free_ltx (ltx);
414 ksocknal_tx_launched (ksock_tx_t *tx)
417 if (atomic_read (&tx->tx_zccd.zccd_count) != 1) {
418 ksock_conn_t *conn = tx->tx_conn;
420 /* zccd skbufs are still in-flight. First take a ref on
421 * conn, so it hangs about for ksocknal_tx_done... */
422 atomic_inc (&conn->ksnc_refcount);
424 /* ...then drop the initial ref on zccd, so the zero copy
425 * callback can occur */
426 zccd_put (&tx->tx_zccd);
430 /* Any zero-copy-ness (if any) has completed; I can complete the
431 * transmit now, avoiding an extra schedule */
432 ksocknal_tx_done (tx, 0);
436 ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
441 rc = ksocknal_transmit (conn, tx);
443 CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
445 if (tx->tx_resid == 0) {
446 /* Sent everything OK */
449 ksocknal_tx_launched (tx);
459 counter++; /* exponential backoff warnings */
460 if ((counter & (-counter)) == counter)
461 CWARN("%d ENOMEM tx %p\n", counter, conn);
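/* NB (counter & -counter) == counter only when counter is a power of
 * two, so the warning fires on the 1st, 2nd, 4th, 8th... occurrence
 * and can't flood the log under sustained memory pressure. */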
463 /* Queue on ksnd_enomem_conns for retry after a timeout */
464 spin_lock_irqsave(&ksocknal_data.ksnd_reaper_lock, flags);
466 /* enomem list takes over scheduler's ref... */
467 LASSERT (conn->ksnc_tx_scheduled);
468 list_add_tail(&conn->ksnc_tx_list,
469 &ksocknal_data.ksnd_enomem_conns);
470 if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
471 SOCKNAL_ENOMEM_RETRY),
472 ksocknal_data.ksnd_reaper_waketime))
473 cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
475 spin_unlock_irqrestore(&ksocknal_data.ksnd_reaper_lock, flags);
482 if (!conn->ksnc_closing) {
485 LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
486 "while we were sending data; it may have "
488 HIPQUAD(conn->ksnc_ipaddr));
491 LCONSOLE_WARN("There was an unexpected network error "
492 "while writing to %u.%u.%u.%u: %d.\n",
493 HIPQUAD(conn->ksnc_ipaddr), rc);
496 CERROR("[%p] Error %d on write to "LPX64
497 " ip %d.%d.%d.%d:%d\n", conn, rc,
498 conn->ksnc_peer->ksnp_nid,
499 HIPQUAD(conn->ksnc_ipaddr),
503 ksocknal_close_conn_and_siblings (conn, rc);
504 ksocknal_tx_launched (tx);
510 ksocknal_launch_autoconnect_locked (ksock_route_t *route)
514 /* called holding write lock on ksnd_global_lock */
516 LASSERT (!route->ksnr_deleted);
517 LASSERT ((route->ksnr_connected & (1 << SOCKNAL_CONN_ANY)) == 0);
518 LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES);
519 LASSERT (route->ksnr_connecting == 0);
521 if (ksocknal_tunables.ksnd_typed_conns)
522 route->ksnr_connecting =
523 KSNR_TYPED_ROUTES & ~route->ksnr_connected;
525 route->ksnr_connecting = (1 << SOCKNAL_CONN_ANY);
527 atomic_inc (&route->ksnr_refcount); /* extra ref for asynchd */
529 spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
531 list_add_tail (&route->ksnr_connect_list,
532 &ksocknal_data.ksnd_autoconnectd_routes);
533 cfs_waitq_signal (&ksocknal_data.ksnd_autoconnectd_waitq);
535 spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
539 ksocknal_find_target_peer_locked (ksock_tx_t *tx, ptl_nid_t nid)
541 char ipbuf[PTL_NALFMT_SIZE];
542 ptl_nid_t target_nid;
544 ksock_peer_t *peer = ksocknal_find_peer_locked (nid);
550 CERROR ("Can't send packet to "LPX64
551 " %s: routed target is not a peer\n",
552 nid, portals_nid2str(SOCKNAL, nid, ipbuf));
556 rc = kpr_lookup (&ksocknal_data.ksnd_router, nid, tx->tx_nob,
559 CERROR ("Can't route to "LPX64" %s: router error %d\n",
560 nid, portals_nid2str(SOCKNAL, nid, ipbuf), rc);
564 peer = ksocknal_find_peer_locked (target_nid);
568 CERROR ("Can't send packet to "LPX64" %s: no peer entry\n",
569 target_nid, portals_nid2str(SOCKNAL, target_nid, ipbuf));
574 ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer)
576 struct list_head *tmp;
577 ksock_conn_t *typed = NULL;
579 ksock_conn_t *fallback = NULL;
583 list_for_each (tmp, &peer->ksnp_conns) {
584 ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
585 #if SOCKNAL_ROUND_ROBIN
588 int nob = atomic_read(&c->ksnc_tx_nob) +
589 SOCK_WMEM_QUEUED(c->ksnc_sock);
591 LASSERT (!c->ksnc_closing);
593 if (fallback == NULL || nob < fnob) {
598 if (!ksocknal_tunables.ksnd_typed_conns)
601 switch (c->ksnc_type) {
603 CERROR("ksnc_type bad: %u\n", c->ksnc_type);
605 case SOCKNAL_CONN_ANY:
607 case SOCKNAL_CONN_BULK_IN:
609 case SOCKNAL_CONN_BULK_OUT:
610 if (tx->tx_nob < ksocknal_tunables.ksnd_min_bulk)
613 case SOCKNAL_CONN_CONTROL:
614 if (tx->tx_nob >= ksocknal_tunables.ksnd_min_bulk)
619 if (typed == NULL || nob < tnob) {
625 /* prefer the typed selection */
626 conn = (typed != NULL) ? typed : fallback;
628 #if SOCKNAL_ROUND_ROBIN
630 /* round-robin all else being equal */
631 list_del (&conn->ksnc_list);
632 list_add_tail (&conn->ksnc_list, &peer->ksnp_conns);
639 ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
642 ksock_sched_t *sched = conn->ksnc_scheduler;
644 /* called holding global lock (read or irq-write) and caller may
645 * not have dropped this lock between finding conn and calling me,
646 * so we don't need the {get,put}connsock dance to deref
648 LASSERT(!conn->ksnc_closing);
649 LASSERT(tx->tx_resid == tx->tx_nob);
651 CDEBUG (D_NET, "Sending to "LPX64" ip %d.%d.%d.%d:%d\n",
652 conn->ksnc_peer->ksnp_nid,
653 HIPQUAD(conn->ksnc_ipaddr),
656 atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
660 zccd_init (&tx->tx_zccd, ksocknal_zc_callback);
661 /* NB this sets 1 ref on zccd, so the callback can only occur after
662 * I've released this ref. */
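/* See ksocknal_tx_launched(): if kernel skbufs still hold zccd
 * references when the send loop finishes, it takes a ref on the conn
 * and then drops this initial zccd ref, so ksocknal_zc_callback()
 * fires only after the last in-flight fragment is freed. */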
664 spin_lock_irqsave (&sched->kss_lock, flags);
666 if (list_empty(&conn->ksnc_tx_queue) &&
667 SOCK_WMEM_QUEUED(conn->ksnc_sock) == 0) {
668 /* First packet starts the timeout */
669 conn->ksnc_tx_deadline = cfs_time_shift(ksocknal_tunables.ksnd_io_timeout);
670 conn->ksnc_tx_bufnob = 0;
671 mb(); /* order with adding to tx_queue */
674 list_add_tail (&tx->tx_list, &conn->ksnc_tx_queue);
676 if (conn->ksnc_tx_ready && /* able to send */
677 !conn->ksnc_tx_scheduled) { /* not scheduled to send */
678 /* +1 ref for scheduler */
679 atomic_inc (&conn->ksnc_refcount);
680 list_add_tail (&conn->ksnc_tx_list,
681 &sched->kss_tx_conns);
682 conn->ksnc_tx_scheduled = 1;
683 cfs_waitq_signal (&sched->kss_waitq);
686 spin_unlock_irqrestore (&sched->kss_lock, flags);
690 ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
692 struct list_head *tmp;
693 ksock_route_t *route;
696 list_for_each (tmp, &peer->ksnp_routes) {
697 route = list_entry (tmp, ksock_route_t, ksnr_list);
698 bits = route->ksnr_connected;
700 /* All typed connections established? */
701 if ((bits & KSNR_TYPED_ROUTES) == KSNR_TYPED_ROUTES)
704 /* Untyped connection established? */
705 if ((bits & (1 << SOCKNAL_CONN_ANY)) != 0)
708 /* connection being established? */
709 if (route->ksnr_connecting != 0)
712 /* too soon to retry this guy? */
713 if (!cfs_time_aftereq (cfs_time_current(), route->ksnr_timeout))
723 ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
725 struct list_head *tmp;
726 ksock_route_t *route;
728 list_for_each (tmp, &peer->ksnp_routes) {
729 route = list_entry (tmp, ksock_route_t, ksnr_list);
731 if (route->ksnr_connecting != 0)
739 ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid)
744 ksock_route_t *route;
747 /* Ensure the frags we've been given EXACTLY match the number of
748 * bytes we want to send. Many TCP/IP stacks disregard any total
749 * size parameters passed to them and just look at the frags.
751 * We always expect at least 1 mapped fragment containing the
752 * complete portals header. */
753 LASSERT (lib_iov_nob (tx->tx_niov, tx->tx_iov) +
754 lib_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob);
755 LASSERT (tx->tx_niov >= 1);
756 LASSERT (tx->tx_iov[0].iov_len >= sizeof (ptl_hdr_t));
758 CDEBUG (D_NET, "packet %p type %d, nob %d niov %d nkiov %d\n",
759 tx, ((ptl_hdr_t *)tx->tx_iov[0].iov_base)->type,
760 tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
762 tx->tx_conn = NULL; /* only set when assigned a conn */
763 tx->tx_resid = tx->tx_nob;
764 tx->tx_hdr = (ptl_hdr_t *)tx->tx_iov[0].iov_base;
766 g_lock = &ksocknal_data.ksnd_global_lock;
767 #if !SOCKNAL_ROUND_ROBIN
770 peer = ksocknal_find_target_peer_locked (tx, nid);
772 read_unlock (g_lock);
773 return (-EHOSTUNREACH);
776 if (ksocknal_find_connectable_route_locked(peer) == NULL) {
777 conn = ksocknal_find_conn_locked (tx, peer);
779 /* I've got no autoconnect routes that need to be
780 * connecting and I do have an actual connection... */
781 ksocknal_queue_tx_locked (tx, conn);
782 read_unlock (g_lock);
787 /* I'll need a write lock... */
788 read_unlock (g_lock);
790 write_lock_irqsave(g_lock, flags);
792 peer = ksocknal_find_target_peer_locked (tx, nid);
794 write_unlock_irqrestore(g_lock, flags);
795 return (-EHOSTUNREACH);
799 /* launch any/all autoconnections that need it */
800 route = ksocknal_find_connectable_route_locked (peer);
804 ksocknal_launch_autoconnect_locked (route);
807 conn = ksocknal_find_conn_locked (tx, peer);
809 /* Connection exists; queue message on it */
810 ksocknal_queue_tx_locked (tx, conn);
811 write_unlock_irqrestore (g_lock, flags);
815 route = ksocknal_find_connecting_route_locked (peer);
817 /* At least 1 connection is being established; queue the
819 list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
820 write_unlock_irqrestore (g_lock, flags);
824 write_unlock_irqrestore (g_lock, flags);
825 return (-EHOSTUNREACH);
829 ksocknal_sendmsg(lib_nal_t *nal,
836 unsigned int payload_niov,
837 struct iovec *payload_iov,
838 ptl_kiov_t *payload_kiov,
839 size_t payload_offset,
846 /* NB 'private' is different depending on what we're sending.
847 * Just ignore it... */
849 CDEBUG(D_NET, "sending "LPSZ" bytes in %d frags to nid:"LPX64
850 " pid %d\n", payload_nob, payload_niov, nid , pid);
852 LASSERT (payload_nob == 0 || payload_niov > 0);
853 LASSERT (payload_niov <= PTL_MD_MAX_IOV);
855 /* It must be OK to kmap() if required */
856 LASSERT (payload_kiov == NULL || !in_interrupt ());
857 /* payload is either all vaddrs or all pages */
858 LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
860 if (payload_iov != NULL)
861 desc_size = offsetof(ksock_ltx_t, ltx_iov[1 + payload_niov]);
863 desc_size = offsetof(ksock_ltx_t, ltx_kiov[payload_niov]);
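/* NB the iov case sizes the descriptor for payload_niov + 1 entries
 * because ltx_iov[0] always carries the portals header; in the kiov
 * case the header still lives in ltx_iov[0], so only payload_niov
 * kiov slots are needed. */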
865 if (in_interrupt() ||
866 type == PTL_MSG_ACK ||
867 type == PTL_MSG_REPLY) {
868 /* Can't block if in interrupt or responding to an incoming
870 PORTAL_ALLOC_ATOMIC(ltx, desc_size);
872 PORTAL_ALLOC(ltx, desc_size);
876 CERROR("Can't allocate tx desc type %d size %d %s\n",
877 type, desc_size, in_interrupt() ? "(intr)" : "");
878 return (PTL_NO_SPACE);
881 atomic_inc(&ksocknal_data.ksnd_nactive_ltxs);
883 ltx->ltx_desc_size = desc_size;
885 /* We always have 1 mapped frag for the header */
886 ltx->ltx_tx.tx_iov = ltx->ltx_iov;
887 ltx->ltx_iov[0].iov_base = &ltx->ltx_hdr;
888 ltx->ltx_iov[0].iov_len = sizeof(*hdr);
891 ltx->ltx_private = private;
892 ltx->ltx_cookie = cookie;
894 ltx->ltx_tx.tx_isfwd = 0;
895 ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_nob;
897 if (payload_iov != NULL) {
898 /* payload is all mapped */
899 ltx->ltx_tx.tx_kiov = NULL;
900 ltx->ltx_tx.tx_nkiov = 0;
902 ltx->ltx_tx.tx_niov =
903 1 + lib_extract_iov(payload_niov, &ltx->ltx_iov[1],
904 payload_niov, payload_iov,
905 payload_offset, payload_nob);
907 /* payload is all pages */
908 ltx->ltx_tx.tx_niov = 1;
910 ltx->ltx_tx.tx_kiov = ltx->ltx_kiov;
911 ltx->ltx_tx.tx_nkiov =
912 lib_extract_kiov(payload_niov, ltx->ltx_kiov,
913 payload_niov, payload_kiov,
914 payload_offset, payload_nob);
917 rc = ksocknal_launch_packet(&ltx->ltx_tx, nid);
921 ksocknal_free_ltx(ltx);
926 ksocknal_send (lib_nal_t *nal, void *private, lib_msg_t *cookie,
927 ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
928 unsigned int payload_niov, struct iovec *payload_iov,
929 size_t payload_offset, size_t payload_len)
931 return (ksocknal_sendmsg(nal, private, cookie,
933 payload_niov, payload_iov, NULL,
934 payload_offset, payload_len));
938 ksocknal_send_pages (lib_nal_t *nal, void *private, lib_msg_t *cookie,
939 ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
940 unsigned int payload_niov, ptl_kiov_t *payload_kiov,
941 size_t payload_offset, size_t payload_len)
943 return (ksocknal_sendmsg(nal, private, cookie,
945 payload_niov, NULL, payload_kiov,
946 payload_offset, payload_len));
950 ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
952 ptl_nid_t nid = fwd->kprfd_gateway_nid;
953 ksock_ftx_t *ftx = (ksock_ftx_t *)&fwd->kprfd_scratch;
956 CDEBUG (D_NET, "Forwarding [%p] -> "LPX64" ("LPX64"))\n", fwd,
957 fwd->kprfd_gateway_nid, fwd->kprfd_target_nid);
959 /* I'm the gateway; must be the last hop */
960 if (nid == ksocknal_lib.libnal_ni.ni_pid.nid)
961 nid = fwd->kprfd_target_nid;
963 /* setup iov for hdr */
964 ftx->ftx_iov.iov_base = fwd->kprfd_hdr;
965 ftx->ftx_iov.iov_len = sizeof(ptl_hdr_t);
967 ftx->ftx_tx.tx_isfwd = 1; /* This is a forwarding packet */
968 ftx->ftx_tx.tx_nob = sizeof(ptl_hdr_t) + fwd->kprfd_nob;
969 ftx->ftx_tx.tx_niov = 1;
970 ftx->ftx_tx.tx_iov = &ftx->ftx_iov;
971 ftx->ftx_tx.tx_nkiov = fwd->kprfd_niov;
972 ftx->ftx_tx.tx_kiov = fwd->kprfd_kiov;
974 rc = ksocknal_launch_packet (&ftx->ftx_tx, nid);
976 kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, rc);
980 ksocknal_thread_start (int (*fn)(void *arg), void *arg)
982 long pid = cfs_kernel_thread (fn, arg, 0);
988 write_lock_irqsave(&ksocknal_data.ksnd_global_lock, flags);
989 ksocknal_data.ksnd_nthreads++;
990 write_unlock_irqrestore(&ksocknal_data.ksnd_global_lock, flags);
995 ksocknal_thread_fini (void)
999 write_lock_irqsave(&ksocknal_data.ksnd_global_lock, flags);
1000 ksocknal_data.ksnd_nthreads--;
1001 write_unlock_irqrestore(&ksocknal_data.ksnd_global_lock, flags);
1005 ksocknal_fmb_callback (void *arg, int error)
1007 ksock_fmb_t *fmb = (ksock_fmb_t *)arg;
1008 ksock_fmb_pool_t *fmp = fmb->fmb_pool;
1009 ptl_hdr_t *hdr = &fmb->fmb_hdr;
1010 ksock_conn_t *conn = NULL;
1011 ksock_sched_t *sched;
1012 unsigned long flags;
1013 char ipbuf[PTL_NALFMT_SIZE];
1014 char ipbuf2[PTL_NALFMT_SIZE];
1017 CERROR("Failed to route packet from "
1018 LPX64" %s to "LPX64" %s: %d\n",
1019 le64_to_cpu(hdr->src_nid),
1020 portals_nid2str(SOCKNAL, le64_to_cpu(hdr->src_nid), ipbuf),
1021 le64_to_cpu(hdr->dest_nid),
1022 portals_nid2str(SOCKNAL, le64_to_cpu(hdr->dest_nid), ipbuf2),
1025 CDEBUG (D_NET, "routed packet from "LPX64" to "LPX64": OK\n",
1026 le64_to_cpu(hdr->src_nid), le64_to_cpu(hdr->dest_nid));
1028 /* drop peer ref taken on init */
1029 ksocknal_put_peer (fmb->fmb_peer);
1031 spin_lock_irqsave (&fmp->fmp_lock, flags);
1033 list_add (&fmb->fmb_list, &fmp->fmp_idle_fmbs);
1034 fmp->fmp_nactive_fmbs--;
1036 if (!list_empty (&fmp->fmp_blocked_conns)) {
1037 conn = list_entry (fmb->fmb_pool->fmp_blocked_conns.next,
1038 ksock_conn_t, ksnc_rx_list);
1039 list_del (&conn->ksnc_rx_list);
1042 spin_unlock_irqrestore (&fmp->fmp_lock, flags);
1047 CDEBUG (D_NET, "Scheduling conn %p\n", conn);
1048 LASSERT (conn->ksnc_rx_scheduled);
1049 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP);
1051 conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB;
1053 sched = conn->ksnc_scheduler;
1055 spin_lock_irqsave (&sched->kss_lock, flags);
1057 list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns);
1058 cfs_waitq_signal (&sched->kss_waitq);
1060 spin_unlock_irqrestore (&sched->kss_lock, flags);
1064 ksocknal_get_idle_fmb (ksock_conn_t *conn)
1066 int payload_nob = conn->ksnc_rx_nob_left;
1067 unsigned long flags;
1068 ksock_fmb_pool_t *pool;
1071 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
1072 LASSERT (kpr_routing(&ksocknal_data.ksnd_router));
1074 if (payload_nob <= SOCKNAL_SMALL_FWD_PAGES * CFS_PAGE_SIZE)
1075 pool = &ksocknal_data.ksnd_small_fmp;
1077 pool = &ksocknal_data.ksnd_large_fmp;
1079 spin_lock_irqsave (&pool->fmp_lock, flags);
1081 if (!list_empty (&pool->fmp_idle_fmbs)) {
1082 fmb = list_entry(pool->fmp_idle_fmbs.next,
1083 ksock_fmb_t, fmb_list);
1084 list_del (&fmb->fmb_list);
1085 pool->fmp_nactive_fmbs++;
1086 spin_unlock_irqrestore (&pool->fmp_lock, flags);
1091 /* deschedule until fmb free */
1093 conn->ksnc_rx_state = SOCKNAL_RX_FMB_SLEEP;
1095 list_add_tail (&conn->ksnc_rx_list,
1096 &pool->fmp_blocked_conns);
1098 spin_unlock_irqrestore (&pool->fmp_lock, flags);
1103 ksocknal_init_fmb (ksock_conn_t *conn, ksock_fmb_t *fmb)
1105 int payload_nob = conn->ksnc_rx_nob_left;
1106 ptl_nid_t dest_nid = le64_to_cpu(conn->ksnc_hdr.dest_nid);
1108 int nob = payload_nob;
1110 LASSERT (conn->ksnc_rx_scheduled);
1111 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB);
1112 LASSERT (conn->ksnc_rx_nob_wanted == conn->ksnc_rx_nob_left);
1113 LASSERT (payload_nob >= 0);
1114 LASSERT (payload_nob <= fmb->fmb_pool->fmp_buff_pages * CFS_PAGE_SIZE);
1115 LASSERT (sizeof (ptl_hdr_t) < CFS_PAGE_SIZE);
1116 LASSERT (fmb->fmb_kiov[0].kiov_offset == 0);
1118 /* Take a ref on the conn's peer to prevent module unload before
1119 * forwarding completes. */
1120 fmb->fmb_peer = conn->ksnc_peer;
1121 atomic_inc (&conn->ksnc_peer->ksnp_refcount);
1123 /* Copy the header we just read into the forwarding buffer. If
1124 * there's payload, start reading it into the buffer,
1125 * otherwise the forwarding buffer can be kicked off
1127 fmb->fmb_hdr = conn->ksnc_hdr;
1130 LASSERT (niov < fmb->fmb_pool->fmp_buff_pages);
1131 LASSERT (fmb->fmb_kiov[niov].kiov_offset == 0);
1132 fmb->fmb_kiov[niov].kiov_len = MIN (CFS_PAGE_SIZE, nob);
1133 nob -= CFS_PAGE_SIZE;
1137 kpr_fwd_init(&fmb->fmb_fwd, dest_nid, &fmb->fmb_hdr,
1138 payload_nob, niov, fmb->fmb_kiov,
1139 ksocknal_fmb_callback, fmb);
1141 if (payload_nob == 0) { /* got complete packet already */
1142 CDEBUG (D_NET, "%p "LPX64"->"LPX64" fwd_start (immediate)\n",
1143 conn, le64_to_cpu(conn->ksnc_hdr.src_nid), dest_nid);
1145 kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);
1147 ksocknal_new_packet (conn, 0); /* on to next packet */
1151 conn->ksnc_cookie = fmb; /* stash fmb for later */
1152 conn->ksnc_rx_state = SOCKNAL_RX_BODY_FWD; /* read in the payload */
1154 /* Set up conn->ksnc_rx_kiov to read the payload into fmb's kiov-ed
1156 LASSERT (niov <= sizeof(conn->ksnc_rx_iov_space)/sizeof(ptl_kiov_t));
1158 conn->ksnc_rx_niov = 0;
1159 conn->ksnc_rx_nkiov = niov;
1160 conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
1161 memcpy(conn->ksnc_rx_kiov, fmb->fmb_kiov, niov * sizeof(ptl_kiov_t));
1163 CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d reading body\n", conn,
1164 le64_to_cpu(conn->ksnc_hdr.src_nid), dest_nid, payload_nob);
1169 ksocknal_fwd_parse (ksock_conn_t *conn)
1172 ptl_nid_t dest_nid = le64_to_cpu(conn->ksnc_hdr.dest_nid);
1173 ptl_nid_t src_nid = le64_to_cpu(conn->ksnc_hdr.src_nid);
1174 int body_len = le32_to_cpu(conn->ksnc_hdr.payload_length);
1175 char str[PTL_NALFMT_SIZE];
1176 char str2[PTL_NALFMT_SIZE];
1178 CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn,
1179 src_nid, dest_nid, conn->ksnc_rx_nob_left);
1181 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER);
1182 LASSERT (conn->ksnc_rx_scheduled);
1184 if (body_len < 0) { /* length corrupt (overflow) */
1185 CERROR("dropping packet from "LPX64" (%s) for "LPX64" (%s): "
1186 "packet size %d illegal\n",
1187 src_nid, portals_nid2str(TCPNAL, src_nid, str),
1188 dest_nid, portals_nid2str(TCPNAL, dest_nid, str2),
1191 ksocknal_new_packet (conn, 0); /* on to new packet */
1195 if (!kpr_routing(&ksocknal_data.ksnd_router)) { /* not forwarding */
1196 CERROR("dropping packet from "LPX64" (%s) for "LPX64
1197 " (%s): not forwarding\n",
1198 src_nid, portals_nid2str(TCPNAL, src_nid, str),
1199 dest_nid, portals_nid2str(TCPNAL, dest_nid, str2));
1200 /* on to new packet (skip this one's body) */
1201 ksocknal_new_packet (conn, body_len);
1205 if (body_len > PTL_MTU) { /* too big to forward */
1206 CERROR ("dropping packet from "LPX64" (%s) for "LPX64
1207 "(%s): packet size %d too big\n",
1208 src_nid, portals_nid2str(TCPNAL, src_nid, str),
1209 dest_nid, portals_nid2str(TCPNAL, dest_nid, str2),
1211 /* on to new packet (skip this one's body) */
1212 ksocknal_new_packet (conn, body_len);
1216 /* should have gone direct */
1217 peer = ksocknal_get_peer (dest_nid);
1219 CERROR ("dropping packet from "LPX64" (%s) for "LPX64
1220 "(%s): target is a peer\n",
1221 src_nid, portals_nid2str(TCPNAL, src_nid, str),
1222 dest_nid, portals_nid2str(TCPNAL, dest_nid, str2));
1223 ksocknal_put_peer (peer); /* drop ref from get above */
1225 /* on to next packet (skip this one's body) */
1226 ksocknal_new_packet (conn, body_len);
1230 conn->ksnc_rx_state = SOCKNAL_RX_GET_FMB; /* Getting FMB now */
1231 conn->ksnc_rx_nob_left = body_len; /* stash packet size */
1232 conn->ksnc_rx_nob_wanted = body_len; /* (no slop) */
1236 ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
1238 static char ksocknal_slop_buffer[4096];
1244 if (nob_to_skip == 0) { /* right at next packet boundary now */
1245 conn->ksnc_rx_started = 0;
1246 mb (); /* racing with timeout thread */
1248 conn->ksnc_rx_state = SOCKNAL_RX_HEADER;
1249 conn->ksnc_rx_nob_wanted = sizeof (ptl_hdr_t);
1250 conn->ksnc_rx_nob_left = sizeof (ptl_hdr_t);
1252 conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
1253 conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_hdr;
1254 conn->ksnc_rx_iov[0].iov_len = sizeof (ptl_hdr_t);
1255 conn->ksnc_rx_niov = 1;
1257 conn->ksnc_rx_kiov = NULL;
1258 conn->ksnc_rx_nkiov = 0;
1262 /* Set up to skip as much as possible now. If there's more left
1263 * (ran out of iov entries) we'll get called again */
1265 conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
1266 conn->ksnc_rx_nob_left = nob_to_skip;
1267 conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
1272 nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
1274 conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
1275 conn->ksnc_rx_iov[niov].iov_len = nob;
1280 } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
1281 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
1283 conn->ksnc_rx_niov = niov;
1284 conn->ksnc_rx_kiov = NULL;
1285 conn->ksnc_rx_nkiov = 0;
1286 conn->ksnc_rx_nob_wanted = skipped;
1291 ksocknal_process_receive (ksock_conn_t *conn)
1296 LASSERT (atomic_read (&conn->ksnc_refcount) > 0);
1298 /* doesn't need a forwarding buffer */
1299 if (conn->ksnc_rx_state != SOCKNAL_RX_GET_FMB)
1303 fmb = ksocknal_get_idle_fmb (conn);
1305 /* conn descheduled waiting for idle fmb */
1309 if (ksocknal_init_fmb (conn, fmb)) {
1310 /* packet forwarded */
1315 /* NB: sched lock NOT held */
1316 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER ||
1317 conn->ksnc_rx_state == SOCKNAL_RX_BODY ||
1318 conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD ||
1319 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
1321 LASSERT (conn->ksnc_rx_nob_wanted > 0);
1323 rc = ksocknal_receive(conn);
1326 LASSERT (rc != -EAGAIN);
1329 CWARN ("[%p] EOF from "LPX64" ip %d.%d.%d.%d:%d\n",
1330 conn, conn->ksnc_peer->ksnp_nid,
1331 HIPQUAD(conn->ksnc_ipaddr),
1333 else if (!conn->ksnc_closing)
1334 CERROR ("[%p] Error %d on read from "LPX64
1335 " ip %d.%d.%d.%d:%d\n",
1336 conn, rc, conn->ksnc_peer->ksnp_nid,
1337 HIPQUAD(conn->ksnc_ipaddr),
1340 ksocknal_close_conn_and_siblings (conn, rc);
1341 return (rc == 0 ? -ESHUTDOWN : rc);
1344 if (conn->ksnc_rx_nob_wanted != 0) {
1349 switch (conn->ksnc_rx_state) {
1350 case SOCKNAL_RX_HEADER:
1351 if (conn->ksnc_hdr.type != cpu_to_le32(PTL_MSG_HELLO) &&
1352 le64_to_cpu(conn->ksnc_hdr.dest_nid) !=
1353 ksocknal_lib.libnal_ni.ni_pid.nid) {
1354 /* This packet isn't for me */
1355 ksocknal_fwd_parse (conn);
1356 switch (conn->ksnc_rx_state) {
1357 case SOCKNAL_RX_HEADER: /* skipped (zero payload) */
1358 return (0); /* => come back later */
1359 case SOCKNAL_RX_SLOP: /* skipping packet's body */
1360 goto try_read; /* => go read it */
1361 case SOCKNAL_RX_GET_FMB: /* forwarding */
1362 goto get_fmb; /* => go get a fwd msg buffer */
1369 /* sets wanted_len, iovs etc */
1370 rc = lib_parse(&ksocknal_lib, &conn->ksnc_hdr, conn);
1373 /* I just received garbage: give up on this conn */
1374 ksocknal_close_conn_and_siblings (conn, rc);
1378 if (conn->ksnc_rx_nob_wanted != 0) { /* need to get payload? */
1379 conn->ksnc_rx_state = SOCKNAL_RX_BODY;
1380 goto try_read; /* go read the payload */
1382 /* Fall through (completed packet for me) */
1384 case SOCKNAL_RX_BODY:
1385 /* payload all received */
1386 lib_finalize(&ksocknal_lib, NULL, conn->ksnc_cookie, PTL_OK);
1389 case SOCKNAL_RX_SLOP:
1390 /* starting new packet? */
1391 if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
1392 return (0); /* come back later */
1393 goto try_read; /* try to finish reading slop now */
1395 case SOCKNAL_RX_BODY_FWD:
1396 /* payload all received */
1397 CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d fwd_start (got body)\n",
1398 conn, le64_to_cpu(conn->ksnc_hdr.src_nid),
1399 le64_to_cpu(conn->ksnc_hdr.dest_nid),
1400 conn->ksnc_rx_nob_left);
1402 /* forward the packet. NB ksocknal_init_fmb() put fmb into
1403 * conn->ksnc_cookie */
1404 fmb = (ksock_fmb_t *)conn->ksnc_cookie;
1405 kpr_fwd_start (&ksocknal_data.ksnd_router, &fmb->fmb_fwd);
1407 /* no slop in forwarded packets */
1408 LASSERT (conn->ksnc_rx_nob_left == 0);
1410 ksocknal_new_packet (conn, 0); /* on to next packet */
1411 return (0); /* (later) */
1419 return (-EINVAL); /* keep gcc happy */
1423 ksocknal_recv (lib_nal_t *nal, void *private, lib_msg_t *msg,
1424 unsigned int niov, struct iovec *iov,
1425 size_t offset, size_t mlen, size_t rlen)
1427 ksock_conn_t *conn = (ksock_conn_t *)private;
1429 LASSERT (mlen <= rlen);
1430 LASSERT (niov <= PTL_MD_MAX_IOV);
1432 conn->ksnc_cookie = msg;
1433 conn->ksnc_rx_nob_wanted = mlen;
1434 conn->ksnc_rx_nob_left = rlen;
1436 conn->ksnc_rx_nkiov = 0;
1437 conn->ksnc_rx_kiov = NULL;
1438 conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
1439 conn->ksnc_rx_niov =
1440 lib_extract_iov(PTL_MD_MAX_IOV, conn->ksnc_rx_iov,
1441 niov, iov, offset, mlen);
1444 lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
1445 lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
1451 ksocknal_recv_pages (lib_nal_t *nal, void *private, lib_msg_t *msg,
1452 unsigned int niov, ptl_kiov_t *kiov,
1453 size_t offset, size_t mlen, size_t rlen)
1455 ksock_conn_t *conn = (ksock_conn_t *)private;
1457 LASSERT (mlen <= rlen);
1458 LASSERT (niov <= PTL_MD_MAX_IOV);
1460 conn->ksnc_cookie = msg;
1461 conn->ksnc_rx_nob_wanted = mlen;
1462 conn->ksnc_rx_nob_left = rlen;
1464 conn->ksnc_rx_niov = 0;
1465 conn->ksnc_rx_iov = NULL;
1466 conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
1467 conn->ksnc_rx_nkiov =
1468 lib_extract_kiov(PTL_MD_MAX_IOV, conn->ksnc_rx_kiov,
1469 niov, kiov, offset, mlen);
1472 lib_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
1473 lib_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
1479 ksocknal_sched_cansleep(ksock_sched_t *sched)
1481 unsigned long flags;
1484 spin_lock_irqsave(&sched->kss_lock, flags);
1486 rc = (!ksocknal_data.ksnd_shuttingdown &&
1488 list_empty(&sched->kss_zctxdone_list) &&
1490 list_empty(&sched->kss_rx_conns) &&
1491 list_empty(&sched->kss_tx_conns));
1493 spin_unlock_irqrestore(&sched->kss_lock, flags);
1497 int ksocknal_scheduler (void *arg)
1499 ksock_sched_t *sched = (ksock_sched_t *)arg;
1502 unsigned long flags;
1505 int id = sched - ksocknal_data.ksnd_schedulers;
1508 snprintf (name, sizeof (name),"ksocknald_%02d", id);
1509 kportal_daemonize (name);
1510 kportal_blockallsigs ();
1512 #if (CONFIG_SMP && CPU_AFFINITY)
1513 id = ksocknal_sched2cpu(id);
1514 if (cpu_online(id)) {
1517 set_cpus_allowed(current, m);
1519 CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
1521 #endif /* CONFIG_SMP && CPU_AFFINITY */
1523 spin_lock_irqsave (&sched->kss_lock, flags);
1525 while (!ksocknal_data.ksnd_shuttingdown) {
1526 int did_something = 0;
1528 /* Ensure I progress everything semi-fairly */
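/* Each pass below services at most one unit of work from each of
 * three queues (a receive-ready conn, a transmit-ready conn and a
 * completed zero-copy tx), so no single queue can starve the others. */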
1530 if (!list_empty (&sched->kss_rx_conns)) {
1531 conn = list_entry(sched->kss_rx_conns.next,
1532 ksock_conn_t, ksnc_rx_list);
1533 list_del(&conn->ksnc_rx_list);
1535 LASSERT(conn->ksnc_rx_scheduled);
1536 LASSERT(conn->ksnc_rx_ready);
1538 /* clear rx_ready in case receive isn't complete.
1539 * Do it BEFORE we call process_recv, since
1540 * data_ready can set it any time after we release
1542 conn->ksnc_rx_ready = 0;
1543 spin_unlock_irqrestore(&sched->kss_lock, flags);
1545 rc = ksocknal_process_receive(conn);
1547 spin_lock_irqsave(&sched->kss_lock, flags);
1549 /* I'm the only one that can clear this flag */
1550 LASSERT(conn->ksnc_rx_scheduled);
1552 /* Did process_receive get everything it wanted? */
1554 conn->ksnc_rx_ready = 1;
1556 if (conn->ksnc_rx_state == SOCKNAL_RX_FMB_SLEEP ||
1557 conn->ksnc_rx_state == SOCKNAL_RX_GET_FMB) {
1558 /* Conn blocked for a forwarding buffer.
1559 * It will get queued for my attention when
1560 * one becomes available (and it might just
1561 * already have been!). Meanwhile my ref
1562 * on it stays put. */
1563 } else if (conn->ksnc_rx_ready) {
1564 /* reschedule for rx */
1565 list_add_tail (&conn->ksnc_rx_list,
1566 &sched->kss_rx_conns);
1568 conn->ksnc_rx_scheduled = 0;
1570 ksocknal_put_conn(conn);
1576 if (!list_empty (&sched->kss_tx_conns)) {
1577 conn = list_entry(sched->kss_tx_conns.next,
1578 ksock_conn_t, ksnc_tx_list);
1579 list_del (&conn->ksnc_tx_list);
1581 LASSERT(conn->ksnc_tx_scheduled);
1582 LASSERT(conn->ksnc_tx_ready);
1583 LASSERT(!list_empty(&conn->ksnc_tx_queue));
1585 tx = list_entry(conn->ksnc_tx_queue.next,
1586 ksock_tx_t, tx_list);
1587 /* dequeue now so empty list => more to send */
1588 list_del(&tx->tx_list);
1590 /* Clear tx_ready in case send isn't complete. Do
1591 * it BEFORE we call process_transmit, since
1592 * write_space can set it any time after we release
1594 conn->ksnc_tx_ready = 0;
1595 spin_unlock_irqrestore (&sched->kss_lock, flags);
1597 rc = ksocknal_process_transmit(conn, tx);
1599 spin_lock_irqsave (&sched->kss_lock, flags);
1601 if (rc == -ENOMEM || rc == -EAGAIN) {
1602 /* Incomplete send: replace tx on HEAD of tx_queue */
1603 list_add (&tx->tx_list, &conn->ksnc_tx_queue);
1605 /* Complete send; assume space for more */
1606 conn->ksnc_tx_ready = 1;
1609 if (rc == -ENOMEM) {
1610 /* Do nothing; after a short timeout, this
1611 * conn will be reposted on kss_tx_conns. */
1612 } else if (conn->ksnc_tx_ready &&
1613 !list_empty (&conn->ksnc_tx_queue)) {
1614 /* reschedule for tx */
1615 list_add_tail (&conn->ksnc_tx_list,
1616 &sched->kss_tx_conns);
1618 conn->ksnc_tx_scheduled = 0;
1620 ksocknal_put_conn (conn);
1626 if (!list_empty (&sched->kss_zctxdone_list)) {
1628 list_entry(sched->kss_zctxdone_list.next,
1629 ksock_tx_t, tx_list);
1632 list_del (&tx->tx_list);
1633 spin_unlock_irqrestore (&sched->kss_lock, flags);
1635 ksocknal_tx_done (tx, 1);
1637 spin_lock_irqsave (&sched->kss_lock, flags);
1640 if (!did_something || /* nothing to do */
1641 ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
1642 spin_unlock_irqrestore (&sched->kss_lock, flags);
1646 if (!did_something) { /* wait for something to do */
1647 rc = wait_event_interruptible (sched->kss_waitq,
1648 !ksocknal_sched_cansleep(sched));
1653 spin_lock_irqsave (&sched->kss_lock, flags);
1657 spin_unlock_irqrestore (&sched->kss_lock, flags);
1658 ksocknal_thread_fini ();
1663 * Add connection to kss_rx_conns of scheduler
1664 * and wakeup the scheduler.
1666 void ksocknal_read_callback (ksock_conn_t *conn)
1668 ksock_sched_t *sched;
1669 unsigned long flags;
1672 sched = conn->ksnc_scheduler;
1674 spin_lock_irqsave (&sched->kss_lock, flags);
1676 conn->ksnc_rx_ready = 1;
1678 if (!conn->ksnc_rx_scheduled) { /* not being progressed */
1679 list_add_tail(&conn->ksnc_rx_list,
1680 &sched->kss_rx_conns);
1681 conn->ksnc_rx_scheduled = 1;
1682 /* extra ref for scheduler */
1683 atomic_inc (&conn->ksnc_refcount);
1685 cfs_waitq_signal (&sched->kss_waitq);
1687 spin_unlock_irqrestore (&sched->kss_lock, flags);
1693 * Add connection to kss_tx_conns of scheduler
1694 * and wakeup the scheduler.
1696 void ksocknal_write_callback (ksock_conn_t *conn)
1698 ksock_sched_t *sched;
1699 unsigned long flags;
1702 sched = conn->ksnc_scheduler;
1704 spin_lock_irqsave (&sched->kss_lock, flags);
1706 conn->ksnc_tx_ready = 1;
1708 if (!conn->ksnc_tx_scheduled && /* not being progressed */
1709 !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
1710 list_add_tail (&conn->ksnc_tx_list,
1711 &sched->kss_tx_conns);
1712 conn->ksnc_tx_scheduled = 1;
1713 /* extra ref for scheduler */
1714 atomic_inc (&conn->ksnc_refcount);
1716 cfs_waitq_signal (&sched->kss_waitq);
1719 spin_unlock_irqrestore (&sched->kss_lock, flags);
1725 ksocknal_sock_write (struct socket *sock, void *buffer, int nob)
1727 return ksocknal_lib_sock_write(sock, buffer, nob);
1731 ksocknal_sock_read (struct socket *sock, void *buffer, int nob)
1733 return ksocknal_lib_sock_read(sock, buffer, nob);
1737 ksocknal_send_hello (ksock_conn_t *conn, __u32 *ipaddrs, int nipaddrs)
1739 /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
1740 struct socket *sock = conn->ksnc_sock;
1742 ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid;
1746 LASSERT (conn->ksnc_type != SOCKNAL_CONN_NONE);
1747 LASSERT (nipaddrs <= SOCKNAL_MAX_INTERFACES);
1749 /* No need for getconnsock/putconnsock */
1750 LASSERT (!conn->ksnc_closing);
1752 LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));
1753 hmv->magic = cpu_to_le32 (PORTALS_PROTO_MAGIC);
1754 hmv->version_major = cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR);
1755 hmv->version_minor = cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR);
1757 hdr.src_nid = cpu_to_le64 (ksocknal_lib.libnal_ni.ni_pid.nid);
1758 hdr.type = cpu_to_le32 (PTL_MSG_HELLO);
1759 hdr.payload_length = cpu_to_le32 (nipaddrs * sizeof(*ipaddrs));
1761 hdr.msg.hello.type = cpu_to_le32 (conn->ksnc_type);
1762 hdr.msg.hello.incarnation =
1763 cpu_to_le64 (ksocknal_data.ksnd_incarnation);
1765 /* Receiver is eager */
1766 rc = ksocknal_sock_write (sock, &hdr, sizeof(hdr));
1768 CERROR ("Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
1769 rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
1776 for (i = 0; i < nipaddrs; i++) {
1777 ipaddrs[i] = __cpu_to_le32 (ipaddrs[i]);
1780 rc = ksocknal_sock_write (sock, ipaddrs, nipaddrs * sizeof(*ipaddrs));
1782 CERROR ("Error %d sending HELLO payload (%d)"
1783 " to %u.%u.%u.%u/%d\n", rc, nipaddrs,
1784 HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
1789 ksocknal_invert_type(int type)
1793 case SOCKNAL_CONN_ANY:
1794 case SOCKNAL_CONN_CONTROL:
1796 case SOCKNAL_CONN_BULK_IN:
1797 return SOCKNAL_CONN_BULK_OUT;
1798 case SOCKNAL_CONN_BULK_OUT:
1799 return SOCKNAL_CONN_BULK_IN;
1801 return (SOCKNAL_CONN_NONE);
1806 ksocknal_recv_hello (ksock_conn_t *conn, ptl_nid_t *nid,
1807 __u64 *incarnation, __u32 *ipaddrs)
1809 struct socket *sock = conn->ksnc_sock;
1815 ptl_magicversion_t *hmv;
1816 char ipbuf[PTL_NALFMT_SIZE];
1818 hmv = (ptl_magicversion_t *)&hdr.dest_nid;
1819 LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));
1821 rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv));
1823 CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
1824 rc, HIPQUAD(conn->ksnc_ipaddr));
1828 if (hmv->magic != le32_to_cpu (PORTALS_PROTO_MAGIC)) {
1829 CERROR ("Bad magic %#08x (%#08x expected) from %u.%u.%u.%u\n",
1830 __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC,
1831 HIPQUAD(conn->ksnc_ipaddr));
1835 if (hmv->version_major != cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) ||
1836 hmv->version_minor != cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) {
1837 CERROR ("Incompatible protocol version %d.%d (%d.%d expected)"
1838 " from %u.%u.%u.%u\n",
1839 le16_to_cpu (hmv->version_major),
1840 le16_to_cpu (hmv->version_minor),
1841 PORTALS_PROTO_VERSION_MAJOR,
1842 PORTALS_PROTO_VERSION_MINOR,
1843 HIPQUAD(conn->ksnc_ipaddr));
1847 #if (PORTALS_PROTO_VERSION_MAJOR != 1)
1848 # error "This code only understands protocol version 1.x"
1850 /* version 1 sends magic/version as the dest_nid of a 'hello'
1851 * header, followed by payload full of interface IP addresses.
1852 * Read the rest of it in now... */
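/* So the HELLO this function expects on the wire is, in order (all
 * little-endian): the magic/version triple overlaying hdr.dest_nid,
 * the rest of the ptl_hdr_t (src_nid, type == PTL_MSG_HELLO,
 * payload_length, hello.type, hello.incarnation), then
 * payload_length/4 __u32 interface addresses. */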
1854 rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv));
1856 CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n",
1857 rc, HIPQUAD(conn->ksnc_ipaddr));
1861 /* ...and check we got what we expected */
1862 if (hdr.type != cpu_to_le32 (PTL_MSG_HELLO)) {
1863 CERROR ("Expecting a HELLO hdr,"
1864 " but got type %d from %u.%u.%u.%u\n",
1865 le32_to_cpu (hdr.type),
1866 HIPQUAD(conn->ksnc_ipaddr));
1870 if (le64_to_cpu(hdr.src_nid) == PTL_NID_ANY) {
1871 CERROR("Expecting a HELLO hdr with a NID, but got PTL_NID_ANY"
1872 "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
1876 if (*nid == PTL_NID_ANY) { /* don't know peer's nid yet */
1877 *nid = le64_to_cpu(hdr.src_nid);
1878 } else if (*nid != le64_to_cpu (hdr.src_nid)) {
1879 LCONSOLE_ERROR("Connected successfully to nid "LPX64" on host "
1880 "%u.%u.%u.%u, but they claimed they were nid "
1881 LPX64" (%s); please check your Lustre "
1883 *nid, HIPQUAD(conn->ksnc_ipaddr),
1884 le64_to_cpu(hdr.src_nid),
1885 portals_nid2str(SOCKNAL,
1886 le64_to_cpu(hdr.src_nid),
1889 CERROR ("Connected to nid "LPX64"@%u.%u.%u.%u "
1890 "but expecting "LPX64"\n",
1891 le64_to_cpu (hdr.src_nid),
1892 HIPQUAD(conn->ksnc_ipaddr), *nid);
1896 type = __le32_to_cpu(hdr.msg.hello.type);
1898 if (conn->ksnc_type == SOCKNAL_CONN_NONE) {
1899 /* I've accepted this connection; peer determines type */
1900 conn->ksnc_type = ksocknal_invert_type(type);
1901 if (conn->ksnc_type == SOCKNAL_CONN_NONE) {
1902 CERROR ("Unexpected type %d from "LPX64"@%u.%u.%u.%u\n",
1903 type, *nid, HIPQUAD(conn->ksnc_ipaddr));
1906 } else if (ksocknal_invert_type(type) != conn->ksnc_type) {
1907 CERROR ("Mismatched types: me %d, "LPX64"@%u.%u.%u.%u %d\n",
1908 conn->ksnc_type, *nid, HIPQUAD(conn->ksnc_ipaddr),
1909 le32_to_cpu(hdr.msg.hello.type));
1913 *incarnation = le64_to_cpu(hdr.msg.hello.incarnation);
1915 nips = __le32_to_cpu (hdr.payload_length) / sizeof (__u32);
1917 if (nips > SOCKNAL_MAX_INTERFACES ||
1918 nips * sizeof(__u32) != __le32_to_cpu (hdr.payload_length)) {
1919 CERROR("Bad payload length %d from "LPX64"@%u.%u.%u.%u\n",
1920 __le32_to_cpu (hdr.payload_length),
1921 *nid, HIPQUAD(conn->ksnc_ipaddr));
1927 rc = ksocknal_sock_read (sock, ipaddrs, nips * sizeof(*ipaddrs));
1929 CERROR ("Error %d reading IPs from "LPX64"@%u.%u.%u.%u\n",
1930 rc, *nid, HIPQUAD(conn->ksnc_ipaddr));
1934 for (i = 0; i < nips; i++) {
1935 ipaddrs[i] = __le32_to_cpu(ipaddrs[i]);
1937 if (ipaddrs[i] == 0) {
1938 CERROR("Zero IP[%d] from "LPX64"@%u.%u.%u.%u\n",
1939 i, *nid, HIPQUAD(conn->ksnc_ipaddr));
1948 ksocknal_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
1950 return ksocknal_lib_get_conn_tunables(conn, txmem, rxmem, nagle);
1954 ksocknal_connect_peer (ksock_route_t *route, int type)
1956 struct socket *sock;
1961 /* Iterate through reserved ports. When typed connections are
1962 * used, we will need to bind to multiple ports, but we only know
1963 * this at connect time. But, by that time we've already called
1964 * bind() so we need a new socket. */
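/* Only privileged ports (512..1023) are tried below; presumably this
 * is also what lets the peer trust that the connection came from a
 * kernel/root context, though that rationale is inferred rather than
 * stated here. */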
1966 for (port = 1023; port > 512; --port) {
1968 rc = ksocknal_lib_connect_sock(&sock, &may_retry, route, port);
1971 rc = ksocknal_create_conn(route, sock, type);
1972 cfs_put_file(KSN_SOCK2FILE(sock));
1980 CERROR("Out of ports trying to bind to a reserved port\n");
1981 return (-EADDRINUSE);
1985 ksocknal_autoconnect (ksock_route_t *route)
1987 CFS_LIST_HEAD (zombies);
1990 unsigned long flags;
1993 char *err_msg = NULL;
1996 for (type = 0; type < SOCKNAL_CONN_NTYPES; type++)
1997 if ((route->ksnr_connecting & (1 << type)) != 0)
1999 LASSERT (type < SOCKNAL_CONN_NTYPES);
2001 rc = ksocknal_connect_peer (route, type);
2005 /* successfully autoconnected: create_conn did the
2006 * route/conn binding and scheduled any blocked packets */
2008 if (route->ksnr_connecting == 0) {
2009 /* No more connections required */
2015 /* "normal" errors */
2017 LCONSOLE_ERROR("Connection was refused by host %u.%u.%u.%u on "
2018 "port %d; check that Lustre is running on that "
2020 HIPQUAD(route->ksnr_ipaddr),
2025 LCONSOLE_ERROR("Host %u.%u.%u.%u was unreachable; the network "
2026 "or that node may be down, or Lustre may be "
2028 HIPQUAD(route->ksnr_ipaddr));
2031 LCONSOLE_ERROR("Connecting to host %u.%u.%u.%u on port %d took "
2032 "too long; that node may be hung or "
2033 "experiencing high load.\n",
2034 HIPQUAD(route->ksnr_ipaddr),
2037 /* errors that should be rare */
2039 err_msg = "Portals could not negotiate a connection";
2043 /* -EAGAIN is out of ports, but we specify the ports
2044 * manually. We really should never get this */
2045 err_msg = "no privileged ports were available";
2048 err_msg = "unknown error";
2053 LCONSOLE_ERROR("There was an unexpected error connecting to host "
2054 "%u.%u.%u.%u on port %d: %s (error code %d).\n",
2055 HIPQUAD(route->ksnr_ipaddr),
2060 /* Connection attempt failed */
2062 write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
2064 peer = route->ksnr_peer;
2065 route->ksnr_connecting = 0;
2067 /* This is a retry rather than a new connection */
2068 LASSERT (route->ksnr_retry_interval != 0);
2069 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
2070 route->ksnr_retry_interval);
2071 route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2,
2072 SOCKNAL_MAX_RECONNECT_INTERVAL);
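/* i.e. classic exponential backoff: the next attempt is scheduled one
 * retry interval out and the interval then doubles, capped at
 * SOCKNAL_MAX_RECONNECT_INTERVAL. */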
2074 if (!list_empty (&peer->ksnp_tx_queue) &&
2075 ksocknal_find_connecting_route_locked (peer) == NULL) {
2076 LASSERT (list_empty (&peer->ksnp_conns));
2078 /* None of the connections that the blocked packets are
2079 * waiting for have been successful. Complete them now... */
2081 tx = list_entry (peer->ksnp_tx_queue.next,
2082 ksock_tx_t, tx_list);
2083 list_del (&tx->tx_list);
2084 list_add_tail (&tx->tx_list, &zombies);
2085 } while (!list_empty (&peer->ksnp_tx_queue));
2088 #if 0 /* irrelevant with only eager routes */
2089 if (!route->ksnr_deleted) {
2090 /* make this route least-favourite for re-selection */
2091 list_del(&route->ksnr_list);
2092 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
2095 write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
2097 while (!list_empty (&zombies)) {
2098 char ipbuf[PTL_NALFMT_SIZE];
2099 char ipbuf2[PTL_NALFMT_SIZE];
2100 tx = list_entry (zombies.next, ksock_tx_t, tx_list);
2102 CERROR ("Deleting packet type %d len %d ("LPX64" %s->"LPX64" %s)\n",
2103 le32_to_cpu (tx->tx_hdr->type),
2104 le32_to_cpu (tx->tx_hdr->payload_length),
2105 le64_to_cpu (tx->tx_hdr->src_nid),
2106 portals_nid2str(SOCKNAL,
2107 le64_to_cpu(tx->tx_hdr->src_nid),
2109 le64_to_cpu (tx->tx_hdr->dest_nid),
2110 portals_nid2str(SOCKNAL,
2111 le64_to_cpu(tx->tx_hdr->dest_nid),
2114 list_del (&tx->tx_list);
2116 ksocknal_tx_done (tx, 0);
2121 ksocknal_autoconnectd (void *arg)
2123 long id = (long)arg;
2125 unsigned long flags;
2126 ksock_route_t *route;
2129 snprintf (name, sizeof (name), "ksocknal_ad%02ld", id);
2130 kportal_daemonize (name);
2131 kportal_blockallsigs ();
2133 spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags);
2135 while (!ksocknal_data.ksnd_shuttingdown) {
2137 if (!list_empty (&ksocknal_data.ksnd_autoconnectd_routes)) {
2138 route = list_entry (ksocknal_data.ksnd_autoconnectd_routes.next,
2139 ksock_route_t, ksnr_connect_list);
2141 list_del (&route->ksnr_connect_list);
2142 spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
2144 ksocknal_autoconnect (route);
2145 ksocknal_put_route (route);
2147 spin_lock_irqsave(&ksocknal_data.ksnd_autoconnectd_lock,
2152 spin_unlock_irqrestore(&ksocknal_data.ksnd_autoconnectd_lock,
2155 rc = wait_event_interruptible(ksocknal_data.ksnd_autoconnectd_waitq,
2156 ksocknal_data.ksnd_shuttingdown ||
2157 !list_empty(&ksocknal_data.ksnd_autoconnectd_routes));
2159 spin_lock_irqsave(&ksocknal_data.ksnd_autoconnectd_lock, flags);
2162 spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
2164 ksocknal_thread_fini ();
2169 ksocknal_find_timed_out_conn (ksock_peer_t *peer)
2171 /* We're called with a shared lock on ksnd_global_lock */
2173 struct list_head *ctmp;
2175 list_for_each (ctmp, &peer->ksnp_conns) {
2176 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
2178 /* Don't need the {get,put}connsock dance to deref ksnc_sock... */
2179 LASSERT (!conn->ksnc_closing);
2181 if (SOCK_ERROR(conn->ksnc_sock) != 0) {
2182 atomic_inc (&conn->ksnc_refcount);
2184 switch (SOCK_ERROR(conn->ksnc_sock)) {
2186 LCONSOLE_WARN("A connection with %u.%u.%u.%u "
2187 "was reset; they may have "
2189 HIPQUAD(conn->ksnc_ipaddr));
2192 LCONSOLE_WARN("A connection with %u.%u.%u.%u "
2193 "timed out; the network or that "
2194 "node may be down.\n",
2195 HIPQUAD(conn->ksnc_ipaddr));
2198 LCONSOLE_WARN("An unexpected network error "
2199 "occurred with %u.%u.%u.%u: %d.\n",
2200 HIPQUAD(conn->ksnc_ipaddr),
2201 SOCK_ERROR(conn->ksnc_sock));
2205 /* Something (e.g. failed keepalive) set the socket error */
2206 CERROR ("Socket error %d: "LPX64" %p %d.%d.%d.%d\n",
2207 SOCK_ERROR(conn->ksnc_sock), peer->ksnp_nid,
2208 conn, HIPQUAD(conn->ksnc_ipaddr));
2213 if (conn->ksnc_rx_started &&
2214 cfs_time_aftereq (cfs_time_current(),
2215 conn->ksnc_rx_deadline)) {
2216 /* Timed out incomplete incoming message */
2217 atomic_inc (&conn->ksnc_refcount);
2218 LCONSOLE_ERROR("A timeout occurred receiving data from "
2219 "%u.%u.%u.%u; the network or that node "
2221 HIPQUAD(conn->ksnc_ipaddr));
2222 CERROR ("Timed out RX from "LPX64" %p %d.%d.%d.%d\n",
2223 peer->ksnp_nid,conn,HIPQUAD(conn->ksnc_ipaddr));
2227 if ((!list_empty (&conn->ksnc_tx_queue) ||
2228 SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
2229 cfs_time_aftereq (cfs_time_current(),
2230 conn->ksnc_tx_deadline)) {
2231 /* Timed out messages queued for sending or
2232 * buffered in the socket's send buffer */
2233 atomic_inc (&conn->ksnc_refcount);
2234 LCONSOLE_ERROR("A timeout occurred sending data to "
2235 "%u.%u.%u.%u; the network or that node "
2237 HIPQUAD(conn->ksnc_ipaddr));
2238 CERROR ("Timed out TX to "LPX64" %s%d %p %d.%d.%d.%d\n",
2240 list_empty (&conn->ksnc_tx_queue) ? "" : "Q ",
2241 SOCK_WMEM_QUEUED(conn->ksnc_sock), conn,
2242 HIPQUAD(conn->ksnc_ipaddr));
2251 ksocknal_check_peer_timeouts (int idx)
2253 struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
2254 struct list_head *ptmp;
2259 /* NB. We expect to have a look at all the peers and not find any
2260 * connections to time out, so we just use a shared lock while we
2262 read_lock (&ksocknal_data.ksnd_global_lock);
2264 list_for_each (ptmp, peers) {
2265 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
2266 conn = ksocknal_find_timed_out_conn (peer);
2269 read_unlock (&ksocknal_data.ksnd_global_lock);
2271 CERROR ("Timeout out conn->"LPX64" ip %d.%d.%d.%d:%d\n",
2273 HIPQUAD(conn->ksnc_ipaddr),
2275 ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
2277 /* NB we won't find this one again, but we can't
2278 * just proceed with the next peer, since we dropped
2279 * ksnd_global_lock and it might be dead already! */
2280 ksocknal_put_conn (conn);
2285 read_unlock (&ksocknal_data.ksnd_global_lock);
2289 ksocknal_reaper (void *arg)
2291 cfs_waitlink_t wait;
2292 unsigned long flags;
2294 ksock_sched_t *sched;
2295 struct list_head enomem_conns;
2297 cfs_duration_t timeout;
2300 cfs_time_t deadline = cfs_time_current();
2302 kportal_daemonize ("ksocknal_reaper");
2303 kportal_blockallsigs ();
2305 CFS_INIT_LIST_HEAD(&enomem_conns);
2306 cfs_waitlink_init (&wait);
2308 spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
2310 while (!ksocknal_data.ksnd_shuttingdown) {
2312 if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
2313 conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
2314 ksock_conn_t, ksnc_list);
2315 list_del (&conn->ksnc_list);
2317 spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
2319 ksocknal_terminate_conn (conn);
2320 ksocknal_put_conn (conn);
2322 spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
2326 if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
2327 conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
2328 ksock_conn_t, ksnc_list);
2329 list_del (&conn->ksnc_list);
2331 spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
2333 ksocknal_destroy_conn (conn);
2335 spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
2339 if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
2340 list_add(&enomem_conns, &ksocknal_data.ksnd_enomem_conns);
2341 list_del_init(&ksocknal_data.ksnd_enomem_conns);
2344 spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
2346 /* reschedule all the connections that stalled with ENOMEM... */
2348 while (!list_empty (&enomem_conns)) {
2349 conn = list_entry (enomem_conns.next,
2350 ksock_conn_t, ksnc_tx_list);
2351 list_del (&conn->ksnc_tx_list);
2353 sched = conn->ksnc_scheduler;
2355 spin_lock_irqsave (&sched->kss_lock, flags);
2357 LASSERT (conn->ksnc_tx_scheduled);
2358 conn->ksnc_tx_ready = 1;
2359 list_add_tail (&conn->ksnc_tx_list, &sched->kss_tx_conns);
2360 cfs_waitq_signal (&sched->kss_waitq);
2362 spin_unlock_irqrestore (&sched->kss_lock, flags);
2366 /* careful with the jiffy wrap... */
2367 while ((timeout = cfs_time_sub(deadline,
2368 cfs_time_current())) <= 0) {
2371 int chunk = ksocknal_data.ksnd_peer_hash_size;
2373 /* Time to check for timeouts on a few more peers: I do
2374 * checks every 'p' seconds on a proportion of the peer
2375 * table and I need to check every connection 'n' times
2376 * within a timeout interval, to ensure I detect a
2377 * timeout on any connection within (n+1)/n times the
2378 * timeout interval. */
2380 if (ksocknal_tunables.ksnd_io_timeout > n * p)
2381 chunk = (chunk * n * p) /
2382 ksocknal_tunables.ksnd_io_timeout;
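/* Illustrative numbers only: with 128 hash buckets, p = 1 second,
 * n = 4 and ksnd_io_timeout = 64 seconds, chunk = 128 * 4 * 1 / 64 = 8
 * buckets per wakeup, so the whole table is scanned every 16 seconds,
 * i.e. 4 times per timeout interval as required above. */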
2386 for (i = 0; i < chunk; i++) {
2387 ksocknal_check_peer_timeouts (peer_index);
2388 peer_index = (peer_index + 1) %
2389 ksocknal_data.ksnd_peer_hash_size;
2392 deadline = cfs_time_add(deadline, cfs_time_seconds(p));
2395 if (nenomem_conns != 0) {
2396 /* Reduce my timeout if I rescheduled ENOMEM conns.
2397 * This also prevents me getting woken immediately
2398 * if any go back on my enomem list. */
2399 timeout = SOCKNAL_ENOMEM_RETRY;
2401 ksocknal_data.ksnd_reaper_waketime =
2402 cfs_time_add(cfs_time_current(), timeout);
2404 set_current_state (TASK_INTERRUPTIBLE);
2405 cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
2407 if (!ksocknal_data.ksnd_shuttingdown &&
2408 list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
2409 list_empty (&ksocknal_data.ksnd_zombie_conns))
2410 cfs_waitq_timedwait (&wait, timeout);
2412 set_current_state (TASK_RUNNING);
2413 cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
2415 spin_lock_irqsave (&ksocknal_data.ksnd_reaper_lock, flags);
2418 spin_unlock_irqrestore (&ksocknal_data.ksnd_reaper_lock, flags);
2420 ksocknal_thread_fini ();
2424 lib_nal_t ksocknal_lib = {
2425 libnal_data: &ksocknal_data, /* NAL private data */
2426 libnal_send: ksocknal_send,
2427 libnal_send_pages: ksocknal_send_pages,
2428 libnal_recv: ksocknal_recv,
2429 libnal_recv_pages: ksocknal_recv_pages,
2430 libnal_dist: ksocknal_dist