* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Intel Corporation.
* Author: Zach Brown <zab@zabbo.net>
* Author: Peter J. Braam <braam@clusterfs.com>
* Author: Phil Schwan <phil@clusterfs.com>
* Author: Eric Barton <eric@bartonsoftware.com>
* This file is part of Portals, http://www.sf.net/projects/sandiaportals/
* Portals is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
* Portals is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with Portals; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
ksocknal_alloc_tx(int type, int size)
ksock_tx_t *tx = NULL;
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
/* searching for a noop tx in free list */
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next, ksock_tx_t, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
LIBCFS_ALLOC(tx, size);
atomic_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
tx->tx_desc_size = size;
atomic_inc(&ksocknal_data.ksnd_nactive_txs);
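/* Note: NOOP txs carry no payload fragments, so they are all the same
 * size (KSOCK_NOOP_TX_SIZE) and can be recycled through the
 * ksnd_idle_noop_txs freelist instead of going back to the allocator.
 * A minimal sketch of the size relationship, assuming the descriptor
 * layout used elsewhere in this file (illustrative, not the
 * authoritative definition):
 *
 *   #define KSOCK_NOOP_TX_SIZE \
 *           ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
 *
 * i.e. a noop tx is a bare descriptor with zero kiov entries. */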
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
CERROR("Can't allocate noop tx desc\n");
tx->tx_lnetmsg = NULL;
tx->tx_iov = tx->tx_frags.virt.iov;
tx->tx_nonblk = nonblk;
socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
tx->tx_msg.ksm_zc_cookies[1] = cookie;
ksocknal_free_tx (ksock_tx_t *tx)
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
spin_lock(&ksocknal_data.ksnd_tx_lock);
list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
LIBCFS_FREE(tx, tx->tx_desc_size);
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
struct iovec *iov = tx->tx_iov;
LASSERT (tx->tx_niov > 0);
/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
rc = ksocknal_lib_send_iov(conn, tx);
if (rc <= 0) /* sent nothing? */
LASSERT (nob <= tx->tx_resid);
LASSERT (tx->tx_niov > 0);
if (nob < (int) iov->iov_len) {
iov->iov_base += nob;
ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
lnet_kiov_t *kiov = tx->tx_kiov;
LASSERT (tx->tx_niov == 0);
LASSERT (tx->tx_nkiov > 0);
/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
rc = ksocknal_lib_send_kiov(conn, tx);
if (rc <= 0) /* sent nothing? */
LASSERT (nob <= tx->tx_resid);
LASSERT(tx->tx_nkiov > 0);
if (nob < (int)kiov->kiov_len) {
kiov->kiov_offset += nob;
kiov->kiov_len -= nob;
nob -= (int)kiov->kiov_len;
tx->tx_kiov = ++kiov;
ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (ksocknal_data.ksnd_stall_tx != 0) {
cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
LASSERT (tx->tx_resid != 0);
rc = ksocknal_connsock_addref(conn);
LASSERT (conn->ksnc_closing);
if (ksocknal_data.ksnd_enomem_tx > 0) {
ksocknal_data.ksnd_enomem_tx--;
} else if (tx->tx_niov != 0) {
rc = ksocknal_send_iov (conn, tx);
rc = ksocknal_send_kiov (conn, tx);
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
if (rc > 0) /* sent something? */
conn->ksnc_tx_bufnob += rc; /* account it */
if (bufnob < conn->ksnc_tx_bufnob) {
/* allocated send buffer bytes < computed; infer
 * something got ACKed */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_tx_bufnob = bufnob;
if (rc <= 0) { /* Didn't write anything? */
if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
/* Check if EAGAIN is due to memory pressure */
if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
/* socket's wmem_queued now includes 'rc' bytes */
atomic_sub (rc, &conn->ksnc_tx_nob);
} while (tx->tx_resid != 0);
ksocknal_connsock_decref(conn);
ksocknal_recv_iov (ksock_conn_t *conn)
struct iovec *iov = conn->ksnc_rx_iov;
LASSERT (conn->ksnc_rx_niov > 0);
/* Never touch conn->ksnc_rx_iov or change connection
 * status inside ksocknal_lib_recv_iov */
rc = ksocknal_lib_recv_iov(conn);
/* received something... */
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_rx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
LASSERT (conn->ksnc_rx_niov > 0);
if (nob < (int)iov->iov_len) {
iov->iov_base += nob;
conn->ksnc_rx_iov = ++iov;
conn->ksnc_rx_niov--;
ksocknal_recv_kiov (ksock_conn_t *conn)
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
LASSERT (conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
 * status inside ksocknal_lib_recv_kiov */
rc = ksocknal_lib_recv_kiov(conn);
/* received something... */
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_rx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
LASSERT (conn->ksnc_rx_nkiov > 0);
if (nob < (int) kiov->kiov_len) {
kiov->kiov_offset += nob;
kiov->kiov_len -= nob;
nob -= kiov->kiov_len;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
ksocknal_receive (ksock_conn_t *conn)
/* Return 1 on success, 0 on EOF, < 0 on error.
 * Caller checks ksnc_rx_nob_wanted to determine
 * progress/completion. */
if (ksocknal_data.ksnd_stall_rx != 0) {
cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
rc = ksocknal_connsock_addref(conn);
LASSERT (conn->ksnc_closing);
if (conn->ksnc_rx_niov != 0)
rc = ksocknal_recv_iov (conn);
rc = ksocknal_recv_kiov (conn);
/* error/EOF or partial receive */
} else if (rc == 0 && conn->ksnc_rx_started) {
/* EOF in the middle of a message */
/* Completed a fragment */
if (conn->ksnc_rx_nob_wanted == 0) {
ksocknal_connsock_decref(conn);
ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
LASSERT(ni != NULL || tx->tx_conn != NULL);
if (tx->tx_conn != NULL)
ksocknal_conn_decref(tx->tx_conn);
if (ni == NULL && tx->tx_conn != NULL)
ni = tx->tx_conn->ksnc_peer->ksnp_ni;
ksocknal_free_tx (tx);
if (lnetmsg != NULL) /* KSOCK_MSG_NOOPs go without lnetmsg */
lnet_finalize (ni, lnetmsg, rc);
ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
while (!list_empty(txlist)) {
tx = list_entry(txlist->next, ksock_tx_t, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
CNETERR("Deleting noop packet\n");
list_del(&tx->tx_list);
LASSERT (atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
ksocknal_check_zc_req(ksock_tx_t *tx)
ksock_conn_t *conn = tx->tx_conn;
ksock_peer_t *peer = conn->ksnc_peer;
/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
 * to ksnp_zc_req_list if some fragment of this message should be sent
 * zero-copy. Our peer will send an ACK containing this cookie when
 * she has received this message to tell us we can signal completion.
 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
 * ksnp_zc_req_list. */
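/* A sketch of the zero-copy completion handshake described above
 * (illustrative only; message names follow the KSOCK_MSG_* types):
 *
 *   sender                                  peer
 *   ------                                  ----
 *   KSOCK_MSG_LNET, zc_cookies[0] = C  --->
 *     (tx parked on ksnp_zc_req_list, keyed by C)
 *                                      <--- ZC-ACK carrying C
 *   ksocknal_handle_zcack() looks up C, drops the extra tx ref,
 *   and the tx can finally be finalized. */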
LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT (tx->tx_zc_capable);
tx->tx_zc_checked = 1;
if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
!conn->ksnc_zc_capable)
/* assign cookie and queue tx on the pending list; it will be released
 * when a matching ack is received. See ksocknal_handle_zcack() */
ksocknal_tx_addref(tx);
spin_lock(&peer->ksnp_lock);
/* ZC_REQ is going to be pinned to the peer */
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
if (peer->ksnp_zc_next_cookie == 0)
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
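/* Cookie 0 means "no ZC ack wanted" and SOCKNAL_KEEPALIVE_PING
 * (assumed to be 1, the value passed to ksocknal_alloc_tx_noop()
 * by ksocknal_send_keepalive_locked() below) is reserved for
 * keepalive pings, so on 64-bit wrap-around the counter restarts
 * just past the reserved values. */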
list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
spin_unlock(&peer->ksnp_lock);
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
tx->tx_zc_checked = 0;
spin_lock(&peer->ksnp_lock);
if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
/* Not waiting for an ACK */
spin_unlock(&peer->ksnp_lock);
tx->tx_msg.ksm_zc_cookies[0] = 0;
list_del(&tx->tx_zc_list);
spin_unlock(&peer->ksnp_lock);
ksocknal_tx_decref(tx);
ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
rc = ksocknal_transmit (conn, tx);
CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
if (tx->tx_resid == 0) {
/* Sent everything OK */
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
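/* (counter & (-counter)) == counter is true exactly when counter
 * is a power of two, so the ENOMEM warning below fires on the
 * 1st, 2nd, 4th, 8th, ... occurrence rather than flooding the
 * console on every retry. */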
CWARN("%u ENOMEM tx %p (%u allocated)\n",
counter, conn, atomic_read(&libcfs_kmemory));
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
LASSERT (conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
SOCKNAL_ENOMEM_RETRY),
ksocknal_data.ksnd_reaper_waketime))
wake_up(&ksocknal_data.ksnd_reaper_waitq);
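/* The reaper is only prodded if it is due to wake later than
 * now + SOCKNAL_ENOMEM_RETRY; otherwise it will notice the new
 * enomem conn on its own when its current sleep expires. */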
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
if (!conn->ksnc_closing) {
LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
"while we were sending data; it may have "
"rebooted.\n",
HIPQUAD(conn->ksnc_ipaddr));
LCONSOLE_WARN("There was an unexpected network error "
"while writing to %u.%u.%u.%u: %d.\n",
HIPQUAD(conn->ksnc_ipaddr), rc);
CDEBUG(D_NET, "[%p] Error %d on write to %s"
" ip %d.%d.%d.%d:%d\n", conn, rc,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
if (tx->tx_zc_checked)
ksocknal_uncheck_zc_req(tx);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
(conn->ksnc_closing) ? 0 : rc);
ksocknal_launch_connection_locked (ksock_route_t *route)
/* called holding write lock on ksnd_global_lock */
LASSERT (!route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting);
LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
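/* ksocknal_route_mask() is assumed to return the bitmask of
 * connection types this LND establishes (ANY, or CONTROL plus
 * BULK_IN/BULK_OUT when typed connections are enabled), and
 * ksnr_connected holds the bits already established; the assert
 * therefore reads "at least one wanted type is still missing". */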
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
list_add_tail(&route->ksnr_connd_list,
&ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
ksock_route_t *route;
/* called holding write lock on ksnd_global_lock */
/* launch any/all connections that need it */
route = ksocknal_find_connectable_route_locked(peer);
ksocknal_launch_connection_locked(route);
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
struct list_head *tmp;
ksock_conn_t *typed = NULL;
ksock_conn_t *fallback = NULL;
list_for_each(tmp, &peer->ksnp_conns) {
ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
libcfs_sock_wmem_queued(c->ksnc_sock);
LASSERT (!c->ksnc_closing);
LASSERT (c->ksnc_proto != NULL &&
c->ksnc_proto->pro_match_tx != NULL);
rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
case SOCKNAL_MATCH_YES: /* typed connection */
if (typed == NULL || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
case SOCKNAL_MATCH_MAY: /* fallback connection */
if (fallback == NULL || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
/* prefer the typed selection */
conn = (typed != NULL) ? typed : fallback;
conn->ksnc_tx_last_post = cfs_time_current();
ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
conn->ksnc_proto->pro_pack(tx);
atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
ksock_sched_t *sched = conn->ksnc_scheduler;
ksock_msg_t *msg = &tx->tx_msg;
ksock_tx_t *ztx = NULL;
/* called holding global lock (read or irq-write) and caller may
 * not have dropped this lock between finding conn and calling me,
 * so we don't need the {get,put}connsock dance to deref
 * ksnc_sock */
LASSERT(!conn->ksnc_closing);
CDEBUG(D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
ksocknal_tx_prep(conn, tx);
/* Ensure the frags we've been given EXACTLY match the number of
 * bytes we want to send. Many TCP/IP stacks disregard any total
 * size parameters passed to them and just look at the frags.
 * We always expect at least 1 mapped fragment containing the
 * complete ksocknal message header. */
LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
(unsigned int)tx->tx_nob);
LASSERT (tx->tx_niov >= 1);
LASSERT (tx->tx_resid == tx->tx_nob);
CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
spin_lock_bh(&sched->kss_lock);
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_tx_bufnob = 0;
smp_mb(); /* order with adding to tx_queue */
if (msg->ksm_type == KSOCK_MSG_NOOP) {
/* The packet is a noop ZC ACK; try to piggyback the ack_cookie
 * on a normal packet so I don't need to send it */
LASSERT (msg->ksm_zc_cookies[1] != 0);
LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */
/* It's a normal packet - can it piggyback a noop zc-ack that
 * has been queued already? */
LASSERT (msg->ksm_zc_cookies[1] == 0);
LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL);
ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
/* ztx will be released later */
atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
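/* A noop tx whose cookie was piggybacked becomes a "zombie":
 * its bytes are deducted from ksnc_tx_nob and it is parked on
 * kss_zombie_noop_txs, where the scheduler frees it in batch
 * (see the zlist handling in ksocknal_scheduler()). */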
if (conn->ksnc_tx_ready && /* able to send */
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
ksocknal_conn_addref(conn);
list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
cfs_time_t now = cfs_time_current();
struct list_head *tmp;
ksock_route_t *route;
list_for_each(tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
if (route->ksnr_scheduled) /* connections being established */
/* all route types connected ? */
if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
cfs_time_aftereq(now, route->ksnr_timeout))) {
"Too soon to retry route %u.%u.%u.%u "
"(cnted %d, interval %ld, %ld secs later)\n",
HIPQUAD(route->ksnr_ipaddr),
route->ksnr_connected,
route->ksnr_retry_interval,
cfs_duration_sec(route->ksnr_timeout - now));
ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
struct list_head *tmp;
ksock_route_t *route;
list_for_each(tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
if (route->ksnr_scheduled)
ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
LASSERT (tx->tx_conn == NULL);
g_lock = &ksocknal_data.ksnd_global_lock;
for (retry = 0;; retry = 1) {
peer = ksocknal_find_peer_locked(ni, id);
if (ksocknal_find_connectable_route_locked(peer) == NULL) {
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
/* I've got no routes that need to be
 * connecting and I do have an actual
 * connection... */
ksocknal_queue_tx_locked (tx, conn);
/* I'll need a write lock... */
write_lock_bh(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
write_unlock_bh(g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
CERROR("Refusing to create a connection to "
"userspace process %s\n", libcfs_id2str(id));
return -EHOSTUNREACH;
CERROR("Can't find peer %s\n", libcfs_id2str(id));
return -EHOSTUNREACH;
rc = ksocknal_add_peer(ni, id,
LNET_NIDADDR(id.nid),
lnet_acceptor_port());
CERROR("Can't add peer %s: %d\n",
libcfs_id2str(id), rc);
ksocknal_launch_all_connections_locked(peer);
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
/* Connection exists; queue message on it */
ksocknal_queue_tx_locked (tx, conn);
write_unlock_bh(g_lock);
if (peer->ksnp_accepting > 0 ||
ksocknal_find_connecting_route_locked (peer) != NULL) {
/* the message is going to be pinned to the peer */
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
write_unlock_bh(g_lock);
write_unlock_bh(g_lock);
/* NB Routes may be ignored if connections to them failed recently */
CNETERR("No usable routes to %s\n", libcfs_id2str(id));
return (-EHOSTUNREACH);
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
struct iovec *payload_iov = lntmsg->msg_iov;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
/* NB 'private' is different depending on what we're sending.
 * Just ignore it... */
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
LASSERT (payload_niov <= LNET_MAX_IOV);
/* payload is either all vaddrs or all pages */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
LASSERT (!in_interrupt ());
if (payload_iov != NULL)
desc_size = offsetof(ksock_tx_t,
tx_frags.virt.iov[1 + payload_niov]);
desc_size = offsetof(ksock_tx_t,
tx_frags.paged.kiov[payload_niov]);
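/* The "1 +" in the virt case reserves iov[0] for the socklnd
 * message header: the header always travels as the first mapped
 * fragment (see the LASSERT(tx->tx_niov >= 1) in
 * ksocknal_queue_tx_locked()). The paged case needs no extra kiov
 * because the header still goes in a separate mapped iovec
 * (tx_frags.paged.iov, assigned below). */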
if (lntmsg->msg_vmflush)
mpflag = cfs_memory_pressure_get_and_set();
tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
CERROR("Can't allocate tx desc type %d size %d\n",
if (lntmsg->msg_vmflush)
cfs_memory_pressure_restore(mpflag);
tx->tx_conn = NULL; /* set when assigned a conn */
tx->tx_lnetmsg = lntmsg;
if (payload_iov != NULL) {
tx->tx_iov = tx->tx_frags.virt.iov;
lnet_extract_iov(payload_niov, &tx->tx_iov[1],
payload_niov, payload_iov,
payload_offset, payload_nob);
tx->tx_iov = &tx->tx_frags.paged.iov;
tx->tx_kiov = tx->tx_frags.paged.kiov;
tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
payload_niov, payload_kiov,
payload_offset, payload_nob);
if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
tx->tx_zc_capable = 1;
socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
cfs_memory_pressure_restore(mpflag);
ksocknal_free_tx(tx);
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
struct task_struct *task = kthread_run(fn, arg, name);
return PTR_ERR(task);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads++;
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_thread_fini (void)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads--;
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
static char ksocknal_slop_buffer[4096];
LASSERT(conn->ksnc_proto != NULL);
if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
/* Remind the socket to ack eagerly... */
ksocknal_lib_eager_ack(conn);
if (nob_to_skip == 0) { /* right at next packet boundary now */
conn->ksnc_rx_started = 0;
smp_mb(); /* racing with timeout thread */
switch (conn->ksnc_proto->pro_version) {
case KSOCK_PROTO_V2:
case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
case KSOCK_PROTO_V1:
/* Receiving bare lnet_hdr_t */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_csum = ~0;
/* Set up to skip as much as possible now. If there's more left
 * (ran out of iov entries) we'll get called again */
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
conn->ksnc_rx_iov[niov].iov_len = nob;
} while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
conn->ksnc_rx_niov = niov;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_nob_wanted = skipped;
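/* Worked example of the slop setup above: to skip 10000 bytes with
 * the 4096-byte static slop buffer, the loop builds 3 iovs
 * (4096 + 4096 + 1808 = 10000 bytes) all pointing at the same
 * buffer; the data is read and discarded. If nob_to_skip were
 * large enough to exhaust ksnc_rx_iov_space first, the leftover
 * would be picked up on the next call via ksnc_rx_nob_left. */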
ksocknal_process_receive (ksock_conn_t *conn)
lnet_process_id_t *id;
LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
if (conn->ksnc_rx_nob_wanted != 0) {
rc = ksocknal_receive(conn);
LASSERT (rc != -EAGAIN);
CDEBUG (D_NET, "[%p] EOF from %s"
" ip %d.%d.%d.%d:%d\n", conn,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
else if (!conn->ksnc_closing)
CERROR ("[%p] Error %d on read from %s"
" ip %d.%d.%d.%d:%d\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
(conn->ksnc_closing) ? 0 : rc);
return (rc == 0 ? -ESHUTDOWN : rc);
if (conn->ksnc_rx_nob_wanted != 0) {
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_KSM_HEADER:
if (conn->ksnc_flip) {
__swab32s(&conn->ksnc_msg.ksm_type);
__swab32s(&conn->ksnc_msg.ksm_csum);
__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
CERROR("%s: Unknown message type: %x\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
conn->ksnc_msg.ksm_type);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, -EPROTO);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
/* NOOP Checksum error */
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, -EPROTO);
if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
cookie = conn->ksnc_msg.ksm_zc_cookies[0];
rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
conn->ksnc_msg.ksm_zc_cookies[1]);
CERROR("%s: Unknown ZC-ACK cookie: "LPU64", "LPU64"\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, -EPROTO);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
ksocknal_new_packet (conn, 0);
return 0; /* NOOP is done and just return */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
goto again; /* read lnet header now */
case SOCKNAL_RX_LNET_HEADER:
/* unpack message header */
conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
/* Userspace peer */
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
/* Substitute process ID assigned at connection time */
lhdr->src_pid = cpu_to_le32(id->pid);
lhdr->src_nid = cpu_to_le64(id->nid);
conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
ksocknal_conn_addref(conn); /* ++ref while parsing */
rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
conn->ksnc_peer->ksnp_id.nid, conn, 0);
/* I just received garbage: give up on this conn */
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings (conn, rc);
ksocknal_conn_decref(conn);
/* I'm racing with ksocknal_recv() */
LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
/* ksocknal_recv() got called */
case SOCKNAL_RX_LNET_PAYLOAD:
/* payload all received */
if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
rc = conn->ksnc_proto->pro_handle_zcreq(conn,
conn->ksnc_msg.ksm_zc_cookies[0],
*ksocknal_tunables.ksnd_nonblk_zcack ||
le64_to_cpu(lhdr->src_nid) != id->nid);
lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings (conn, rc);
case SOCKNAL_RX_SLOP:
/* starting new packet? */
if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
return 0; /* come back later */
goto again; /* try to finish reading slop now */
return (-EINVAL); /* keep gcc happy */
ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
ksock_conn_t *conn = (ksock_conn_t *)private;
ksock_sched_t *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
LASSERT (niov <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
if (mlen == 0 || iov != NULL) {
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
conn->ksnc_rx_niov =
lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
niov, iov, offset, mlen);
conn->ksnc_rx_niov = 0;
conn->ksnc_rx_iov = NULL;
conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
conn->ksnc_rx_nkiov =
lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
niov, kiov, offset, mlen);
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
LASSERT (conn->ksnc_rx_scheduled);
spin_lock_bh(&sched->kss_lock);
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
wake_up(&sched->kss_waitq);
LASSERT(conn->ksnc_rx_ready);
case SOCKNAL_RX_PARSE:
/* scheduler hasn't noticed I'm parsing yet */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
spin_unlock_bh(&sched->kss_lock);
ksocknal_conn_decref(conn);
ksocknal_sched_cansleep(ksock_sched_t *sched)
spin_lock_bh(&sched->kss_lock);
rc = (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&sched->kss_rx_conns) &&
list_empty(&sched->kss_tx_conns));
spin_unlock_bh(&sched->kss_lock);
int ksocknal_scheduler(void *arg)
struct ksock_sched_info *info;
ksock_sched_t *sched;
long id = (long)arg;
info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
cfs_block_allsigs();
rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
CERROR("Can't set CPT affinity to %d: %d\n",
spin_lock_bh(&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
int did_something = 0;
/* Ensure I progress everything semi-fairly */
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
ksock_conn_t, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
/* clear rx_ready in case receive isn't complete.
 * Do it BEFORE we call process_recv, since
 * data_ready can set it any time after we release
conn->ksnc_rx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
rc = ksocknal_process_receive(conn);
spin_lock_bh(&sched->kss_lock);
/* I'm the only one that can clear this flag */
LASSERT(conn->ksnc_rx_scheduled);
/* Did process_receive get everything it wanted? */
conn->ksnc_rx_ready = 1;
if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
/* Conn blocked waiting for ksocknal_recv();
 * I change its state (under lock) to signal
 * it can be rescheduled */
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
list_add_tail(&conn->ksnc_rx_list,
&sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 0;
ksocknal_conn_decref(conn);
if (!list_empty(&sched->kss_tx_conns)) {
struct list_head zlist = LIST_HEAD_INIT(zlist);
if (!list_empty(&sched->kss_zombie_noop_txs)) {
&sched->kss_zombie_noop_txs);
list_del_init(&sched->kss_zombie_noop_txs);
conn = list_entry(sched->kss_tx_conns.next,
ksock_conn_t, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
ksock_tx_t, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
/* dequeue now so empty list => more to send */
list_del(&tx->tx_list);
/* Clear tx_ready in case send isn't complete. Do
 * it BEFORE we call process_transmit, since
 * write_space can set it any time after we release
conn->ksnc_tx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
if (!list_empty(&zlist)) {
/* free zombie noop txs; it's fast because
 * noop txs are just put back on the freelist */
ksocknal_txlist_done(NULL, &zlist, 0);
rc = ksocknal_process_transmit(conn, tx);
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
spin_lock_bh(&sched->kss_lock);
list_add(&tx->tx_list,
&conn->ksnc_tx_queue);
/* Complete send; tx -ref */
ksocknal_tx_decref(tx);
spin_lock_bh(&sched->kss_lock);
/* assume space for more */
conn->ksnc_tx_ready = 1;
if (rc == -ENOMEM) {
/* Do nothing; after a short timeout, this
 * conn will be reposted on kss_tx_conns. */
} else if (conn->ksnc_tx_ready &&
!list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 0;
ksocknal_conn_decref(conn);
if (!did_something || /* nothing to do */
++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
spin_unlock_bh(&sched->kss_lock);
if (!did_something) { /* wait for something to do */
rc = wait_event_interruptible_exclusive(
!ksocknal_sched_cansleep(sched));
spin_lock_bh(&sched->kss_lock);
spin_unlock_bh(&sched->kss_lock);
ksocknal_thread_fini();
* Add connection to kss_rx_conns of scheduler
* and wake up the scheduler.
void ksocknal_read_callback (ksock_conn_t *conn)
ksock_sched_t *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
conn->ksnc_rx_ready = 1;
if (!conn->ksnc_rx_scheduled) { /* not being progressed */
list_add_tail(&conn->ksnc_rx_list,
&sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
wake_up (&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
* Add connection to kss_tx_conns of scheduler
* and wake up the scheduler.
void ksocknal_write_callback(ksock_conn_t *conn)
ksock_sched_t *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
conn->ksnc_tx_ready = 1;
if (!conn->ksnc_tx_scheduled && /* not being progressed */
!list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
if (hello->kshm_magic == LNET_PROTO_MAGIC)
version = hello->kshm_version;
else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
version = __swab32(hello->kshm_version);
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 1)
if (*ksocknal_tunables.ksnd_protocol == 2 &&
version == KSOCK_PROTO_V3)
if (version == KSOCK_PROTO_V2)
return &ksocknal_protocol_v2x;
if (version == KSOCK_PROTO_V3)
return &ksocknal_protocol_v3x;
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
CLASSERT (sizeof (lnet_magicversion_t) ==
offsetof (ksock_hello_msg_t, kshm_src_nid));
if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
return &ksocknal_protocol_v1x;
ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);
/* rely on caller to hold a ref on socket so it won't disappear */
LASSERT (conn->ksnc_proto != NULL);
hello->kshm_src_nid = ni->ni_nid;
hello->kshm_dst_nid = peer_nid;
hello->kshm_src_pid = the_lnet.ln_pid;
hello->kshm_src_incarnation = net->ksnn_incarnation;
hello->kshm_ctype = conn->ksnc_type;
return conn->ksnc_proto->pro_send_hello(conn, hello);
ksocknal_invert_type(int type)
case SOCKLND_CONN_ANY:
case SOCKLND_CONN_CONTROL:
case SOCKLND_CONN_BULK_IN:
return SOCKLND_CONN_BULK_OUT;
case SOCKLND_CONN_BULK_OUT:
return SOCKLND_CONN_BULK_IN;
return (SOCKLND_CONN_NONE);
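/* The inversion viewed from each end of the socket: ANY and
 * CONTROL pair with themselves, while a connection that is
 * BULK_IN for the peer must be BULK_OUT for us and vice versa.
 * Anything else maps to SOCKLND_CONN_NONE and is rejected as a
 * protocol error by the callers in ksocknal_recv_hello(). */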
ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
/* Return < 0 fatal error
 * EALREADY lost connection race
 * EPROTO protocol version mismatch
cfs_socket_t *sock = conn->ksnc_sock;
int active = (conn->ksnc_proto != NULL);
ksock_proto_t *proto;
lnet_process_id_t recv_id;
/* socket type set on active connections - not set on passive */
LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
rc, HIPQUAD(conn->ksnc_ipaddr));
if (hello->kshm_magic != LNET_PROTO_MAGIC &&
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
CERROR ("Bad magic(1) %#08x (%#08x expected) from "
"%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
LNET_PROTO_TCP_MAGIC,
HIPQUAD(conn->ksnc_ipaddr));
rc = libcfs_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
rc, HIPQUAD(conn->ksnc_ipaddr));
proto = ksocknal_parse_proto_version(hello);
if (proto == NULL) {
/* unknown protocol from peer, tell peer my protocol */
conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 2)
conn->ksnc_proto = &ksocknal_protocol_v2x;
else if (*ksocknal_tunables.ksnd_protocol == 1)
conn->ksnc_proto = &ksocknal_protocol_v1x;
hello->kshm_nips = 0;
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
CERROR ("Unknown protocol version (%d.x expected)"
" from %u.%u.%u.%u\n",
conn->ksnc_proto->pro_version,
HIPQUAD(conn->ksnc_ipaddr));
proto_match = (conn->ksnc_proto == proto);
conn->ksnc_proto = proto;
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
CERROR("Error %d reading or checking hello from %u.%u.%u.%u\n",
rc, HIPQUAD(conn->ksnc_ipaddr));
*incarnation = hello->kshm_src_incarnation;
if (hello->kshm_src_nid == LNET_NID_ANY) {
CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
"from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
/* Userspace NAL assigns peer process ID from socket */
recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
recv_id.nid = hello->kshm_src_nid;
recv_id.pid = hello->kshm_src_pid;
/* peer determines type */
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
CERROR ("Unexpected type %d from %s ip %u.%u.%u.%u\n",
hello->kshm_ctype, libcfs_id2str(*peerid),
HIPQUAD(conn->ksnc_ipaddr));
if (peerid->pid != recv_id.pid ||
peerid->nid != recv_id.nid) {
LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
" %u.%u.%u.%u, but they claimed they were "
"%s; please check your Lustre "
"configuration.\n",
libcfs_id2str(*peerid),
HIPQUAD(conn->ksnc_ipaddr),
libcfs_id2str(recv_id));
if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
/* Possible protocol mismatch or I lost the connection race */
return proto_match ? EALREADY : EPROTO;
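/* Note the positive errno convention here: the function-level
 * comment above documents EALREADY/EPROTO (positive) as non-fatal
 * "retry" indications, while real failures are returned as
 * conventional negative errnos. Callers such as ksocknal_connect()
 * treat a +ve return as "retry later". */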
1832 if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
1833 CERROR ("Mismatched types: me %d, %s ip %u.%u.%u.%u %d\n",
1834 conn->ksnc_type, libcfs_id2str(*peerid),
1835 HIPQUAD(conn->ksnc_ipaddr),
1844 ksocknal_connect (ksock_route_t *route)
1846 struct list_head zombies = LIST_HEAD_INIT(zombies);
1847 ksock_peer_t *peer = route->ksnr_peer;
1851 cfs_time_t deadline;
1852 int retry_later = 0;
1855 deadline = cfs_time_add(cfs_time_current(),
1856 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
1858 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1860 LASSERT (route->ksnr_scheduled);
1861 LASSERT (!route->ksnr_connecting);
1863 route->ksnr_connecting = 1;
1866 wanted = ksocknal_route_mask() & ~route->ksnr_connected;
1868 /* stop connecting if peer/route got closed under me, or
1869 * route got connected while queued */
1870 if (peer->ksnp_closing || route->ksnr_deleted ||
1876 /* reschedule if peer is connecting to me */
1877 if (peer->ksnp_accepting > 0) {
1879 "peer %s(%d) already connecting to me, retry later.\n",
1880 libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
1884 if (retry_later) /* needs reschedule */
1887 if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
1888 type = SOCKLND_CONN_ANY;
1889 } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
1890 type = SOCKLND_CONN_CONTROL;
1891 } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
1892 type = SOCKLND_CONN_BULK_IN;
1894 LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
1895 type = SOCKLND_CONN_BULK_OUT;
1898 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1900 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
1902 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1908 rc = lnet_connect(&sock, peer->ksnp_id.nid,
1909 route->ksnr_myipaddr,
1910 route->ksnr_ipaddr, route->ksnr_port);
1914 rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
1916 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1922 /* A +ve RC means I have to retry because I lost the connection
1923 * race or I have to renegotiate protocol version */
1924 retry_later = (rc != 0);
1926 CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
1927 libcfs_nid2str(peer->ksnp_id.nid));
1929 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1932 route->ksnr_scheduled = 0;
1933 route->ksnr_connecting = 0;
1936 /* re-queue for attention; this frees me up to handle
1937 * the peer's incoming connection request */
1939 if (rc == EALREADY ||
1940 (rc == 0 && peer->ksnp_accepting > 0)) {
1941 /* We want to introduce a delay before next
1942 * attempt to connect if we lost conn race,
1943 * but the race is resolved quickly usually,
1944 * so min_reconnectms should be good heuristic */
1945 route->ksnr_retry_interval =
1946 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
1947 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1948 route->ksnr_retry_interval);
1951 ksocknal_launch_connection_locked(route);
1954 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1958 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1960 route->ksnr_scheduled = 0;
1961 route->ksnr_connecting = 0;
1963 /* This is a retry rather than a new connection */
1964 route->ksnr_retry_interval *= 2;
1965 route->ksnr_retry_interval =
1966 MAX(route->ksnr_retry_interval,
1967 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
1968 route->ksnr_retry_interval =
1969 MIN(route->ksnr_retry_interval,
1970 cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
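/* Binary exponential backoff clamped to the tunables, i.e.
 *
 *   interval' = min(max(2 * interval, min_reconnectms / 1000),
 *                   max_reconnectms / 1000)
 *
 * For example, with assumed defaults of min_reconnectms = 1000 and
 * max_reconnectms = 60000, retries back off 1, 2, 4, ... seconds
 * up to a ceiling of 60 seconds between attempts. */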
LASSERT (route->ksnr_retry_interval != 0);
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
if (!list_empty(&peer->ksnp_tx_queue) &&
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
/* ksnp_tx_queue is queued on a conn on successful
 * connection for V1.x and V2.x */
if (!list_empty(&peer->ksnp_conns)) {
conn = list_entry(peer->ksnp_conns.next,
ksock_conn_t, ksnc_list);
LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
/* take all the blocked packets while I've got the lock and
 * complete below... */
list_splice_init(&peer->ksnp_tx_queue, &zombies);
#if 0 /* irrelevant with only eager routes */
if (!route->ksnr_deleted) {
/* make this route least-favourite for re-selection */
list_del(&route->ksnr_list);
list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_peer_failed(peer);
ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
* check whether we need to create more connds.
* It will try to create a new thread if necessary; @timeout can
* be updated if thread creation fails, so the caller won't keep
* retrying while out of resources.
ksocknal_connd_check_start(long sec, long *timeout)
int total = ksocknal_data.ksnd_connd_starting +
ksocknal_data.ksnd_connd_running;
if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
/* still initializing */
if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
/* can't create more connds, or still have enough
 * threads to handle more connecting */
if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
/* no pending connecting request */
if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
/* may run out of resource, retry later */
*timeout = cfs_time_seconds(1);
if (ksocknal_data.ksnd_connd_starting > 0) {
/* serialize starting to avoid flood */
ksocknal_data.ksnd_connd_starting_stamp = sec;
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
/* NB: total is the next id */
snprintf(name, sizeof(name), "socknal_cd%02d", total);
rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
LASSERT(ksocknal_data.ksnd_connd_starting > 0);
ksocknal_data.ksnd_connd_starting--;
ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
* check whether the current thread can exit; it will return 1 if there
* are too many threads and none were created in the past 120 seconds.
* Also, this function may update @timeout to make the caller come back
* again to recheck these conditions.
ksocknal_connd_check_stop(long sec, long *timeout)
if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
/* still initializing */
if (ksocknal_data.ksnd_connd_starting > 0) {
/* in progress of starting new thread */
if (ksocknal_data.ksnd_connd_running <=
*ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
/* created thread in past 120 seconds? */
val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
SOCKNAL_CONND_TIMEOUT - sec);
*timeout = (val > 0) ? cfs_time_seconds(val) :
cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
/* no thread created in the past 120 seconds */
return ksocknal_data.ksnd_connd_running >
ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
/* Go through the connd_routes queue looking for a route that we can
 * process right now; @timeout_p can be updated if we need to come
 * back later */
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
ksock_route_t *route;
now = cfs_time_current();
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
if (route->ksnr_retry_interval == 0 ||
cfs_time_aftereq(now, route->ksnr_timeout))
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
(int)*timeout_p > (int)(route->ksnr_timeout - now))
*timeout_p = (int)(route->ksnr_timeout - now);
ksocknal_connd (void *arg)
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
ksock_connreq_t *cr;
cfs_block_allsigs ();
init_waitqueue_entry_current(&wait);
spin_lock_bh(connd_lock);
LASSERT(ksocknal_data.ksnd_connd_starting > 0);
ksocknal_data.ksnd_connd_starting--;
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
ksock_route_t *route = NULL;
long sec = cfs_time_current_sec();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
if (ksocknal_connd_check_stop(sec, &timeout)) {
/* wake up another one to check stop */
wake_up(&ksocknal_data.ksnd_connd_waitq);
if (ksocknal_connd_check_start(sec, &timeout)) {
/* created new thread */
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
ksock_connreq_t, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
ksocknal_create_conn(cr->ksncr_ni, NULL,
cr->ksncr_sock, SOCKLND_CONN_NONE);
lnet_ni_decref(cr->ksncr_ni);
LIBCFS_FREE(cr, sizeof(*cr));
spin_lock_bh(connd_lock);
/* Only handle an outgoing connection request if there
 * is a thread left to handle incoming connections and
 * create new connds */
if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
ksocknal_data.ksnd_connd_running) {
route = ksocknal_connd_get_route_locked(&timeout);
if (route != NULL) {
list_del(&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
if (ksocknal_connect(route)) {
/* consecutive retry */
if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
CWARN("massive consecutive "
"re-connecting to %u.%u.%u.%u\n",
HIPQUAD(route->ksnr_ipaddr));
ksocknal_route_decref(route);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_connecting--;
if (++nloops < SOCKNAL_RESCHED)
spin_unlock_bh(connd_lock);
spin_lock_bh(connd_lock);
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_running--;
spin_unlock_bh(connd_lock);
ksocknal_thread_fini();
ksocknal_find_timed_out_conn (ksock_peer_t *peer)
/* We're called with a shared lock on ksnd_global_lock */
struct list_head *ctmp;
list_for_each(ctmp, &peer->ksnp_conns) {
conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
error = libcfs_sock_error(conn->ksnc_sock);
ksocknal_conn_addref(conn);
CNETERR("A connection with %s "
"(%u.%u.%u.%u:%d) was reset; "
"it may have rebooted.\n",
libcfs_id2str(peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
CNETERR("A connection with %s "
"(%u.%u.%u.%u:%d) timed out; the "
"network or node may be down.\n",
libcfs_id2str(peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
CNETERR("An unexpected network error %d "
"(%u.%u.%u.%u:%d)\n", error,
libcfs_id2str(peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
if (conn->ksnc_rx_started &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
CNETERR("Timeout receiving from %s (%u.%u.%u.%u:%d), "
"state %d wanted %d left %d\n",
libcfs_id2str(peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
conn->ksnc_rx_state,
conn->ksnc_rx_nob_wanted,
conn->ksnc_rx_nob_left);
if ((!list_empty(&conn->ksnc_tx_queue) ||
libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
/* Timed out messages queued for sending or
 * buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
CNETERR("Timeout sending data to %s (%u.%u.%u.%u:%d); "
"the network or that node may be down.\n",
libcfs_id2str(peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr),
ksocknal_flush_stale_txs(ksock_peer_t *peer)
struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
while (!list_empty(&peer->ksnp_tx_queue)) {
tx = list_entry(peer->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
if (!cfs_time_aftereq(cfs_time_current(),
list_del(&tx->tx_list);
list_add_tail(&tx->tx_list, &stale_txs);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
__must_hold(&ksocknal_data.ksnd_global_lock)
ksock_sched_t *sched;
/* last_alive will be updated by create_conn */
if (list_empty(&peer->ksnp_conns))
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
cfs_time_before(cfs_time_current(),
cfs_time_add(peer->ksnp_last_alive,
cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
if (cfs_time_before(cfs_time_current(),
peer->ksnp_send_keepalive))
/* retry 10 secs later, so we won't put pressure
 * on this peer if we failed to send keepalive this time */
peer->ksnp_send_keepalive = cfs_time_shift(10);
conn = ksocknal_find_conn_locked(peer, NULL, 1);
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
if (!list_empty(&conn->ksnc_tx_queue)) {
spin_unlock_bh(&sched->kss_lock);
/* there is a queued ACK, don't need keepalive */
spin_unlock_bh(&sched->kss_lock);
read_unlock(&ksocknal_data.ksnd_global_lock);
/* cookie = 1 is reserved for keepalive PING */
tx = ksocknal_alloc_tx_noop(1, 1);
read_lock(&ksocknal_data.ksnd_global_lock);
if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
read_lock(&ksocknal_data.ksnd_global_lock);
ksocknal_free_tx(tx);
read_lock(&ksocknal_data.ksnd_global_lock);
ksocknal_check_peer_timeouts (int idx)
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
/* NB. We expect to have a look at all the peers and not find any
 * connections to time out, so we just use a shared lock while we
 * take a look... */
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer, peers, ksnp_list) {
cfs_time_t deadline = 0;
if (ksocknal_send_keepalive_locked(peer) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_timed_out_conn (peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
/* NB we won't find this one again, but we can't
 * just proceed with the next peer, since we dropped
 * ksnd_global_lock and it might be dead already! */
ksocknal_conn_decref(conn);
/* we can't process stale txs right here because we're
 * holding only shared lock */
if (!list_empty(&peer->ksnp_tx_queue)) {
list_entry(peer->ksnp_tx_queue.next,
ksock_tx_t, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_flush_stale_txs(peer);
ksocknal_peer_decref(peer);
if (list_empty(&peer->ksnp_zc_req_list))
spin_lock(&peer->ksnp_lock);
list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
if (!cfs_time_aftereq(cfs_time_current(),
/* ignore the TX if connection is being closed */
if (tx->tx_conn->ksnc_closing)
spin_unlock(&peer->ksnp_lock);
tx = list_entry(peer->ksnp_zc_req_list.next,
ksock_tx_t, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
ksocknal_conn_addref(conn);
spin_unlock(&peer->ksnp_lock);
read_unlock(&ksocknal_data.ksnd_global_lock);
CERROR("Total %d stale ZC_REQs for peer %s detected; the "
"oldest(%p) timed out %ld secs ago, "
"resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer->ksnp_id.nid), tx,
cfs_duration_sec(cfs_time_current() - deadline),
resid, libcfs_sock_wmem_queued(conn->ksnc_sock));
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
ksocknal_conn_decref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
int ksocknal_reaper(void *arg)
ksock_sched_t *sched;
struct list_head enomem_conns;
cfs_duration_t timeout;
cfs_time_t deadline = cfs_time_current();
cfs_block_allsigs ();
INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry_current(&wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
ksock_conn_t, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
ksocknal_terminate_conn(conn);
ksocknal_conn_decref(conn);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
ksock_conn_t, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
ksocknal_destroy_conn(conn);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
list_add(&enomem_conns,
&ksocknal_data.ksnd_enomem_conns);
list_del_init(&ksocknal_data.ksnd_enomem_conns);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
while (!list_empty(&enomem_conns)) {
conn = list_entry(enomem_conns.next,
ksock_conn_t, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
LASSERT(conn->ksnc_tx_scheduled);
conn->ksnc_tx_ready = 1;
list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
/* careful with the jiffy wrap... */
while ((timeout = cfs_time_sub(deadline,
cfs_time_current())) <= 0) {
int chunk = ksocknal_data.ksnd_peer_hash_size;
/* Time to check for timeouts on a few more peers: I do
 * checks every 'p' seconds on a proportion of the peer
 * table and I need to check every connection 'n' times
 * within a timeout interval, to ensure I detect a
 * timeout on any connection within (n+1)/n times the
 * timeout interval. */
if (*ksocknal_tunables.ksnd_timeout > n * p)
chunk = (chunk * n * p) /
*ksocknal_tunables.ksnd_timeout;
for (i = 0; i < chunk; i++) {
ksocknal_check_peer_timeouts (peer_index);
peer_index = (peer_index + 1) %
ksocknal_data.ksnd_peer_hash_size;
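/* Worked example of the chunk formula above, assuming n = 4
 * checks per timeout interval, p = 1 second per reaper pass and
 * a peer hash size of 256: with ksnd_timeout = 64s,
 * chunk = 256 * 4 * 1 / 64 = 16, so 16 hash buckets are scanned
 * per pass, the whole table is covered every 16 seconds, and each
 * connection is therefore checked 4 times per timeout interval. */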
deadline = cfs_time_add(deadline, cfs_time_seconds(p));
if (nenomem_conns != 0) {
/* Reduce my timeout if I rescheduled ENOMEM conns.
 * This also prevents me getting woken immediately
 * if any go back on my enomem list. */
timeout = SOCKNAL_ENOMEM_RETRY;
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
list_empty(&ksocknal_data.ksnd_zombie_conns))
waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
ksocknal_thread_fini();