/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <libcfs/linux/linux-mem.h>
#include "socklnd.h"
#include <linux/sunrpc/addr.h>
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
	struct ksock_tx *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs,
					      struct ksock_tx, tx_list);
		if (tx) {
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	refcount_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_hstatus = LNET_MSG_STATUS_OK;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}
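
/* Note on the freelist above: NOOP txs are all the same fixed size
 * (KSOCK_NOOP_TX_SIZE), so retired ones are parked on
 * ksnd_idle_noop_txs and reused instead of being returned to the
 * allocator.  This keeps keepalive and ZC-ACK traffic, which is
 * frequent and small, off the allocation fast path.
 */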
struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	struct ksock_tx *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}
void
ksocknal_free_tx(struct ksock_tx *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);
		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}
static int
ksocknal_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
		  struct kvec *scratch_iov)
{
	struct kvec *iov = &tx->tx_hdr;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_hdr inside ksocknal_lib_send_hdr() */
	rc = ksocknal_lib_send_hdr(conn, tx, scratch_iov);

	if (rc <= 0)			/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	LASSERT(tx->tx_niov == 1);

	if (nob < (int) iov->iov_len) {
		iov->iov_base += nob;
		iov->iov_len -= nob;
		return rc;
	}

	LASSERT(nob == iov->iov_len);
	tx->tx_hdr.iov_base = NULL;
	tx->tx_hdr.iov_len = 0;
	tx->tx_niov = 0;

	return rc;
}
static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
		   struct kvec *scratch_iov)
{
	struct bio_vec *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov == 0);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);

	if (rc <= 0)			/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return rc;
		}

		nob -= (int)kiov->bv_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
		  struct kvec *scratch_iov)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0)
		schedule_timeout_uninterruptible(
			cfs_time_seconds(ksocknal_data.ksnd_stall_tx));

	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			rc = ksocknal_send_hdr(conn, tx, scratch_iov);
		} else {
			rc = ksocknal_send_kiov(conn, tx, scratch_iov);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0)			/* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/* allocated send buffer bytes < computed; infer
			 * something got ACKed */
			conn->ksnc_tx_deadline = ktime_get_seconds() +
						 ksocknal_timeout();
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
			conn->ksnc_tx_bufnob = bufnob;
			smp_mb();
		}

		if (rc <= 0) { /* Didn't write anything? */
			/* some stacks return 0 instead of -EAGAIN */
			if (rc == 0)
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}
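
/* Illustration of the ACK inference above: ksnc_tx_bufnob counts the
 * bytes we have handed to the socket since the deadline was last
 * reset, while sk_wmem_queued is what the socket still holds.  If we
 * accounted, say, 8192 bytes but sk_wmem_queued reports only 4096, the
 * kernel must have freed bytes after the peer ACKed them, so the peer
 * is alive and the tx deadline can be pushed out.
 */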
static int
ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn, scratchiov);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 ksocknal_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
		   struct kvec *scratch_iov)
{
	struct bio_vec *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);
	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_kiov */
	rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 ksocknal_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int) kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return -EAGAIN;
		}

		nob -= kiov->bv_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
		 struct kvec *scratch_iov)
{
	/* Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion. */
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0)
		schedule_timeout_uninterruptible(
			cfs_time_seconds(ksocknal_data.ksnd_stall_rx));

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov(conn, scratch_iov);
		else
			rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
						scratch_iov);

		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (rc == 0 && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */

		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}
void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
	struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
	enum lnet_msg_hstatus hstatus = tx->tx_hstatus;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
		rc = -EIO;
		if (hstatus == LNET_MSG_STATUS_OK)
			hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	ksocknal_free_tx(tx);
	if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP goes without lnetmsg */
		lnetmsg->msg_health_status = hstatus;
		lnet_finalize(lnetmsg, rc);
	}
}
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
	struct ksock_tx *tx;

	while ((tx = list_first_entry_or_null(txlist, struct ksock_tx,
					      tx_list)) != NULL) {
		if (error && tx->tx_lnetmsg) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				tx->tx_lnetmsg->msg_type,
				tx->tx_lnetmsg->msg_len,
				libcfs_nidstr(&tx->tx_lnetmsg->msg_initiator),
				libcfs_nidstr(&tx->tx_lnetmsg->msg_target.nid));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
			if (error == -ETIMEDOUT)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			else if (error == -ENETDOWN ||
				 error == -EHOSTUNREACH ||
				 error == -ENETUNREACH ||
				 error == -ECONNREFUSED ||
				 error == -ECONNRESET)
				tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
			else if (error)
				/* for all other errors we don't want to
				 * retransmit
				 */
				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		}

		LASSERT(refcount_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx, error);
	}
}
static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
	struct ksock_conn *conn = tx->tx_conn;
	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer_ni will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/* assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack() */
	ksocknal_tx_addref(tx);

	spin_lock(&peer_ni->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer_ni */
	tx->tx_deadline = ktime_get_seconds() +
			  ksocknal_timeout();

	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

	if (peer_ni->ksnp_zc_next_cookie == 0)
		peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

	spin_unlock(&peer_ni->ksnp_lock);
}
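
/* Cookie lifecycle sketch: cookies increase monotonically per peer_ni
 * (e.g. 2, 3, 4, ...), and on wraparound the counter restarts above
 * SOCKNAL_KEEPALIVE_PING so a data cookie can never collide with the
 * reserved keepalive cookie.  The tx holds an extra ref until
 * ksocknal_handle_zcack() matches the cookie carried in the peer's ACK
 * and drops it.
 */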
static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
	struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer_ni->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		/* Not waiting for an ACK */
		spin_unlock(&peer_ni->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer_ni->ksnp_lock);

	ksocknal_tx_decref(tx);
}
static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
			  struct kvec *scratch_iov)
{
	int rc;
	bool error_sim = false;

	if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
		error_sim = true;
		rc = -EINVAL;
		goto simulate_error;
	}

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx, scratch_iov);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		/* Sent everything OK */
		LASSERT(rc == 0);
		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++;   /* exponential backoff warnings */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%lld allocated)\n",
			      counter, conn, libcfs_kmem_read());

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
		    ksocknal_data.ksnd_reaper_waketime)
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		return rc;
	}

simulate_error:

	/* Actual error */
	LASSERT(rc < 0);

	if (!error_sim) {
		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		if (rc == -ETIMEDOUT)
			tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
		else
			tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pISc reset our connection while we were sending data; it may have rebooted.\n",
				      &conn->ksnc_peeraddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error while writing to %pISc: %d.\n",
				      &conn->ksnc_peeraddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pIScp\n",
		       conn, rc, libcfs_idstr(&conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_peeraddr);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn,
					 (conn->ksnc_closing) ? 0 : rc);

	return rc;
}
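
/* About the "(counter & (-counter)) == counter" test above: in two's
 * complement, counter & -counter isolates the lowest set bit, e.g.
 *   counter = 12 (0b1100): 12 & -12 = 4  != 12 -> silent
 *   counter = 16 (0b10000): 16 & -16 = 16 == 16 -> warn
 * so the test is true exactly when counter is a power of two, and the
 * ENOMEM warning fires on occurrences 1, 2, 4, 8, ... rather than
 * flooding the console.
 */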
void
ksocknal_launch_connection_locked(struct ksock_conn_cb *conn_cb)
{
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!conn_cb->ksnr_scheduled);
	LASSERT(!conn_cb->ksnr_connecting);
	LASSERT((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) != 0);

	/* scheduling conn for connd */
	conn_cb->ksnr_scheduled = 1;

	/* extra ref for connd */
	ksocknal_conn_cb_addref(conn_cb);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&conn_cb->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
	struct ksock_conn_cb *conn_cb;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		conn_cb = ksocknal_find_connectable_conn_cb_locked(peer_ni);
		if (conn_cb == NULL)
			return;

		ksocknal_launch_connection_locked(conn_cb);
	}
}
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx,
			  int nonblk)
{
	struct ksock_conn *c;
	struct ksock_conn *conn;
	struct ksock_conn *typed = NULL;
	struct ksock_conn *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each_entry(c, &peer_ni->ksnp_conns, ksnc_list) {
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto != NULL &&
			c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = ktime_get_seconds();

	return conn;
}
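
/* Selection policy recap: a SOCKNAL_MATCH_YES (typed) connection
 * always beats a SOCKNAL_MATCH_MAY (fallback) one.  Within each class
 * the connection with the fewest queued bytes wins, and the
 * ksnd_round_robin tunable breaks exact ties in favour of the
 * least-recently-used connection.
 */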
void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}
void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
	struct ksock_sched *sched = conn->ksnc_scheduler;
	struct ksock_msg *msg = &tx->tx_msg;
	struct ksock_tx *ztx = NULL;
	int bufnob = 0;

	/* called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock...
	 */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pIScp\n",
	       libcfs_idstr(&conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_peeraddr);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send. Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header.
	 */
	LASSERT(lnet_iov_nob(tx->tx_niov, &tx->tx_hdr) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, tx->tx_lnetmsg ? tx->tx_lnetmsg->msg_type : KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline = ktime_get_seconds() +
					 ksocknal_timeout();
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
		conn->ksnc_tx_bufnob = 0;
		smp_mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it */
		LASSERT(msg->ksm_zc_cookies[1] != 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
	} else {
		/* It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already? */
		LASSERT(msg->ksm_zc_cookies[1] == 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx != NULL) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready &&      /* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
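
/* Piggyback example: if a ZC-ACK for cookie C is queued as a noop tx
 * and a normal KSOCK_MSG_LNET packet is posted before that ACK hits
 * the wire, pro_queue_tx_msg() can fold C into the outgoing packet's
 * ksm_zc_cookies[1] and hand back the now-redundant noop tx as 'ztx',
 * which is retired via kss_zombie_noop_txs.  One message then carries
 * both payload and acknowledgement.
 */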
struct ksock_conn_cb *
ksocknal_find_connectable_conn_cb_locked(struct ksock_peer_ni *peer_ni)
{
	time64_t now = ktime_get_seconds();
	struct ksock_conn_cb *conn_cb;

	conn_cb = peer_ni->ksnp_conn_cb;
	if (conn_cb == NULL)
		return NULL;

	LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled);

	if (conn_cb->ksnr_scheduled)	/* connections being established */
		return NULL;

	/* all conn types connected ? */
	if ((ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected) == 0)
		return NULL;

	if (!(conn_cb->ksnr_retry_interval == 0 || /* first attempt */
	      now >= conn_cb->ksnr_timeout)) {
		CDEBUG(D_NET,
		       "Too soon to retry route %pISc (cnted %d, interval %lld, %lld secs later)\n",
		       &conn_cb->ksnr_addr,
		       conn_cb->ksnr_connected,
		       conn_cb->ksnr_retry_interval,
		       conn_cb->ksnr_timeout - now);
		return NULL;
	}

	return conn_cb;
}
struct ksock_conn_cb *
ksocknal_find_connecting_conn_cb_locked(struct ksock_peer_ni *peer_ni)
{
	struct ksock_conn_cb *conn_cb;

	conn_cb = peer_ni->ksnp_conn_cb;
	if (conn_cb == NULL)
		return NULL;

	LASSERT(!conn_cb->ksnr_connecting || conn_cb->ksnr_scheduled);

	return conn_cb->ksnr_scheduled ? conn_cb : NULL;
}
int
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
		       struct lnet_processid *id)
{
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct sockaddr_storage sa;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL) {
			if (ksocknal_find_connectable_conn_cb_locked(peer_ni) == NULL) {
				conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got nothing that need to be
					 * connecting and I do have an actual
					 * connection...
					 */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id->pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to userspace process %s\n",
			       libcfs_idstr(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer_ni %s\n", libcfs_idstr(id));
			return -EHOSTUNREACH;
		}

		memset(&sa, 0, sizeof(sa));
		switch (NID_ADDR_BYTES(&id->nid)) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 *sin6;
		case 4:
			sin = (void *)&sa;
			sin->sin_family = AF_INET;
			sin->sin_addr.s_addr = id->nid.nid_addr[0];
			sin->sin_port = htons(lnet_acceptor_port());
			break;
		case 16:
			sin6 = (void *)&sa;
			sin6->sin6_family = AF_INET6;
			memcpy(&sin6->sin6_addr, id->nid.nid_addr,
			       sizeof(sin6->sin6_addr));
			sin6->sin6_port = htons(lnet_acceptor_port());
			break;
		}

		rc = ksocknal_add_peer(ni, id, (struct sockaddr *)&sa);
		if (rc != 0) {
			CERROR("Can't add peer_ni %s: %d\n",
			       libcfs_idstr(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer_ni);

	conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer_ni->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_conn_cb_locked(peer_ni) != NULL) {
		/* the message is going to be pinned to the peer_ni */
		tx->tx_deadline = ktime_get_seconds() +
				  ksocknal_timeout();

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_idstr(id));
	tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
	return -EHOSTUNREACH;
}
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
	/* '1' for consistency with code that checks !mpflag to restore */
	unsigned int mpflag = 1;
	int type = lntmsg->msg_type;
	struct lnet_processid *target = &lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct bio_vec *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	struct ksock_tx *tx;
	int desc_size;
	int rc;

	/* NB 'private' is different depending on what we're sending.
	 * Just ignore it.
	 */
	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_idstr(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(!in_interrupt());

	desc_size = offsetof(struct ksock_tx,
			     tx_payload[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = memalloc_noreclaim_save();

	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			memalloc_noreclaim_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	tx->tx_niov = 1;
	tx->tx_kiov = tx->tx_payload;
	tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
					 payload_niov, payload_kiov,
					 payload_offset, payload_nob);

	LASSERT(tx->tx_nkiov <= LNET_MAX_IOV);

	if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
		tx->tx_zc_capable = 1;

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = 0;

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	/*
	 * We can't test lntmsg->msg_vmflush again as lntmsg may
	 * have been freed.
	 */
	if (!mpflag)
		memalloc_noreclaim_restore(mpflag);

	if (rc == 0)
		return 0;

	lntmsg->msg_health_status = tx->tx_hstatus;
	ksocknal_free_tx(tx);
	return -EIO;
}
void
ksocknal_thread_fini(void)
{
	if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
		wake_up_var(&ksocknal_data.ksnd_nthreads);
}
int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];
	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (nob_to_skip == 0) {         /* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		smp_mb();               /* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = sizeof(struct ksock_msg_hdr);
			conn->ksnc_rx_nob_left = sizeof(struct ksock_msg_hdr);
			conn->ksnc_rx_iov[0].iov_len =
				sizeof(struct ksock_msg_hdr);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare struct lnet_hdr_nid4 */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr_nid4);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr_nid4);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base =
				(void *)&conn->ksnc_msg.ksm_u.lnetmsg_nid4;
			conn->ksnc_rx_iov[0].iov_len =
				sizeof(struct lnet_hdr_nid4);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/* Set up to skip as much as possible now. If there's more left
	 * (ran out of iov entries) we'll get called again */

	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;
	} while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}
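
/* Slop sizing note: every kvec points at the same 4096-byte slop
 * buffer, so one call can discard up to (available kvecs * 4096)
 * bytes; skipping e.g. 10000 bytes consumes three kvecs of 4096, 4096
 * and 1808 bytes.  Larger residues simply cause
 * ksocknal_new_packet() to be called again, as the comment above says.
 */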
static int
ksocknal_process_receive(struct ksock_conn *conn,
			 struct page **rx_scratch_pgs,
			 struct kvec *scratch_iov)
{
	struct _lnet_hdr_nid4 *lhdr;
	struct lnet_processid *id;
	struct lnet_hdr hdr;
	int rc;

	LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn, rx_scratch_pgs,
				      scratch_iov);
		if (rc <= 0) {
			struct lnet_processid *ksnp_id;

			ksnp_id = &conn->ksnc_peer->ksnp_id;

			LASSERT(rc != -EAGAIN);
			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s ip %pIScp\n",
				       conn, libcfs_idstr(ksnp_id),
				       &conn->ksnc_peeraddr);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s ip %pIScp\n",
				       conn, rc, libcfs_idstr(ksnp_id),
				       &conn->ksnc_peeraddr);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP Checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_idstr(&conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(
				conn, cookie, conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_idstr(&conn->ksnc_peer->ksnp_id),
				       cookie,
				       conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		switch (conn->ksnc_msg.ksm_type) {
		case KSOCK_MSG_NOOP:
			ksocknal_new_packet(conn, 0);
			return 0;       /* NOOP is done and just return */

		case KSOCK_MSG_LNET:
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr_nid4);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr_nid4);

			conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
			conn->ksnc_rx_iov[0].iov_base =
				(void *)&conn->ksnc_msg.ksm_u.lnetmsg_nid4;
			conn->ksnc_rx_iov[0].iov_len =
				sizeof(struct lnet_hdr_nid4);

			conn->ksnc_rx_niov = 1;
			conn->ksnc_rx_kiov = NULL;
			conn->ksnc_rx_nkiov = 0;

			goto again;     /* read lnet header now */

		default:
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_idstr(&conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg, &hdr);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			/* Userspace peer_ni */
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			hdr.src_pid = id->pid;
			hdr.src_nid = id->nid;
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn);     /* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&hdr,
				&conn->ksnc_peer->ksnp_id.nid,
				conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 &&      /* not truncating */
		    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_idstr(&conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = (void *)&conn->ksnc_msg.ksm_u.lnetmsg_nid4;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(
				conn,
				conn->ksnc_msg.ksm_zc_cookies[0],
				*ksocknal_tunables.ksnd_nonblk_zcack ||
				le64_to_cpu(lhdr->src_nid) !=
				lnet_nid_to_nid4(&id->nid));
		}

		if (rc && conn->ksnc_lnet_msg)
			conn->ksnc_lnet_msg->msg_health_status =
				LNET_MSG_STATUS_REMOTE_ERROR;
		lnet_finalize(conn->ksnc_lnet_msg, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}
		/* Fall through */

	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0;       /* come back later */
		goto again;             /* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG();
	return -EINVAL;                 /* keep gcc happy */
}
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	      int delayed, unsigned int niov,
	      struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
	      unsigned int rlen)
{
	struct ksock_conn *conn = private;
	struct ksock_sched *sched = conn->ksnc_scheduler;

	LASSERT(mlen <= rlen);

	conn->ksnc_lnet_msg = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	if (mlen == 0) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov = 0;
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT(conn->ksnc_rx_nkiov <= LNET_MAX_IOV);
	LASSERT(mlen ==
		lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}
static int
ksocknal_sched_cansleep(struct ksock_sched *sched)
{
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = (!ksocknal_data.ksnd_shuttingdown &&
	      list_empty(&sched->kss_rx_conns) &&
	      list_empty(&sched->kss_tx_conns));

	spin_unlock_bh(&sched->kss_lock);
	return rc;
}
int ksocknal_scheduler(void *arg)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;
	int rc;
	long id = (long)arg;
	struct page **rx_scratch_pgs;
	struct kvec *scratch_iov;

	sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];

	LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
			 sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
	if (!rx_scratch_pgs) {
		CERROR("Unable to allocate scratch pages\n");
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
			 sizeof(*scratch_iov) * LNET_MAX_IOV);
	if (!scratch_iov) {
		CERROR("Unable to allocate scratch iov\n");
		CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
		return -ENOMEM;
	}

	rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
	if (rc != 0) {
		CWARN("Can't set CPU partition affinity to %d: %d\n",
		      sched->kss_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		bool did_something = false;

		/* Ensure I progress everything semi-fairly */
		conn = list_first_entry_or_null(&sched->kss_rx_conns,
						struct ksock_conn,
						ksnc_rx_list);
		if (conn != NULL) {
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/* clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock. */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn, rx_scratch_pgs,
						      scratch_iov);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/* Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = true;
		}

		if (!list_empty(&sched->kss_tx_conns)) {
			LIST_HEAD(zlist);

			list_splice_init(&sched->kss_zombie_noop_txs, &zlist);

			conn = list_first_entry(&sched->kss_tx_conns,
						struct ksock_conn,
						ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_first_entry(&conn->ksnc_tx_queue,
					      struct ksock_tx, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete. Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock. */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}

			rc = ksocknal_process_transmit(conn, tx, scratch_iov);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail(&conn->ksnc_tx_list,
					      &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = true;
		}

		if (!did_something ||   /* nothing to do */
		    need_resched()) {   /* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			if (!did_something) {   /* wait for something to do */
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT(rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
	CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
	ksocknal_thread_fini();
	return 0;
}
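
/* Scheduler design note: each pass handles at most one rx conn and one
 * tx conn before re-checking, which is what keeps progress
 * "semi-fair" between the two directions; a conn that still has work
 * is re-queued at the tail, so other connections on the same CPT are
 * not starved.
 */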
/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}
	spin_unlock_bh(&sched->kss_lock);
}
/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled && /* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
static const struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		struct lnet_magicversion *hmv;

		BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
			     offsetof(struct ksock_hello_msg, kshm_src_nid));

		hmv = (struct lnet_magicversion *)hello;

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}
int
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    struct lnet_nid *peer_nid, struct ksock_hello_msg *hello)
{
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	struct ksock_net *net = (struct ksock_net *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT(conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = *peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}
static int
ksocknal_invert_type(int type)
{
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
}
int
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    struct ksock_hello_msg *hello,
		    struct lnet_processid *peerid,
		    __u64 *incarnation)
{
	/* Return < 0        fatal error
	 *        0          success
	 *        EALREADY   lost connection race
	 *        EPROTO     protocol version mismatch
	 */
	struct socket *sock = conn->ksnc_sock;
	int active = (conn->ksnc_proto != NULL);
	int timeout;
	int proto_match;
	int rc;
	const struct ksock_proto *proto;
	struct lnet_processid recv_id;

	/* socket type set on active connections - not set on passive */
	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? ksocknal_timeout() :
			   lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic,
			    sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pISc\n",
		       rc, &conn->ksnc_peeraddr);
		LASSERT(rc < 0);
		return rc;
	}

	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Bad magic(1) %#08x (%#08x expected) from %pISc\n",
		       __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC, &conn->ksnc_peeraddr);
		return -EPROTO;
	}

	rc = lnet_sock_read(sock, &hello->kshm_version,
			    sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pISc\n",
		       rc, &conn->ksnc_peeraddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
			/* unknown protocol from peer_ni,
			 * tell peer_ni my protocol.
			 */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, &ni->ni_nid,
					    hello);
		}

		CERROR("Unknown protocol version (%d.x expected) from %pISc\n",
		       conn->ksnc_proto->pro_version, &conn->ksnc_peeraddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
		CERROR("Error %d reading or checking hello from %pISc\n",
		       rc, &conn->ksnc_peeraddr);
		LASSERT(rc < 0);
		return rc;
	}

	*incarnation = hello->kshm_src_incarnation;

	if (LNET_NID_IS_ANY(&hello->kshm_src_nid)) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pISc\n",
		       &conn->ksnc_peeraddr);
		return -EPROTO;
	}

	if (!active &&
	    rpc_get_port((struct sockaddr *)&conn->ksnc_peeraddr) >
	    LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer_ni process ID from socket */
		recv_id.pid = rpc_get_port((struct sockaddr *)
					   &conn->ksnc_peeraddr) |
			      LNET_PID_USERFLAG;
		LASSERT(conn->ksnc_peeraddr.ss_family == AF_INET);
		memset(&recv_id.nid, 0, sizeof(recv_id.nid));
		recv_id.nid.nid_type = ni->ni_nid.nid_type;
		recv_id.nid.nid_num = ni->ni_nid.nid_num;
		recv_id.nid.nid_addr[0] =
			((struct sockaddr_in *)
			 &conn->ksnc_peeraddr)->sin_addr.s_addr;
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		/* peer_ni determines type */
		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pISc\n",
			       hello->kshm_ctype, libcfs_idstr(peerid),
			       &conn->ksnc_peeraddr);
			return -EPROTO;
		}
		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    !nid_same(&peerid->nid, &recv_id.nid)) {
		LCONSOLE_ERROR_MSG(0x130,
				   "Connected successfully to %s on host %pISc, but they claimed they were %s; please check your Lustre configuration.\n",
				   libcfs_idstr(peerid),
				   &conn->ksnc_peeraddr,
				   libcfs_idstr(&recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
		/* Possible protocol mismatch or I lost the connection race */
		return proto_match ? EALREADY : EPROTO;
	}

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pISc %d\n",
		       conn->ksnc_type, libcfs_idstr(peerid),
		       &conn->ksnc_peeraddr,
		       hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
}
static int
ksocknal_connect(struct ksock_conn_cb *conn_cb)
{
	LIST_HEAD(zombies);
	struct ksock_peer_ni *peer_ni = conn_cb->ksnr_peer;
	int type = SOCKLND_CONN_NONE;
	int wanted;
	struct socket *sock;
	time64_t deadline;
	bool retry_later = false;
	int rc = 0;

	deadline = ktime_get_seconds() + ksocknal_timeout();

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	LASSERT(conn_cb->ksnr_scheduled);
	LASSERT(!conn_cb->ksnr_connecting);

	conn_cb->ksnr_connecting = 1;

	for (;;) {
		wanted = ksocknal_conn_cb_mask() & ~conn_cb->ksnr_connected;

		/* stop connecting if peer_ni/cb got closed under me, or
		 * conn cb got connected while queued
		 */
		if (peer_ni->ksnp_closing || conn_cb->ksnr_deleted ||
		    wanted == 0) {
			retry_later = false;
			break;
		}

		/* reschedule if peer_ni is connecting to me */
		if (peer_ni->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer_ni %s(%d) already connecting to me, retry later.\n",
			       libcfs_nidstr(&peer_ni->ksnp_id.nid),
			       peer_ni->ksnp_accepting);
			retry_later = true;
		}

		if (retry_later) /* needs reschedule */
			break;

		if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
			type = SOCKLND_CONN_ANY;
		} else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
			type = SOCKLND_CONN_CONTROL;
		} else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 &&
			   conn_cb->ksnr_blki_conn_count <= conn_cb->ksnr_blko_conn_count) {
			type = SOCKLND_CONN_BULK_IN;
		} else {
			LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
			type = SOCKLND_CONN_BULK_OUT;
		}

		write_unlock_bh(&ksocknal_data.ksnd_global_lock);

		if (ktime_get_seconds() >= deadline) {
			rc = -ETIMEDOUT;
			lnet_connect_console_error(
				rc, &peer_ni->ksnp_id.nid,
				(struct sockaddr *)&conn_cb->ksnr_addr);
			goto failed;
		}

		sock = lnet_connect(&peer_ni->ksnp_id.nid,
				    conn_cb->ksnr_myiface,
				    (struct sockaddr *)&conn_cb->ksnr_addr,
				    peer_ni->ksnp_ni->ni_net_ns);
		if (IS_ERR(sock)) {
			rc = PTR_ERR(sock);
			goto failed;
		}

		rc = ksocknal_create_conn(peer_ni->ksnp_ni, conn_cb, sock,
					  type);
		if (rc < 0) {
			lnet_connect_console_error(
				rc, &peer_ni->ksnp_id.nid,
				(struct sockaddr *)&conn_cb->ksnr_addr);
			goto failed;
		}

		if (rc == EALREADY && conn_cb->ksnr_conn_count > 0)
			conn_cb->ksnr_busy_retry_count += 1;
		else
			conn_cb->ksnr_busy_retry_count = 0;

		/* A +ve RC means I have to retry because I lost the connection
		 * race or I have to renegotiate protocol version
		 */
		retry_later = (rc != 0);
		if (retry_later)
			CDEBUG(D_NET, "peer_ni %s: conn race, retry later. rc %d\n",
			       libcfs_nidstr(&peer_ni->ksnp_id.nid), rc);

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}

	conn_cb->ksnr_scheduled = 0;
	conn_cb->ksnr_connecting = 0;

	if (conn_cb->ksnr_busy_retry_count >= SOCKNAL_MAX_BUSY_RETRIES &&
	    type > SOCKLND_CONN_NONE) {
		/* After so many retries due to EALREADY assume that
		 * the peer doesn't support as many connections as we want
		 */
		conn_cb->ksnr_connected |= BIT(type);
		retry_later = false;
	}

	if (retry_later) {
		/* re-queue for attention; this frees me up to handle
		 * the peer_ni's incoming connection request
		 */
		if (rc == EALREADY ||
		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
			/* We want to introduce a delay before next
			 * attempt to connect if we lost conn race, but
			 * the race is resolved quickly usually, so
			 * min_reconnectms should be good heuristic
			 */
			conn_cb->ksnr_retry_interval =
				*ksocknal_tunables.ksnd_min_reconnectms / 1000;
			conn_cb->ksnr_timeout = ktime_get_seconds() +
						conn_cb->ksnr_retry_interval;
		}

		ksocknal_launch_connection_locked(conn_cb);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return retry_later;

failed:
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	conn_cb->ksnr_scheduled = 0;
	conn_cb->ksnr_connecting = 0;

	/* This is a retry rather than a new connection */
	conn_cb->ksnr_retry_interval *= 2;
	conn_cb->ksnr_retry_interval =
		max_t(time64_t, conn_cb->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_min_reconnectms / 1000);
	conn_cb->ksnr_retry_interval =
		min_t(time64_t, conn_cb->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_max_reconnectms / 1000);

	LASSERT(conn_cb->ksnr_retry_interval);
	conn_cb->ksnr_timeout = ktime_get_seconds() +
				conn_cb->ksnr_retry_interval;

	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
	    peer_ni->ksnp_accepting == 0 &&
	    !ksocknal_find_connecting_conn_cb_locked(peer_ni)) {
		struct ksock_conn *conn;

		/* ksnp_tx_queue is queued on a conn on successful
		 * connection for V1.x and V2.x
		 */
		conn = list_first_entry_or_null(&peer_ni->ksnp_conns,
						struct ksock_conn, ksnc_list);
		if (conn != NULL)
			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x ||
				conn->ksnc_proto == &ksocknal_protocol_v4x);

		/* take all the blocked packets while I've got the lock and
		 * complete below...
		 */
		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_peer_failed(peer_ni);
	ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
	return 0;
}
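
/* Backoff arithmetic: on failure the retry interval doubles and is
 * clamped to [ksnd_min_reconnectms, ksnd_max_reconnectms] converted to
 * seconds.  With typical defaults of roughly 1s and 60s (both
 * tunable), successive failures retry after about 1, 2, 4, ... seconds
 * up to the 60-second cap.
 */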
/*
 * check whether we need to create more connds.
 * It will try to create a new thread if necessary; @timeout can
 * be updated if thread creation failed, so the caller wouldn't keep
 * trying while running out of resources.
 */
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
	int rc;
	int total = ksocknal_data.ksnd_connd_starting +
		    ksocknal_data.ksnd_connd_running;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
		/* can't create more connd, or still have enough
		 * threads to handle more connecting */
		return 0;
	}

	if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
		/* no pending connecting request */
		return 0;
	}

	if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
		/* may run out of resource, retry later */
		*timeout = cfs_time_seconds(1);
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* serialize starting to avoid flood */
		return 0;
	}

	ksocknal_data.ksnd_connd_starting_stamp = sec;
	ksocknal_data.ksnd_connd_starting++;
	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

	/* NB: total is the next id */
	rc = ksocknal_thread_start(ksocknal_connd, NULL,
				   "socknal_cd%02d", total);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
	if (rc == 0)
		return 1;

	/* we tried ... */
	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

	return 1;
}
/*
 * check whether the current thread can exit; it will return 1 if there
 * are too many threads and none were created in the past 120 seconds.
 * Also, this function may update @timeout to make the caller come back
 * again to recheck these conditions.
 */
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
	int val;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* in progress of starting new thread */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_running <=
	    *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
		return 0;
	}

	/* created thread in past 120 seconds? */
	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
		    SOCKNAL_CONND_TIMEOUT - sec);

	*timeout = (val > 0) ? cfs_time_seconds(val) :
			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
	if (val > 0)
		return 0;

	/* no creating in past 120 seconds */

	return ksocknal_data.ksnd_connd_running >
	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
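
/* Pool sizing note: together check_start/check_stop keep the connd
 * pool between ksnd_nconnds and ksnd_nconnds_max, always reserving
 * SOCKNAL_CONND_RESV threads beyond those currently stuck in
 * connect(), and only shrink it after SOCKNAL_CONND_TIMEOUT (120s)
 * without a new thread being started.
 */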
/* Go through connd_cbs queue looking for a conn_cb that we can process
 * right now, @timeout_p can be updated if we need to come back later */
static struct ksock_conn_cb *
ksocknal_connd_get_conn_cb_locked(signed long *timeout_p)
{
	time64_t now = ktime_get_seconds();
	time64_t conn_timeout;
	struct ksock_conn_cb *conn_cb;

	/* connd_routes can contain both pending and ordinary routes */
	list_for_each_entry(conn_cb, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {

		conn_timeout = conn_cb->ksnr_timeout;

		if (conn_cb->ksnr_retry_interval == 0 ||
		    now >= conn_timeout)
			return conn_cb;

		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
		    *timeout_p > cfs_time_seconds(conn_timeout - now))
			*timeout_p = cfs_time_seconds(conn_timeout - now);
	}

	return NULL;
}
int
ksocknal_connd(void *arg)
{
	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
	struct ksock_connreq *cr;
	wait_queue_entry_t wait;
	int cons_retry = 0;

	init_wait(&wait);

	spin_lock_bh(connd_lock);

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_running++;

	while (!ksocknal_data.ksnd_shuttingdown) {
		struct ksock_conn_cb *conn_cb = NULL;
		time64_t sec = ktime_get_real_seconds();
		long timeout = MAX_SCHEDULE_TIMEOUT;
		bool dropped_lock = false;

		if (ksocknal_connd_check_stop(sec, &timeout)) {
			/* wakeup another one to check stop */
			wake_up(&ksocknal_data.ksnd_connd_waitq);
			break;
		}

		if (ksocknal_connd_check_start(sec, &timeout)) {
			/* created new thread */
			dropped_lock = true;
		}

		cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs,
					      struct ksock_connreq, ksncr_list);
		if (cr != NULL) {
			/* Connection accepted by the listener */
			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = true;

			ksocknal_create_conn(cr->ksncr_ni, NULL,
					     cr->ksncr_sock, SOCKLND_CONN_NONE);
			lnet_ni_decref(cr->ksncr_ni);
			LIBCFS_FREE(cr, sizeof(*cr));

			spin_lock_bh(connd_lock);
		}

		/* Only handle an outgoing connection request if there
		 * is a thread left to handle incoming connections and
		 * create new connd
		 */
		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
		    ksocknal_data.ksnd_connd_running)
			conn_cb = ksocknal_connd_get_conn_cb_locked(&timeout);

		if (conn_cb != NULL) {
			list_del(&conn_cb->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = true;

			if (ksocknal_connect(conn_cb)) {
				/* consecutive retry */
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive re-connecting to %pISc\n",
					      &conn_cb->ksnr_addr);
					cons_retry = 0;
				}
			} else {
				cons_retry = 0;
			}

			ksocknal_conn_cb_decref(conn_cb);

			spin_lock_bh(connd_lock);
			ksocknal_data.ksnd_connd_connecting--;
		}

		if (dropped_lock) {
			if (!need_resched())
				continue;
			spin_unlock_bh(connd_lock);
			cond_resched();
			spin_lock_bh(connd_lock);
			continue;
		}

		/* Nothing to do for 'timeout' */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
					 &wait);
		spin_unlock_bh(connd_lock);

		schedule_timeout(timeout);

		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_lock_bh(connd_lock);
	}
	ksocknal_data.ksnd_connd_running--;
	spin_unlock_bh(connd_lock);

	ksocknal_thread_fini();
	return 0;
}
2325 static struct ksock_conn *
2326 ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
2328 /* We're called with a shared lock on ksnd_global_lock */
2329 struct ksock_conn *conn;
2330 struct ksock_tx *tx;
2331 struct ksock_sched *sched;
2333 list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
2336 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
2337 LASSERT (!conn->ksnc_closing);
2338 sched = conn->ksnc_scheduler;
2340 error = conn->ksnc_sock->sk->sk_err;
2342 ksocknal_conn_addref(conn);
2346 CNETERR("A connection with %s (%pIScp) was reset; it may have rebooted.\n",
2347 libcfs_idstr(&peer_ni->ksnp_id),
2348 &conn->ksnc_peeraddr);
2351 CNETERR("A connection with %s (%pIScp) timed out; the network or node may be down.\n",
2352 libcfs_idstr(&peer_ni->ksnp_id),
2353 &conn->ksnc_peeraddr);
2356 CNETERR("An unexpected network error %d occurred with %s (%pIScp\n",
2358 libcfs_idstr(&peer_ni->ksnp_id),
2359 &conn->ksnc_peeraddr);
2366 if (conn->ksnc_rx_started &&
2367 ktime_get_seconds() >= conn->ksnc_rx_deadline) {
2368 /* Timed out incomplete incoming message */
2369 ksocknal_conn_addref(conn);
2370 CNETERR("Timeout receiving from %s (%pIScp), state %d wanted %d left %d\n",
2371 libcfs_idstr(&peer_ni->ksnp_id),
2372 &conn->ksnc_peeraddr,
2373 conn->ksnc_rx_state,
2374 conn->ksnc_rx_nob_wanted,
2375 conn->ksnc_rx_nob_left);

		spin_lock_bh(&sched->kss_lock);
		if ((!list_empty(&conn->ksnc_tx_queue) ||
		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
		    ktime_get_seconds() >= conn->ksnc_tx_deadline) {
			/* Timed out messages queued for sending or
			 * buffered in the socket's send buffer
			 */
			ksocknal_conn_addref(conn);
			list_for_each_entry(tx, &conn->ksnc_tx_queue,
					    tx_list)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			CNETERR("Timeout sending data to %s (%pIScp); the network or that node may be down.\n",
				libcfs_idstr(&peer_ni->ksnp_id),
				&conn->ksnc_peeraddr);
			spin_unlock_bh(&sched->kss_lock);
			return conn;
		}
		spin_unlock_bh(&sched->kss_lock);
	}

	return NULL;
}
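
/* Abort every tx on peer_ni's send queue whose deadline has passed.
 * Called after the shared ksnd_global_lock has been dropped; the write
 * lock is taken here and the stale txs are finalized outside it.
 */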
static void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
	struct ksock_tx *tx;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
					      struct ksock_tx,
					      tx_list)) != NULL) {
		if (ktime_get_seconds() < tx->tx_deadline)
			break;

		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;

		list_move_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
}
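
/* Returns 0 if no keepalive was needed (ksnd_global_lock held throughout),
 * 1 if a keepalive ping was launched, or a negative errno on failure.  In
 * the non-zero cases the lock was dropped and re-taken, so the caller must
 * restart its scan.
 */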
static int
ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer_ni->ksnp_conns))
		return 0;

	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x &&
	    peer_ni->ksnp_proto != &ksocknal_protocol_v4x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
				  *ksocknal_tunables.ksnd_keepalive)
		return 0;

	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
		return 0;

	/* Retry in 10 seconds, so we don't pile onto this peer_ni if
	 * this keepalive attempt fails.
	 */
	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;

	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK, so no keepalive is needed */
			return 0;
		}

		spin_unlock_bh(&sched->kss_lock);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, &peer_ni->ksnp_id)
	    == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);

	return -EIO;
}
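
/* Scan one bucket ('idx') of the peer_ni hash table for keepalives that
 * are due and for connections or txs that have timed out.
 */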
static void
ksocknal_check_peer_timeouts(int idx)
{
	struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

 again:
	/* NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look...
	 */
	read_lock(&ksocknal_data.ksnd_global_lock);

	hlist_for_each_entry(peer_ni, peers, ksnp_list) {
		struct ksock_tx *tx_stale;
		time64_t deadline = 0;
		int resid = 0;
		int n = 0;
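
		/* Any path below that drops ksnd_global_lock must 'goto
		 * again': once the lock is released this hlist can change
		 * under us, so the bucket scan has to restart.
		 */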
		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}

		conn = ksocknal_find_timed_out_conn(peer_ni);

		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/* NB we won't find this one again, but we can't
			 * just proceed with the next peer_ni, since we dropped
			 * ksnd_global_lock and it might be dead already!
			 */
			ksocknal_conn_decref(conn);
			goto again;
		}

		/* we can't process stale txs right here because we're
		 * holding only the shared lock
		 */
		tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
					      struct ksock_tx, tx_list);
		if (tx && ktime_get_seconds() >= tx->tx_deadline) {
			ksocknal_peer_addref(peer_ni);
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_flush_stale_txs(peer_ni);

			ksocknal_peer_decref(peer_ni);
			goto again;
		}

		if (list_empty(&peer_ni->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer_ni->ksnp_lock);
		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
			if (ktime_get_seconds() < tx->tx_deadline)
				break;
			/* ignore the TX if connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (tx_stale == NULL)
				tx_stale = tx;
		}

		if (tx_stale == NULL) {
			spin_unlock(&peer_ni->ksnp_lock);
			continue;
		}

		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer_ni->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);

		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the oldest(%p) timed out %lld secs ago, resid: %d, wmem: %d\n",
		       n, libcfs_nidstr(&peer_ni->ksnp_id.nid), tx_stale,
		       ktime_get_seconds() - deadline,
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);

		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}

int ksocknal_reaper(void *arg)
{
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	LIST_HEAD(enomem_conns);
	int nenomem_conns;
	time64_t timeout;
	int i;
	int peer_index = 0;
	time64_t deadline = ktime_get_seconds();

	init_wait(&wait);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
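
	/* Main loop: terminate conns queued on deathrow, destroy zombie
	 * conns once their last reference is gone, reschedule conns that
	 * stalled on ENOMEM, and periodically sweep the peer_ni table for
	 * timeouts.
	 */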
	while (!ksocknal_data.ksnd_shuttingdown) {
		conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns,
						struct ksock_conn, ksnc_list);
		if (conn) {
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns,
						struct ksock_conn, ksnc_list);
		if (conn) {
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		list_splice_init(&ksocknal_data.ksnd_enomem_conns,
				 &enomem_conns);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while ((conn = list_first_entry_or_null(&enomem_conns,
							struct ksock_conn,
							ksnc_tx_list)) != NULL) {
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}

		/* careful with the jiffy wrap... */
		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
			unsigned int lnd_timeout;

			/* Time to check for timeouts on a few more peers: I
			 * do checks every 'p' seconds on a proportion of the
			 * peer_ni table and I need to check every connection
			 * 'n' times within a timeout interval, to ensure I
			 * detect a timeout on any connection within (n+1)/n
			 * times the timeout interval.
			 */
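
			/* Worked example (illustrative numbers only): with a
			 * 64-bucket peer_ni table, n = 4, p = 1 and a 50 s
			 * timeout, chunk = 64 * 4 * 1 / 50 = 5 buckets per
			 * second, so the whole table is swept about every
			 * 13 s, i.e. roughly n times per timeout interval.
			 */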
			lnd_timeout = ksocknal_timeout();
			if (lnd_timeout > n * p)
				chunk = (chunk * n * p) / lnd_timeout;
			if (chunk == 0)
				chunk = 1;

			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     HASH_SIZE(ksocknal_data.ksnd_peers);
			}

			deadline += p;
		}

		if (nenomem_conns != 0) {
			/* Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list.
			 */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
						     timeout;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
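
		/* Sleep only if no conns are already queued for reaping;
		 * otherwise fall through and service them at once.
		 */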
		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(cfs_time_seconds(timeout));

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}