/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <libcfs/linux/linux-mem.h>
#include "socklnd.h"
#include <linux/sunrpc/addr.h>
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
	struct ksock_tx *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					struct ksock_tx, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}
		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	refcount_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_hstatus = LNET_MSG_STATUS_OK;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}
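
/* Design note: NOOP descriptors are fixed-size and requested constantly
 * (ZC ACKs, keepalives), so instead of hitting the allocator each time,
 * completed ones are parked on ksnd_idle_noop_txs and reused above;
 * ksocknal_free_tx() below implements the matching recycle path. */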
struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	struct ksock_tx *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}
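
/* Usage sketch (see ksocknal_send_keepalive_locked() later in this file):
 * the keepalive path sends a ping with the reserved cookie via
 * ksocknal_alloc_tx_noop(1, 1), while the ZC-ACK paths pass the peer's
 * request cookie so the NOOP carries the ack in ksm_zc_cookies[1]. */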
void
ksocknal_free_tx(struct ksock_tx *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}
static int
ksocknal_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
		  struct kvec *scratch_iov)
{
	struct kvec *iov = &tx->tx_hdr;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_hdr inside ksocknal_lib_send_hdr() */
	rc = ksocknal_lib_send_hdr(conn, tx, scratch_iov);

	if (rc <= 0)				/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	LASSERT(tx->tx_niov == 1);

	if (nob < (int) iov->iov_len) {
		iov->iov_base += nob;
		iov->iov_len -= nob;
		return rc;
	}

	LASSERT(nob == iov->iov_len);
	tx->tx_niov--;

	return rc;
}
static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
		   struct kvec *scratch_iov)
{
	struct bio_vec *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov == 0);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);

	if (rc <= 0)				/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return rc;
		}

		nob -= (int)kiov->bv_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
		  struct kvec *scratch_iov)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0)
		schedule_timeout_uninterruptible(
			cfs_time_seconds(ksocknal_data.ksnd_stall_tx));

	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			rc = ksocknal_send_hdr(conn, tx, scratch_iov);
		} else {
			rc = ksocknal_send_kiov(conn, tx, scratch_iov);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0)			/* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/* allocated send buffer bytes < computed; infer
			 * something got ACKed */
			conn->ksnc_tx_deadline = ktime_get_seconds() +
						 ksocknal_timeout();
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
			conn->ksnc_tx_bufnob = bufnob;
			smp_mb();
		}

		if (rc <= 0) { /* Didn't write anything? */
			/* some stacks return 0 instead of -EAGAIN */
			if (rc == 0)
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}
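
/* Note the liveness heuristic above: sk_wmem_queued only shrinks when TCP
 * frees ACKed data, so observing bufnob < ksnc_tx_bufnob is treated as
 * evidence the peer is alive, refreshing ksnc_tx_deadline and
 * ksnp_last_alive without any extra protocol traffic. */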
static int
ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn, scratchiov);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 ksocknal_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
		   struct kvec *scratch_iov)
{
	struct bio_vec *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);

	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_kiov */
	rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 ksocknal_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int) kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return -EAGAIN;
		}

		nob -= (int)kiov->bv_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
		 struct kvec *scratch_iov)
{
	/* Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion. */
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0)
		schedule_timeout_uninterruptible(
			cfs_time_seconds(ksocknal_data.ksnd_stall_rx));

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov(conn, scratch_iov);
		else
			rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
						scratch_iov);
		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (rc == 0 && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */

		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}
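
/* Contract recap: 1 means progress was made (the caller re-checks
 * ksnc_rx_nob_wanted for completion), 0 is a clean EOF on a message
 * boundary, and EOF mid-message is converted to -EPROTO above because it
 * means the peer vanished mid-frame. */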
void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
	struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
	enum lnet_msg_hstatus hstatus = tx->tx_hstatus;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
		rc = -EIO;
		if (hstatus == LNET_MSG_STATUS_OK)
			hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	ksocknal_free_tx(tx);
	if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP go without lnetmsg */
		lnetmsg->msg_health_status = hstatus;
		lnet_finalize(lnetmsg, rc);
	}
}
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
	struct ksock_tx *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, struct ksock_tx, tx_list);

		if (error && tx->tx_lnetmsg != NULL) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
			if (error == -ETIMEDOUT)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			else if (error == -ENETDOWN ||
				 error == -EHOSTUNREACH ||
				 error == -ENETUNREACH ||
				 error == -ECONNREFUSED ||
				 error == -ECONNRESET)
				tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
			/*
			 * for all other errors we don't want to
			 * retransmit
			 */
			else if (error)
				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		}

		LASSERT(refcount_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx, error);
	}
}
static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
	struct ksock_conn *conn = tx->tx_conn;
	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer_ni will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/* assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack() */
	ksocknal_tx_addref(tx);

	spin_lock(&peer_ni->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer_ni */
	tx->tx_deadline = ktime_get_seconds() +
			  ksocknal_timeout();

	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

	if (peer_ni->ksnp_zc_next_cookie == 0)
		peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

	spin_unlock(&peer_ni->ksnp_lock);
}
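
/* Cookie lifecycle, as implemented above and in ksocknal_uncheck_zc_req()
 * and the protocol's pro_handle_zcack():
 *
 *   sender: ksm_zc_cookies[0] = C, tx pinned on ksnp_zc_req_list
 *   receiver: replies (or piggybacks) an ACK carrying C
 *   sender: matches C, drops the extra tx ref, completion is finalized
 *
 * Cookie 0 means "no ZC ack expected", and the counter skips the reserved
 * keepalive value when it wraps. */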
static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
	struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer_ni->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		/* Not waiting for an ACK */
		spin_unlock(&peer_ni->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer_ni->ksnp_lock);

	ksocknal_tx_decref(tx);
}
static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
			  struct kvec *scratch_iov)
{
	int rc;
	bool error_sim = false;

	if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
		error_sim = true;
		rc = -EINVAL;
		goto simulate_error;
	}

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx, scratch_iov);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		/* Sent everything OK */
		LASSERT(rc == 0);
		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++;   /* exponential backoff warnings */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%lld allocated)\n",
			      counter, conn, libcfs_kmem_read());

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
		    ksocknal_data.ksnd_reaper_waketime)
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		return rc;
	}

simulate_error:

	/* Actual error */
	LASSERT(rc < 0);

	if (!error_sim) {
		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		if (rc == -ETIMEDOUT)
			tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
		else
			tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pIS reset our connection while we were sending data; it may have rebooted.\n",
				      &conn->ksnc_peeraddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error while writing to %pIS: %d.\n",
				      &conn->ksnc_peeraddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pISp\n",
		       conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_peeraddr);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn,
					 (conn->ksnc_closing) ? 0 : rc);

	return rc;
}
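
/* Failure routing summary: -EAGAIN leaves the tx queued so the scheduler
 * retries when write space returns; -ENOMEM parks the conn on
 * ksnd_enomem_conns for the reaper to repost after SOCKNAL_ENOMEM_RETRY;
 * any other error closes the connection and lets LNet health decide on
 * resends via tx_hstatus. */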
void
ksocknal_launch_connection_locked(struct ksock_route *route)
{
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

	route->ksnr_scheduled = 1;		/* scheduling conn for connd */
	ksocknal_route_addref(route);		/* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
	struct ksock_route *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		route = ksocknal_find_connectable_route_locked(peer_ni);
		if (route == NULL)
			return;

		ksocknal_launch_connection_locked(route);
	}
}
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
{
	struct list_head *tmp;
	struct ksock_conn *conn;
	struct ksock_conn *typed = NULL;
	struct ksock_conn *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer_ni->ksnp_conns) {
		struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
						  ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto != NULL &&
			c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = ktime_get_seconds();

	return conn;
}
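
/* Selection policy: SOCKNAL_MATCH_YES sockets (the right type for this
 * tx) always beat SOCKNAL_MATCH_MAY fallbacks; within each class the
 * least-backlogged socket wins (queued bytes plus kernel send buffer),
 * and ksnd_round_robin breaks ties in favour of the socket posted to
 * least recently. */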
void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}
void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
	struct ksock_sched *sched = conn->ksnc_scheduler;
	struct ksock_msg *msg = &tx->tx_msg;
	struct ksock_tx *ztx = NULL;
	int bufnob = 0;

	/* called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pISp\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_peeraddr);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send. Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header.
	 */
	LASSERT(lnet_iov_nob(tx->tx_niov, &tx->tx_hdr) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
					      KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline = ktime_get_seconds() +
					 ksocknal_timeout();
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
		conn->ksnc_tx_bufnob = 0;
		smp_mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it */
		LASSERT(msg->ksm_zc_cookies[1] != 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
	} else {
		/* It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already? */
		LASSERT(msg->ksm_zc_cookies[1] == 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx != NULL) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready &&	/* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
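
/* Piggybacking works in both directions: a NOOP carrying a ZC ACK may be
 * folded into an already-queued message (pro_queue_tx_zcack), and a new
 * message may absorb a previously-queued NOOP ACK (pro_queue_tx_msg).
 * Either way the redundant NOOP lands on kss_zombie_noop_txs, which the
 * scheduler drains straight back to the freelist. */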
static struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
{
	time64_t now = ktime_get_seconds();
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)	/* connections being established */
			continue;

		/* all route types connected ? */
		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
			continue;

		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
		      now >= route->ksnr_timeout)) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pIS (cnted %d, interval %lld, %lld secs later)\n",
			       &route->ksnr_addr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       route->ksnr_timeout - now);
			continue;
		}

		return route;
	}

	return NULL;
}
static struct ksock_route *
ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
{
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}
int
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
		       struct lnet_process_id id)
{
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL) {
			if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
				conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection... */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id.pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to userspace process %s\n",
			       libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc != 0) {
			CERROR("Can't add peer_ni %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer_ni);

	conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer_ni->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer_ni) != NULL) {
		/* the message is going to be pinned to the peer_ni */
		tx->tx_deadline = ktime_get_seconds() +
				  ksocknal_timeout();

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
	return -EHOSTUNREACH;
}
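
/* Locking shape: the first pass takes only the read lock for the common
 * case (peer known, connection up); the write lock is needed just to
 * create the peer_ni or launch routes, and the loop retries at most once
 * after ksocknal_add_peer(). */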
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
	/* '1' for consistency with code that checks !mpflag to restore */
	unsigned int mpflag = 1;
	int type = lntmsg->msg_type;
	struct lnet_process_id target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct bio_vec *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	struct ksock_tx *tx;
	int desc_size;
	int rc;

	/* NB 'private' is different depending on what we're sending.
	 * Just ignore it... */

	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	LASSERT(!in_interrupt());

	desc_size = offsetof(struct ksock_tx,
			     tx_payload[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = memalloc_noreclaim_save();

	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			memalloc_noreclaim_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	tx->tx_niov = 1;
	tx->tx_kiov = tx->tx_payload;
	tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
					 payload_niov, payload_kiov,
					 payload_offset, payload_nob);

	if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
		tx->tx_zc_capable = 1;

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = 0;

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	/*
	 * We can't test lntmsg->msg_vmflush again as lntmsg may
	 * have been freed.
	 */
	if (!mpflag)
		memalloc_noreclaim_restore(mpflag);

	if (rc == 0)
		return 0;

	lntmsg->msg_health_status = tx->tx_hstatus;
	ksocknal_free_tx(tx);
	return -EIO;
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	atomic_inc(&ksocknal_data.ksnd_nthreads);
	return 0;
}

void
ksocknal_thread_fini(void)
{
	if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
		wake_up_var(&ksocknal_data.ksnd_nthreads);
}
int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];
	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (nob_to_skip == 0) {		/* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		smp_mb();		/* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare struct lnet_hdr */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/* Set up to skip as much as possible now. If there's more left
	 * (ran out of iov entries) we'll get called again */

	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;

	} while (nob_to_skip != 0 &&	/* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}
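
/* The return value doubles as "ready for a new message": 1 means the rx
 * iovs now describe the next header, 0 means we are still discarding slop
 * into the shared scratch buffer (every iov entry points at the same 4K
 * ksocknal_slop_buffer, so large skips take several passes). */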
static int
ksocknal_process_receive(struct ksock_conn *conn,
			 struct page **rx_scratch_pgs,
			 struct kvec *scratch_iov)
{
	struct lnet_hdr *lhdr;
	struct lnet_process_id *id;
	int rc;

	LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn, rx_scratch_pgs,
				      scratch_iov);

		if (rc <= 0) {
			struct lnet_process_id ksnp_id;

			ksnp_id = conn->ksnc_peer->ksnp_id;

			LASSERT(rc != -EAGAIN);

			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s ip %pISp\n",
				       conn, libcfs_id2str(ksnp_id),
				       &conn->ksnc_peeraddr);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s ip %pISp\n",
				       conn, rc, libcfs_id2str(ksnp_id),
				       &conn->ksnc_peeraddr);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP Checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
					conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet(conn, 0);
			return 0;	/* NOOP is done and just return */
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;

		goto again;	/* read lnet header now */

	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			/* Userspace peer_ni */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn);	/* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 &&	/* not truncating */
		    conn->ksnc_msg.ksm_csum != 0 &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}

		if (rc && conn->ksnc_lnet_msg)
			conn->ksnc_lnet_msg->msg_health_status =
				LNET_MSG_STATUS_REMOTE_ERROR;
		lnet_finalize(conn->ksnc_lnet_msg, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}
		/* Fall through */

	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0;	/* come back later */
		goto again;		/* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG();
	return -EINVAL;			/* keep gcc happy */
}
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	      int delayed, unsigned int niov,
	      struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
	      unsigned int rlen)
{
	struct ksock_conn *conn = private;
	struct ksock_sched *sched = conn->ksnc_scheduler;

	LASSERT(mlen <= rlen);
	LASSERT(niov <= LNET_MAX_IOV);

	conn->ksnc_lnet_msg = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	if (mlen == 0 || niov == 0) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov = 0;
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT(mlen ==
		lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}
static int
ksocknal_sched_cansleep(struct ksock_sched *sched)
{
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = (!ksocknal_data.ksnd_shuttingdown &&
	      list_empty(&sched->kss_rx_conns) &&
	      list_empty(&sched->kss_tx_conns));

	spin_unlock_bh(&sched->kss_lock);
	return rc;
}
int ksocknal_scheduler(void *arg)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;
	int rc;
	long id = (long)arg;
	struct page **rx_scratch_pgs;
	struct kvec *scratch_iov;

	sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];

	LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
			 sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
	if (!rx_scratch_pgs) {
		CERROR("Unable to allocate scratch pages\n");
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
			 sizeof(*scratch_iov) * LNET_MAX_IOV);
	if (!scratch_iov) {
		CERROR("Unable to allocate scratch iov\n");
		CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
		return -ENOMEM;
	}

	rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
	if (rc != 0) {
		CWARN("Can't set CPU partition affinity to %d: %d\n",
		      sched->kss_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		bool did_something = false;

		/* Ensure I progress everything semi-fairly */

		if (!list_empty(&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  struct ksock_conn, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/* clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock. */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn, rx_scratch_pgs,
						      scratch_iov);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/* Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = true;
		}

		if (!list_empty(&sched->kss_tx_conns)) {
			LIST_HEAD(zlist);

			list_splice_init(&sched->kss_zombie_noop_txs, &zlist);

			conn = list_entry(sched->kss_tx_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					struct ksock_tx, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete. Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock. */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}

			rc = ksocknal_process_transmit(conn, tx, scratch_iov);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail(&conn->ksnc_tx_list,
					      &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = true;
		}
		if (!did_something ||	/* nothing to do */
		    need_resched()) {	/* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			if (!did_something) {	/* wait for something to do */
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT(rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
	CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
	ksocknal_thread_fini();
	return 0;
}
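
/* Scheduler concurrency model: kss_lock is held while manipulating the
 * per-scheduler conn lists and dropped around actual socket I/O;
 * ksnc_rx_ready/ksnc_tx_ready are cleared before the I/O so a
 * data_ready/write_space callback arriving mid-call re-marks the conn
 * runnable instead of being lost. */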
/*
 * Add connection to kss_rx_conns of scheduler
 * and wake up the scheduler.
 */
void ksocknal_read_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) { /* not being progressed */
		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}
	spin_unlock_bh(&sched->kss_lock);
}
/*
 * Add connection to kss_tx_conns of scheduler
 * and wake up the scheduler.
 */
void ksocknal_write_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&		/* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
static const struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		struct lnet_magicversion *hmv;

		BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
			     offsetof(struct ksock_hello_msg, kshm_src_nid));

		hmv = (struct lnet_magicversion *)hello;

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}
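
/* Negotiation at a glance: V2/V3 peers lead with LNET_PROTO_MAGIC
 * (possibly byte-swapped, hence the __swab32() checks) plus an explicit
 * version word; legacy V1 peers are recognised by the old TCP magic with
 * the major/minor pair overlaid at the front of the hello. A NULL return
 * makes ksocknal_recv_hello() answer with our own protocol and fail the
 * handshake with -EPROTO. */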
int
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	struct ksock_net *net = (struct ksock_net *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT(conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}
static int
ksocknal_invert_type(int type)
{
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
}
int
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    struct ksock_hello_msg *hello,
		    struct lnet_process_id *peerid,
		    __u64 *incarnation)
{
	/* Return < 0        fatal error
	 *        0          success
	 *        EALREADY   lost connection race
	 *        EPROTO     protocol version mismatch
	 */
	struct socket *sock = conn->ksnc_sock;
	int active = (conn->ksnc_proto != NULL);
	int timeout;
	int proto_match;
	int rc;
	const struct ksock_proto *proto;
	struct lnet_process_id recv_id;

	/* socket type set on active connections - not set on passive */
	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? ksocknal_timeout() :
			   lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic,
			    sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pIS\n",
		       rc, &conn->ksnc_peeraddr);
		LASSERT(rc < 0);
		return rc;
	}

	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Bad magic(1) %#08x (%#08x expected) from %pIS\n",
		       __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC, &conn->ksnc_peeraddr);
		return -EPROTO;
	}

	rc = lnet_sock_read(sock, &hello->kshm_version,
			    sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pIS\n",
		       rc, &conn->ksnc_peeraddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
			/* unknown protocol from peer_ni, tell peer_ni my protocol */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

		CERROR("Unknown protocol version (%d.x expected) from %pIS\n",
		       conn->ksnc_proto->pro_version, &conn->ksnc_peeraddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
		CERROR("Error %d reading or checking hello from %pIS\n",
		       rc, &conn->ksnc_peeraddr);
		LASSERT(rc < 0);
		return rc;
	}

	*incarnation = hello->kshm_src_incarnation;

	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pIS\n",
		       &conn->ksnc_peeraddr);
		return -EPROTO;
	}

	if (!active &&
	    rpc_get_port((struct sockaddr *)&conn->ksnc_peeraddr) >
	    LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer_ni process ID from socket */
		recv_id.pid = rpc_get_port((struct sockaddr *)
					   &conn->ksnc_peeraddr) |
			      LNET_PID_USERFLAG;
		LASSERT(conn->ksnc_peeraddr.ss_family == AF_INET);
		recv_id.nid = LNET_MKNID(
			LNET_NIDNET(ni->ni_nid),
			ntohl(((struct sockaddr_in *)
			       &conn->ksnc_peeraddr)->sin_addr.s_addr));
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		/* peer_ni determines type */
		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pIS\n",
			       hello->kshm_ctype, libcfs_id2str(*peerid),
			       &conn->ksnc_peeraddr);
			return -EPROTO;
		}

		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130,
				   "Connected successfully to %s on host %pIS, but they claimed they were %s; please check your Lustre configuration.\n",
				   libcfs_id2str(*peerid),
				   &conn->ksnc_peeraddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
		/* Possible protocol mismatch or I lost the connection race */
		return proto_match ? EALREADY : EPROTO;
	}

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pIS %d\n",
		       conn->ksnc_type, libcfs_id2str(*peerid),
		       &conn->ksnc_peeraddr,
		       hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
}
static int
ksocknal_connect(struct ksock_route *route)
{
	LIST_HEAD(zombies);
	struct ksock_peer_ni *peer_ni = route->ksnr_peer;
	int type;
	int wanted;
	struct socket *sock;
	time64_t deadline;
	bool retry_later = false;
	int rc = 0;

	deadline = ktime_get_seconds() + ksocknal_timeout();

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	LASSERT(route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);

	route->ksnr_connecting = 1;

	for (;;) {
		wanted = ksocknal_route_mask() & ~route->ksnr_connected;

		/* stop connecting if peer_ni/route got closed under me, or
		 * route got connected while queued */
		if (peer_ni->ksnp_closing || route->ksnr_deleted ||
		    wanted == 0) {
			retry_later = false;
			break;
		}

		/* reschedule if peer_ni is connecting to me */
		if (peer_ni->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer_ni %s(%d) already connecting to me, retry later.\n",
			       libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
			retry_later = true;
		}

		if (retry_later) /* needs reschedule */
			break;

		if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
			type = SOCKLND_CONN_ANY;
		} else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
			type = SOCKLND_CONN_CONTROL;
		} else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
			type = SOCKLND_CONN_BULK_IN;
		} else {
			LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
			type = SOCKLND_CONN_BULK_OUT;
		}

		write_unlock_bh(&ksocknal_data.ksnd_global_lock);

		if (ktime_get_seconds() >= deadline) {
			rc = -ETIMEDOUT;
			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   (struct sockaddr *)
						   &route->ksnr_addr);
			goto failed;
		}

		sock = lnet_connect(peer_ni->ksnp_id.nid,
				    route->ksnr_myiface,
				    (struct sockaddr *)&route->ksnr_addr,
				    peer_ni->ksnp_ni->ni_net_ns);
		if (IS_ERR(sock)) {
			rc = PTR_ERR(sock);
			goto failed;
		}

		rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
		if (rc < 0) {
			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   (struct sockaddr *)
						   &route->ksnr_addr);
			goto failed;
		}

		/* A +ve RC means I have to retry because I lost the connection
		 * race or I have to renegotiate protocol version */
		retry_later = (rc != 0);
		if (retry_later)
			CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
			       libcfs_nid2str(peer_ni->ksnp_id.nid));

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	if (retry_later) {
		/* re-queue for attention; this frees me up to handle
		 * the peer_ni's incoming connection request
		 */
		if (rc == EALREADY ||
		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
			/* We want to introduce a delay before next
			 * attempt to connect if we lost conn race, but
			 * the race is resolved quickly usually, so
			 * min_reconnectms should be good heuristic
			 */
			route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
			route->ksnr_timeout = ktime_get_seconds() +
					      route->ksnr_retry_interval;
		}

		ksocknal_launch_connection_locked(route);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return retry_later;

 failed:
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	/* This is a retry rather than a new connection */
	route->ksnr_retry_interval *= 2;
	route->ksnr_retry_interval =
		max_t(time64_t, route->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_min_reconnectms / 1000);
	route->ksnr_retry_interval =
		min_t(time64_t, route->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_max_reconnectms / 1000);

	LASSERT(route->ksnr_retry_interval);
	route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;

	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
	    peer_ni->ksnp_accepting == 0 &&
	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
		struct ksock_conn *conn;

		/* ksnp_tx_queue is queued on a conn on successful
		 * connection for V1.x and V2.x
		 */
		if (!list_empty(&peer_ni->ksnp_conns)) {
			conn = list_entry(peer_ni->ksnp_conns.next,
					  struct ksock_conn, ksnc_list);
			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
		}

		/* take all the blocked packets while I've got the lock and
		 * complete below...
		 */
		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_peer_failed(peer_ni);
	ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
	return 0;
}
/*
 * check whether we need to create more connds.
 * It will try to create a new thread if necessary; @timeout can
 * be updated if thread creation fails, so the caller won't keep
 * retrying while running out of resources.
 */
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
	char name[16];
	int rc;
	int total = ksocknal_data.ksnd_connd_starting +
		    ksocknal_data.ksnd_connd_running;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
		/* can't create more connd, or still have enough
		 * threads to handle more connecting */
		return 0;
	}

	if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
		/* no pending connecting request */
		return 0;
	}

	if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
		/* may run out of resource, retry later */
		*timeout = cfs_time_seconds(1);
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* serialize starting to avoid flood */
		return 0;
	}

	ksocknal_data.ksnd_connd_starting_stamp = sec;
	ksocknal_data.ksnd_connd_starting++;
	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

	/* NB: total is the next id */
	snprintf(name, sizeof(name), "socknal_cd%02d", total);
	rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
	if (rc == 0)
		return 1;

	/* we tried ... */
	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

	return 1;
}
/*
 * check whether the current thread can exit; it will return 1 if there are
 * too many threads and none were created in the past 120 seconds.
 * Also, this function may update @timeout to make the caller come back
 * again to recheck these conditions.
 */
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
	int val;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* in progress of starting new thread */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_running <=
	    *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
		return 0;
	}

	/* created thread in past 120 seconds? */
	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
		    SOCKNAL_CONND_TIMEOUT - sec);

	*timeout = (val > 0) ? cfs_time_seconds(val) :
			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
	if (val > 0)
		return 0;

	/* no creating in past 120 seconds */

	return ksocknal_data.ksnd_connd_running >
	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
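
/* Together, check_start/check_stop give connd a simple elastic pool:
 * threads grow toward ksnd_nconnds_max while requests back up (always
 * keeping SOCKNAL_CONND_RESV in reserve for incoming accepts) and shrink
 * one at a time once nothing has been started for SOCKNAL_CONND_TIMEOUT
 * and more than the configured minimum are running. */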
/* Go through connd_routes queue looking for a route that we can process
 * right now, @timeout_p can be updated if we need to come back later */
static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
	time64_t now = ktime_get_seconds();
	struct ksock_route *route;

	/* connd_routes can contain both pending and ordinary routes */
	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {
		if (route->ksnr_retry_interval == 0 ||
		    now >= route->ksnr_timeout)
			return route;

		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
		    *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
			*timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
	}

	return NULL;
}
int
ksocknal_connd(void *arg)
{
	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
	struct ksock_connreq *cr;
	wait_queue_entry_t wait;
	int cons_retry = 0;

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(connd_lock);

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_running++;

	while (!ksocknal_data.ksnd_shuttingdown) {
		struct ksock_route *route = NULL;
		time64_t sec = ktime_get_real_seconds();
		long timeout = MAX_SCHEDULE_TIMEOUT;
		bool dropped_lock = false;

		if (ksocknal_connd_check_stop(sec, &timeout)) {
			/* wakeup another one to check stop */
			wake_up(&ksocknal_data.ksnd_connd_waitq);
			break;
		}

		if (ksocknal_connd_check_start(sec, &timeout)) {
			/* created new thread */
			dropped_lock = true;
		}

		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
			/* Connection accepted by the listener */
			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
					struct ksock_connreq, ksncr_list);

			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = true;

			ksocknal_create_conn(cr->ksncr_ni, NULL,
					     cr->ksncr_sock, SOCKLND_CONN_NONE);
			lnet_ni_decref(cr->ksncr_ni);
			LIBCFS_FREE(cr, sizeof(*cr));

			spin_lock_bh(connd_lock);
		}

		/* Only handle an outgoing connection request if there
		 * is a thread left to handle incoming connections and
		 * create new connd */
		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
		    ksocknal_data.ksnd_connd_running) {
			route = ksocknal_connd_get_route_locked(&timeout);
		}
		if (route != NULL) {
			list_del(&route->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = true;

			if (ksocknal_connect(route)) {
				/* consecutive retry */
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive re-connecting to %pIS\n",
					      &route->ksnr_addr);
					cons_retry = 0;
				}
			} else {
				cons_retry = 0;
			}

			ksocknal_route_decref(route);

			spin_lock_bh(connd_lock);
			ksocknal_data.ksnd_connd_connecting--;
		}

		if (dropped_lock) {
			if (!need_resched())
				continue;
			spin_unlock_bh(connd_lock);
			cond_resched();
			spin_lock_bh(connd_lock);
			continue;
		}

		/* Nothing to do for 'timeout' */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
					 &wait);
		spin_unlock_bh(connd_lock);

		schedule_timeout(timeout);

		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_lock_bh(connd_lock);
	}
	ksocknal_data.ksnd_connd_running--;
	spin_unlock_bh(connd_lock);

	ksocknal_thread_fini();
	return 0;
}
static struct ksock_conn *
ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
{
	/* We're called with a shared lock on ksnd_global_lock */
	struct ksock_conn *conn;
	struct list_head *ctmp;
	struct ksock_tx *tx;

	list_for_each(ctmp, &peer_ni->ksnp_conns) {
		int error;

		conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
		LASSERT(!conn->ksnc_closing);

		error = conn->ksnc_sock->sk->sk_err;
		if (error != 0) {
			ksocknal_conn_addref(conn);

			switch (error) {
			case ECONNRESET:
				CNETERR("A connection with %s (%pISp) was reset; it may have rebooted.\n",
					libcfs_id2str(peer_ni->ksnp_id),
					&conn->ksnc_peeraddr);
				break;
			case ETIMEDOUT:
				CNETERR("A connection with %s (%pISp) timed out; the network or node may be down.\n",
					libcfs_id2str(peer_ni->ksnp_id),
					&conn->ksnc_peeraddr);
				break;
			default:
				CNETERR("An unexpected network error %d occurred with %s (%pISp)\n",
					error,
					libcfs_id2str(peer_ni->ksnp_id),
					&conn->ksnc_peeraddr);
				break;
			}

			return conn;
		}

		if (conn->ksnc_rx_started &&
		    ktime_get_seconds() >= conn->ksnc_rx_deadline) {
			/* Timed out incomplete incoming message */
			ksocknal_conn_addref(conn);
			CNETERR("Timeout receiving from %s (%pISp), state %d wanted %d left %d\n",
				libcfs_id2str(peer_ni->ksnp_id),
				&conn->ksnc_peeraddr,
				conn->ksnc_rx_state,
				conn->ksnc_rx_nob_wanted,
				conn->ksnc_rx_nob_left);
			return conn;
		}

		if ((!list_empty(&conn->ksnc_tx_queue) ||
		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
		    ktime_get_seconds() >= conn->ksnc_tx_deadline) {
			/* Timed out messages queued for sending or
			 * buffered in the socket's send buffer
			 */
			ksocknal_conn_addref(conn);
			list_for_each_entry(tx, &conn->ksnc_tx_queue,
					    tx_list)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			CNETERR("Timeout sending data to %s (%pISp); the network or that node may be down.\n",
				libcfs_id2str(peer_ni->ksnp_id),
				&conn->ksnc_peeraddr);
			return conn;
		}
	}

	return NULL;
}
static void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
	struct ksock_tx *tx;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
		tx = list_entry(peer_ni->ksnp_tx_queue.next,
				struct ksock_tx, tx_list);

		if (ktime_get_seconds() < tx->tx_deadline)
			break;

		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;

		list_move_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
}
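
/* Note: unlike the read-side scans, flushing takes ksnd_global_lock for
 * write because it unlinks txs from ksnp_tx_queue; expired txs are moved
 * onto a private list first so ksocknal_txlist_done() can complete them
 * with -ETIMEDOUT after the lock has been dropped. */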
static int
ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer_ni->ksnp_conns))
		return 0;

	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
				  *ksocknal_tunables.ksnd_keepalive)
		return 0;

	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
		return 0;

	/* retry 10 secs later, so we don't put pressure on this peer_ni
	 * if we failed to send a keepalive this time */
	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;
	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK, don't need keepalive */
			return 0;
		}

		spin_unlock_bh(&sched->kss_lock);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);

	return -EIO;
}
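
/* Note: a zero return means ksnd_global_lock was held (shared) throughout;
 * any non-zero return (1 on success, negative errno on failure) means the
 * lock was dropped and re-taken, so the caller must restart its scan of
 * the peer_ni table. */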
static void
ksocknal_check_peer_timeouts(int idx)
{
	struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

 again:
	/* NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look... */
	read_lock(&ksocknal_data.ksnd_global_lock);

	hlist_for_each_entry(peer_ni, peers, ksnp_list) {
		struct ksock_tx *tx_stale;
		time64_t deadline = 0;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}
		conn = ksocknal_find_timed_out_conn(peer_ni);

		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/* NB we won't find this one again, but we can't
			 * just proceed with the next peer_ni, since we dropped
			 * ksnd_global_lock and it might be dead already! */
			ksocknal_conn_decref(conn);
			goto again;
		}

		/* we can't process stale txs right here because we're
		 * holding only a shared lock */
		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			struct ksock_tx *tx;

			tx = list_entry(peer_ni->ksnp_tx_queue.next,
					struct ksock_tx, tx_list);
			if (ktime_get_seconds() >= tx->tx_deadline) {
				ksocknal_peer_addref(peer_ni);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer_ni);

				ksocknal_peer_decref(peer_ni);
				goto again;
			}
		}
		if (list_empty(&peer_ni->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer_ni->ksnp_lock);
		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
			if (ktime_get_seconds() < tx->tx_deadline)
				break;
			/* ignore the TX if connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (tx_stale == NULL)
				tx_stale = tx;
		}

		if (tx_stale == NULL) {
			spin_unlock(&peer_ni->ksnp_lock);
			continue;
		}

		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);
2546 read_unlock(&ksocknal_data.ksnd_global_lock);
2548 CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
2549 "oldest(%p) timed out %lld secs ago, "
2550 "resid: %d, wmem: %d\n",
2551 n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
2552 ktime_get_seconds() - deadline,
2553 resid, conn->ksnc_sock->sk->sk_wmem_queued);
2555 ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
2556 ksocknal_conn_decref(conn);
2560 read_unlock(&ksocknal_data.ksnd_global_lock);
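
/* Note: whenever the shared ksnd_global_lock is dropped above (keepalive
 * sent, connection closed, or stale txs flushed) the hash chain may have
 * changed underneath us, so the scan restarts from 'again' rather than
 * continuing from a possibly stale list position. */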
int ksocknal_reaper(void *arg)
{
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	LIST_HEAD(enomem_conns);
	int nenomem_conns;
	time64_t timeout;
	int i;
	int peer_index = 0;
	time64_t deadline = ktime_get_seconds();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}
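
		/* Note: teardown is two-stage: deathrow conns are shut down
		 * via ksocknal_terminate_conn() while other references may
		 * still exist; once the last reference is dropped the conn
		 * is queued on ksnd_zombie_conns and finally freed here by
		 * ksocknal_destroy_conn(). */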
		list_splice_init(&ksocknal_data.ksnd_enomem_conns,
				 &enomem_conns);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}
		/* careful with the jiffy wrap... */
		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
			unsigned int lnd_timeout;

			/* Time to check for timeouts on a few more peers: I
			 * do checks every 'p' seconds on a proportion of the
			 * peer_ni table and I need to check every connection
			 * 'n' times within a timeout interval, to ensure I
			 * detect a timeout on any connection within (n+1)/n
			 * times the timeout interval. */
			lnd_timeout = ksocknal_timeout();
			if (lnd_timeout > n * p)
				chunk = (chunk * n * p) / lnd_timeout;
			if (chunk == 0)
				chunk = 1;
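
			/* Worked example (illustrative, assuming n = 4 and
			 * p = 1 as above): with a 256-bucket peer_ni table
			 * and a 50s LND timeout, chunk = 256 * 4 / 50 = 20
			 * buckets per pass, so the whole table is covered
			 * roughly every 13 passes (~13s), about 4 times per
			 * 50s timeout interval. */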
			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     HASH_SIZE(ksocknal_data.ksnd_peers);
			}

			deadline += p;
		}
		if (nenomem_conns != 0) {
			/* Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list. */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
						     timeout;
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(cfs_time_seconds(timeout));

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}