/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "socklnd.h"
#include <libcfs/linux/linux-mem.h>
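/*
 * Allocate a tx descriptor.  KSOCK_MSG_NOOP descriptors are all the same
 * (small, fixed) size, so completed ones are parked on the
 * ksnd_idle_noop_txs freelist under ksnd_tx_lock and recycled here rather
 * than going back to the allocator; everything else is allocated fresh.
 */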
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
        struct ksock_tx *tx = NULL;

        if (type == KSOCK_MSG_NOOP) {
                LASSERT(size == KSOCK_NOOP_TX_SIZE);

                /* searching for a noop tx in free list */
                spin_lock(&ksocknal_data.ksnd_tx_lock);

                if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                        tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
                                        struct ksock_tx, tx_list);
                        LASSERT(tx->tx_desc_size == size);
                        list_del(&tx->tx_list);
                }

                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        }

        if (tx == NULL)
                LIBCFS_ALLOC(tx, size);

        if (tx == NULL)
                return NULL;

        atomic_set(&tx->tx_refcount, 1);
        tx->tx_zc_aborted = 0;
        tx->tx_zc_capable = 0;
        tx->tx_zc_checked = 0;
        tx->tx_hstatus = LNET_MSG_STATUS_OK;
        tx->tx_desc_size = size;

        atomic_inc(&ksocknal_data.ksnd_nactive_txs);

        return tx;
}
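/*
 * NOOP messages carry no LNet payload; they exist only to deliver a
 * zero-copy ACK cookie (ksm_zc_cookies[1]) back to the peer, so the
 * descriptor references just the single mapped fragment holding the
 * ksock_msg header.
 */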
struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
        struct ksock_tx *tx;

        tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
        if (tx == NULL) {
                CERROR("Can't allocate noop tx desc\n");
                return NULL;
        }

        tx->tx_conn = NULL;
        tx->tx_lnetmsg = NULL;
        tx->tx_kiov = NULL;
        tx->tx_nkiov = 0;
        tx->tx_iov = tx->tx_frags.virt.iov;
        tx->tx_niov = 1;
        tx->tx_nonblk = nonblk;

        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = cookie;

        return tx;
}
void
ksocknal_free_tx(struct ksock_tx *tx)
{
        atomic_dec(&ksocknal_data.ksnd_nactive_txs);

        if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
                /* it's a noop tx */
                spin_lock(&ksocknal_data.ksnd_tx_lock);

                list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        } else {
                LIBCFS_FREE(tx, tx->tx_desc_size);
        }
}
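/*
 * The two senders below advance tx->tx_iov / tx->tx_kiov past whatever the
 * socket accepted, so a partial write leaves the descriptor ready for the
 * next attempt.  Both return the byte count from the socket call, or
 * 0 / -ve if nothing was sent.
 */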
static int
ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx,
                  struct kvec *scratch_iov)
{
        struct kvec *iov = tx->tx_iov;
        int nob;
        int rc;

        LASSERT(tx->tx_niov > 0);

        /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
        rc = ksocknal_lib_send_iov(conn, tx, scratch_iov);

        if (rc <= 0)                            /* sent nothing? */
                return rc;

        nob = rc;
        LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;

        /* "consume" iov */
        do {
                LASSERT(tx->tx_niov > 0);

                if (nob < (int)iov->iov_len) {
                        iov->iov_base += nob;
                        iov->iov_len -= nob;
                        return rc;
                }

                nob -= iov->iov_len;
                tx->tx_iov = ++iov;
                tx->tx_niov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
                   struct kvec *scratch_iov)
{
        struct bio_vec *kiov = tx->tx_kiov;
        int nob;
        int rc;

        LASSERT(tx->tx_niov == 0);
        LASSERT(tx->tx_nkiov > 0);

        /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
        rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);

        if (rc <= 0)                            /* sent nothing? */
                return rc;

        nob = rc;
        LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;

        /* "consume" kiov */
        do {
                LASSERT(tx->tx_nkiov > 0);

                if (nob < (int)kiov->bv_len) {
                        kiov->bv_offset += nob;
                        kiov->bv_len -= nob;
                        return rc;
                }

                nob -= (int)kiov->bv_len;
                tx->tx_kiov = ++kiov;
                tx->tx_nkiov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
                  struct kvec *scratch_iov)
{
        int rc;
        int bufnob;

        if (ksocknal_data.ksnd_stall_tx != 0)
                schedule_timeout_uninterruptible(
                        cfs_time_seconds(ksocknal_data.ksnd_stall_tx));

        LASSERT(tx->tx_resid != 0);

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }

        do {
                if (ksocknal_data.ksnd_enomem_tx > 0) {
                        /* testing... */
                        ksocknal_data.ksnd_enomem_tx--;
                        rc = -EAGAIN;
                } else if (tx->tx_niov != 0) {
                        rc = ksocknal_send_iov(conn, tx, scratch_iov);
                } else {
                        rc = ksocknal_send_kiov(conn, tx, scratch_iov);
                }

                bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
                if (rc > 0)                     /* sent something? */
                        conn->ksnc_tx_bufnob += rc; /* account it */

                if (bufnob < conn->ksnc_tx_bufnob) {
                        /* allocated send buffer bytes < computed; infer
                         * something got ACKed */
                        conn->ksnc_tx_deadline = ktime_get_seconds() +
                                                 lnet_get_lnd_timeout();
                        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
                        conn->ksnc_tx_bufnob = bufnob;
                        smp_mb();
                }

                if (rc <= 0) { /* Didn't write anything? */
                        /* some stacks return 0 instead of -EAGAIN */
                        if (rc == 0)
                                rc = -EAGAIN;

                        /* Check if EAGAIN is due to memory pressure */
                        if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
                                rc = -ENOMEM;

                        break;
                }

                /* socket's wmem_queued now includes 'rc' bytes */
                atomic_sub(rc, &conn->ksnc_tx_nob);
                rc = 0;

        } while (tx->tx_resid != 0);

        ksocknal_connsock_decref(conn);
        return rc;
}
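/*
 * The receive helpers mirror the senders: consume ksnc_rx_iov/ksnc_rx_kiov
 * by however many bytes arrived, refresh the rx deadline and the peer's
 * ksnp_last_alive, and return -EAGAIN from mid-fragment so the caller
 * knows more data is still expected.
 */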
static int
ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
        struct kvec *iov = conn->ksnc_rx_iov;
        int nob;
        int rc;

        LASSERT(conn->ksnc_rx_niov > 0);

        /* Never touch conn->ksnc_rx_iov or change connection
         * status inside ksocknal_lib_recv_iov */
        rc = ksocknal_lib_recv_iov(conn, scratchiov);

        if (rc <= 0)
                return rc;

        /* received something... */
        nob = rc;

        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
        conn->ksnc_rx_deadline = ktime_get_seconds() +
                                 lnet_get_lnd_timeout();
        smp_mb();                       /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;

        do {
                LASSERT(conn->ksnc_rx_niov > 0);

                if (nob < (int)iov->iov_len) {
                        iov->iov_len -= nob;
                        iov->iov_base += nob;
                        return -EAGAIN;
                }

                nob -= iov->iov_len;
                conn->ksnc_rx_iov = ++iov;
                conn->ksnc_rx_niov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
                   struct kvec *scratch_iov)
{
        struct bio_vec *kiov = conn->ksnc_rx_kiov;
        int nob;
        int rc;

        LASSERT(conn->ksnc_rx_nkiov > 0);

        /* Never touch conn->ksnc_rx_kiov or change connection
         * status inside ksocknal_lib_recv_kiov */
        rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);

        if (rc <= 0)
                return rc;

        /* received something... */
        nob = rc;

        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
        conn->ksnc_rx_deadline = ktime_get_seconds() +
                                 lnet_get_lnd_timeout();
        smp_mb();                       /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;

        do {
                LASSERT(conn->ksnc_rx_nkiov > 0);

                if (nob < (int)kiov->bv_len) {
                        kiov->bv_offset += nob;
                        kiov->bv_len -= nob;
                        return -EAGAIN;
                }

                nob -= (int)kiov->bv_len;
                conn->ksnc_rx_kiov = ++kiov;
                conn->ksnc_rx_nkiov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
                 struct kvec *scratch_iov)
{
        /* Return 1 on success, 0 on EOF, < 0 on error.
         * Caller checks ksnc_rx_nob_wanted to determine
         * progress/completion. */
        int rc;

        if (ksocknal_data.ksnd_stall_rx != 0)
                schedule_timeout_uninterruptible(
                        cfs_time_seconds(ksocknal_data.ksnd_stall_rx));

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }

        for (;;) {
                if (conn->ksnc_rx_niov != 0)
                        rc = ksocknal_recv_iov(conn, scratch_iov);
                else
                        rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
                                                scratch_iov);

                if (rc <= 0) {
                        /* error/EOF or partial receive */
                        if (rc == -EAGAIN) {
                                rc = 1;
                        } else if (rc == 0 && conn->ksnc_rx_started) {
                                /* EOF in the middle of a message */
                                rc = -EPROTO;
                        }
                        break;
                }

                /* Completed a fragment */

                if (conn->ksnc_rx_nob_wanted == 0) {
                        rc = 1;
                        break;
                }
        }

        ksocknal_connsock_decref(conn);
        return rc;
}
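/*
 * Finalize a tx: translate a short or ZC-aborted send into -EIO plus a
 * health status LNet can use for its retry policy, drop the conn ref the
 * tx held, and hand the descriptor back (NOOP txs have no lnetmsg to
 * finalize).
 */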
void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
        struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
        enum lnet_msg_hstatus hstatus = tx->tx_hstatus;

        LASSERT(ni != NULL || tx->tx_conn != NULL);

        if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
                rc = -EIO;
                if (hstatus == LNET_MSG_STATUS_OK)
                        hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
        }

        if (tx->tx_conn != NULL)
                ksocknal_conn_decref(tx->tx_conn);

        ksocknal_free_tx(tx);
        if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP go without lnetmsg */
                lnetmsg->msg_health_status = hstatus;
                lnet_finalize(lnetmsg, rc);
        }
}
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
        struct ksock_tx *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, struct ksock_tx, tx_list);

                if (error && tx->tx_lnetmsg != NULL) {
                        CNETERR("Deleting packet type %d len %d %s->%s\n",
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
                } else if (error) {
                        CNETERR("Deleting noop packet\n");
                }

                list_del(&tx->tx_list);

                if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
                        if (error == -ETIMEDOUT)
                                tx->tx_hstatus =
                                        LNET_MSG_STATUS_LOCAL_TIMEOUT;
                        else if (error == -ENETDOWN ||
                                 error == -EHOSTUNREACH ||
                                 error == -ENETUNREACH ||
                                 error == -ECONNREFUSED ||
                                 error == -ECONNRESET)
                                tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
                        /*
                         * for all other errors we don't want to
                         * retransmit
                         */
                        else if (error)
                                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
                }

                LASSERT(atomic_read(&tx->tx_refcount) == 1);
                ksocknal_tx_done(ni, tx, error);
        }
}
static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
        struct ksock_conn *conn = tx->tx_conn;
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;

        /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
         * to ksnp_zc_req_list if some fragment of this message should be sent
         * zero-copy.  Our peer_ni will send an ACK containing this cookie when
         * she has received this message to tell us we can signal completion.
         * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
         * ksnp_zc_req_list. */
        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);

        tx->tx_zc_checked = 1;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
            !conn->ksnc_zc_capable)
                return;

        /* assign cookie and queue tx to pending list, it will be released when
         * a matching ack is received. See ksocknal_handle_zcack() */

        ksocknal_tx_addref(tx);

        spin_lock(&peer_ni->ksnp_lock);

        /* ZC_REQ is going to be pinned to the peer_ni */
        tx->tx_deadline = ktime_get_seconds() +
                          lnet_get_lnd_timeout();

        LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

        tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

        if (peer_ni->ksnp_zc_next_cookie == 0)
                peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

        spin_unlock(&peer_ni->ksnp_lock);
}
static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
        struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;

        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);

        tx->tx_zc_checked = 0;

        spin_lock(&peer_ni->ksnp_lock);

        if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
                /* Not waiting for an ACK */
                spin_unlock(&peer_ni->ksnp_lock);
                return;
        }

        tx->tx_msg.ksm_zc_cookies[0] = 0;
        list_del(&tx->tx_zc_list);

        spin_unlock(&peer_ni->ksnp_lock);

        ksocknal_tx_decref(tx);
}
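/*
 * Push one tx out on a connection.  -EAGAIN means the socket backed up and
 * the tx must be requeued; -ENOMEM parks the whole conn on
 * ksnd_enomem_conns so the reaper retries it after SOCKNAL_ENOMEM_RETRY;
 * any other error closes the connection.
 */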
static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
                          struct kvec *scratch_iov)
{
        int rc;
        bool error_sim = false;

        if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
                error_sim = true;
                rc = -EINVAL;
                goto simulate_error;
        }

        if (tx->tx_zc_capable && !tx->tx_zc_checked)
                ksocknal_check_zc_req(tx);

        rc = ksocknal_transmit(conn, tx, scratch_iov);

        CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

        if (tx->tx_resid == 0) {
                /* Sent everything OK */
                LASSERT(rc == 0);
                return 0;
        }

        if (rc == -EAGAIN)
                return rc;

        if (rc == -ENOMEM) {
                static int counter;

                counter++;   /* exponential backoff warnings */
                if ((counter & (-counter)) == counter)
                        CWARN("%u ENOMEM tx %p (%u allocated)\n",
                              counter, conn, atomic_read(&libcfs_kmemory));

                /* Queue on ksnd_enomem_conns for retry after a timeout */
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

                /* enomem list takes over scheduler's ref... */
                LASSERT(conn->ksnc_tx_scheduled);
                list_add_tail(&conn->ksnc_tx_list,
                              &ksocknal_data.ksnd_enomem_conns);
                if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
                    ksocknal_data.ksnd_reaper_waketime)
                        wake_up(&ksocknal_data.ksnd_reaper_waitq);

                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                /*
                 * set the health status of the message which determines
                 * whether we should retry the transmit
                 */
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
                return rc;
        }

simulate_error:

        /* Actual error */
        LASSERT(rc < 0);

        if (!error_sim) {
                /*
                 * set the health status of the message which determines
                 * whether we should retry the transmit
                 */
                if (rc == -ETIMEDOUT)
                        tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
                else
                        tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
        }

        if (!conn->ksnc_closing) {
                switch (rc) {
                case -ECONNRESET:
                        LCONSOLE_WARN("Host %pI4h reset our connection "
                                      "while we were sending data; it may have "
                                      "rebooted.\n", &conn->ksnc_ipaddr);
                        break;
                default:
                        LCONSOLE_WARN("There was an unexpected network error "
                                      "while writing to %pI4h: %d.\n",
                                      &conn->ksnc_ipaddr, rc);
                        break;
                }
                CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
                       conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
                       &conn->ksnc_ipaddr, conn->ksnc_port);
        }

        if (tx->tx_zc_checked)
                ksocknal_uncheck_zc_req(tx);

        /* it's not an error if conn is being closed */
        ksocknal_close_conn_and_siblings(conn,
                                         (conn->ksnc_closing) ? 0 : rc);

        return rc;
}
void
ksocknal_launch_connection_locked(struct ksock_route *route)
{
        /* called holding write lock on ksnd_global_lock */

        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

        route->ksnr_scheduled = 1;              /* scheduling conn for connd */
        ksocknal_route_addref(route);           /* extra ref for connd */

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

        list_add_tail(&route->ksnr_connd_list,
                      &ksocknal_data.ksnd_connd_routes);
        wake_up(&ksocknal_data.ksnd_connd_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
        struct ksock_route *route;

        /* called holding write lock on ksnd_global_lock */
        for (;;) {
                /* launch any/all connections that need it */
                route = ksocknal_find_connectable_route_locked(peer_ni);
                if (route == NULL)
                        return;

                ksocknal_launch_connection_locked(route);
        }
}
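/*
 * Pick the best connection for a tx: the protocol's pro_match_tx() sorts
 * each conn into "typed" (right type for this tx) or "fallback", and
 * within each class we prefer the least-loaded socket (queued bytes plus
 * the kernel send buffer), optionally round-robining on ties.
 */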
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
{
        struct list_head *tmp;
        struct ksock_conn *conn;
        struct ksock_conn *typed = NULL;
        struct ksock_conn *fallback = NULL;
        int tnob = 0;
        int fnob = 0;

        list_for_each(tmp, &peer_ni->ksnp_conns) {
                struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
                                                  ksnc_list);
                int nob = atomic_read(&c->ksnc_tx_nob) +
                          c->ksnc_sock->sk->sk_wmem_queued;
                int rc;

                LASSERT(!c->ksnc_closing);
                LASSERT(c->ksnc_proto != NULL &&
                        c->ksnc_proto->pro_match_tx != NULL);

                rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

                switch (rc) {
                default:
                        LBUG();
                case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
                        continue;

                case SOCKNAL_MATCH_YES: /* typed connection */
                        if (typed == NULL || tnob > nob ||
                            (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
                             typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
                                typed = c;
                                tnob = nob;
                        }
                        break;

                case SOCKNAL_MATCH_MAY: /* fallback connection */
                        if (fallback == NULL || fnob > nob ||
                            (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
                             fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
                                fallback = c;
                                fnob = nob;
                        }
                        break;
                }
        }

        /* prefer the typed selection */
        conn = (typed != NULL) ? typed : fallback;

        if (conn != NULL)
                conn->ksnc_tx_last_post = ktime_get_seconds();

        return conn;
}
void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
        conn->ksnc_proto->pro_pack(tx);

        atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
        ksocknal_conn_addref(conn); /* +1 ref for tx */
        tx->tx_conn = conn;
}
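/*
 * Queue a prepped tx on a connection and kick the scheduler.  NOOP ZC-ACKs
 * are piggybacked on already-queued normal packets where the protocol
 * allows it, and any noop tx made redundant that way is retired through
 * the scheduler's zombie list.
 */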
void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
        struct ksock_sched *sched = conn->ksnc_scheduler;
        struct ksock_msg *msg = &tx->tx_msg;
        struct ksock_tx *ztx = NULL;
        int bufnob = 0;

        /* called holding global lock (read or irq-write) and caller may
         * not have dropped this lock between finding conn and calling me,
         * so we don't need the {get,put}connsock dance to deref
         * ksnc_sock... */
        LASSERT(!conn->ksnc_closing);

        CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
               libcfs_id2str(conn->ksnc_peer->ksnp_id),
               &conn->ksnc_ipaddr, conn->ksnc_port);

        ksocknal_tx_prep(conn, tx);

        /* Ensure the frags we've been given EXACTLY match the number of
         * bytes we want to send.  Many TCP/IP stacks disregard any total
         * size parameters passed to them and just look at the frags.
         *
         * We always expect at least 1 mapped fragment containing the
         * complete ksocknal message header. */
        LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
                lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
                (unsigned int)tx->tx_nob);
        LASSERT(tx->tx_niov >= 1);
        LASSERT(tx->tx_resid == tx->tx_nob);

        CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
               tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
                                              KSOCK_MSG_NOOP,
               tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
        spin_lock_bh(&sched->kss_lock);

        if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                /* First packet starts the timeout */
                conn->ksnc_tx_deadline = ktime_get_seconds() +
                                         lnet_get_lnd_timeout();
                if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
                        conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
                conn->ksnc_tx_bufnob = 0;
                smp_mb(); /* order with adding to tx_queue */
        }

        if (msg->ksm_type == KSOCK_MSG_NOOP) {
                /* The packet is noop ZC ACK, try to piggyback the ack_cookie
                 * on a normal packet so I don't need to send it */
                LASSERT(msg->ksm_zc_cookies[1] != 0);
                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

                if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
                        ztx = tx; /* ZC ACK piggybacked on ztx release tx later */

        } else {
                /* It's a normal packet - can it piggback a noop zc-ack that
                 * has been queued already? */
                LASSERT(msg->ksm_zc_cookies[1] == 0);
                LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

                ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
                /* ztx will be released later */
        }

        if (ztx != NULL) {
                atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
                list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
        }

        if (conn->ksnc_tx_ready &&      /* able to send */
            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
                /* +1 ref for scheduler */
                ksocknal_conn_addref(conn);
                list_add_tail(&conn->ksnc_tx_list,
                              &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                wake_up(&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);
}
struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
{
        time64_t now = ktime_get_seconds();
        struct list_head *tmp;
        struct ksock_route *route;

        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

                if (route->ksnr_scheduled)      /* connections being established */
                        continue;

                /* all route types connected ? */
                if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
                        continue;

                if (!(route->ksnr_retry_interval == 0 || /* first attempt */
                      now >= route->ksnr_timeout)) {
                        CDEBUG(D_NET,
                               "Too soon to retry route %pI4h "
                               "(cnted %d, interval %lld, %lld secs later)\n",
                               &route->ksnr_ipaddr,
                               route->ksnr_connected,
                               route->ksnr_retry_interval,
                               route->ksnr_timeout - now);
                        continue;
                }

                return route;
        }

        return NULL;
}
struct ksock_route *
ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
{
        struct list_head *tmp;
        struct ksock_route *route;

        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);

                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

                if (route->ksnr_scheduled)
                        return route;
        }

        return NULL;
}
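/*
 * Attach a tx to some connection to its target: use an existing conn when
 * one matches, otherwise create the peer_ni and/or kick off connection
 * attempts and park the tx on ksnp_tx_queue until one is established.
 * Returns 0 once the tx is owned by a conn or a peer_ni.
 */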
int
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
                       struct lnet_process_id id)
{
        struct ksock_peer_ni *peer_ni;
        struct ksock_conn *conn;
        rwlock_t *g_lock;
        int retry;
        int rc;

        LASSERT(tx->tx_conn == NULL);

        g_lock = &ksocknal_data.ksnd_global_lock;

        for (retry = 0;; retry = 1) {
                read_lock(g_lock);
                peer_ni = ksocknal_find_peer_locked(ni, id);
                if (peer_ni != NULL) {
                        if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
                                conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
                                if (conn != NULL) {
                                        /* I've got no routes that need to be
                                         * connecting and I do have an actual
                                         * connection... */
                                        ksocknal_queue_tx_locked(tx, conn);
                                        read_unlock(g_lock);
                                        return 0;
                                }
                        }
                }

                /* I'll need a write lock... */
                read_unlock(g_lock);

                write_lock_bh(g_lock);

                peer_ni = ksocknal_find_peer_locked(ni, id);
                if (peer_ni != NULL)
                        break;

                write_unlock_bh(g_lock);

                if ((id.pid & LNET_PID_USERFLAG) != 0) {
                        CERROR("Refusing to create a connection to "
                               "userspace process %s\n", libcfs_id2str(id));
                        return -EHOSTUNREACH;
                }

                if (retry) {
                        CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
                        return -EHOSTUNREACH;
                }

                rc = ksocknal_add_peer(ni, id,
                                       LNET_NIDADDR(id.nid),
                                       lnet_acceptor_port());
                if (rc != 0) {
                        CERROR("Can't add peer_ni %s: %d\n",
                               libcfs_id2str(id), rc);
                        return rc;
                }
        }

        ksocknal_launch_all_connections_locked(peer_ni);

        conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
        if (conn != NULL) {
                /* Connection exists; queue message on it */
                ksocknal_queue_tx_locked(tx, conn);
                write_unlock_bh(g_lock);
                return 0;
        }

        if (peer_ni->ksnp_accepting > 0 ||
            ksocknal_find_connecting_route_locked(peer_ni) != NULL) {
                /* the message is going to be pinned to the peer_ni */
                tx->tx_deadline = ktime_get_seconds() +
                                  lnet_get_lnd_timeout();

                /* Queue the message until a connection is established */
                list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
                write_unlock_bh(g_lock);
                return 0;
        }

        write_unlock_bh(g_lock);

        /* NB Routes may be ignored if connections to them failed recently */
        CNETERR("No usable routes to %s\n", libcfs_id2str(id));
        tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
        return -EHOSTUNREACH;
}
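/*
 * lnd_send() entry point for the socklnd.  Builds a tx around the lnetmsg
 * payload (marking it zero-copy capable when the payload meets
 * zc_min_payload) and launches it.  PF_MEMALLOC is held across the launch
 * for VM-flush messages, presumably so a send issued while flushing dirty
 * pages cannot recurse into memory reclaim.
 */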
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
        /* '1' for consistency with code that checks !mpflag to restore */
        unsigned int mpflag = 1;
        int type = lntmsg->msg_type;
        struct lnet_process_id target = lntmsg->msg_target;
        unsigned int payload_niov = lntmsg->msg_niov;
        struct bio_vec *payload_kiov = lntmsg->msg_kiov;
        unsigned int payload_offset = lntmsg->msg_offset;
        unsigned int payload_nob = lntmsg->msg_len;
        struct ksock_tx *tx;
        int desc_size;
        int rc;

        /* NB 'private' is different depending on what we're sending.
         * Just ignore it... */

        CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT(payload_nob == 0 || payload_niov > 0);
        LASSERT(payload_niov <= LNET_MAX_IOV);
        LASSERT(!in_interrupt());

        desc_size = offsetof(struct ksock_tx,
                             tx_frags.paged.kiov[payload_niov]);

        if (lntmsg->msg_vmflush)
                mpflag = memalloc_noreclaim_save();

        tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
        if (tx == NULL) {
                CERROR("Can't allocate tx desc type %d size %d\n",
                       type, desc_size);
                if (lntmsg->msg_vmflush)
                        memalloc_noreclaim_restore(mpflag);
                return -ENOMEM;
        }

        tx->tx_conn = NULL;             /* set when assigned a conn */
        tx->tx_lnetmsg = lntmsg;

        tx->tx_niov = 1;
        tx->tx_iov = &tx->tx_frags.paged.iov;
        tx->tx_kiov = tx->tx_frags.paged.kiov;
        tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
                                         payload_niov, payload_kiov,
                                         payload_offset, payload_nob);

        if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
                tx->tx_zc_capable = 1;

        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = 0;

        /* The first fragment will be set later in pro_pack */
        rc = ksocknal_launch_packet(ni, tx, target);
        /*
         * We can't test lntmsg->msg_vmflush again as lntmsg may
         * have been freed already
         */
        if (!mpflag)
                memalloc_noreclaim_restore(mpflag);

        if (rc == 0)
                return 0;

        lntmsg->msg_health_status = tx->tx_hstatus;
        ksocknal_free_tx(tx);
        return -EIO;
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
        struct task_struct *task = kthread_run(fn, arg, name);

        if (IS_ERR(task))
                return PTR_ERR(task);

        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        ksocknal_data.ksnd_nthreads++;
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
        return 0;
}
void
ksocknal_thread_fini(void)
{
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        if (--ksocknal_data.ksnd_nthreads == 0)
                wake_up_var(&ksocknal_data.ksnd_nthreads);
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
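/*
 * Reset a connection's rx machinery for the next incoming packet
 * (returns 1) or, when nob_to_skip != 0, set up to sink that many bytes
 * of an unwanted or truncated message into the shared slop buffer
 * (returns 0 so the caller keeps reading).
 */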
int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
        static char ksocknal_slop_buffer[4096];
        int nob;
        unsigned int niov;
        int skipped;

        LASSERT(conn->ksnc_proto != NULL);

        if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
                /* Remind the socket to ack eagerly... */
                ksocknal_lib_eager_ack(conn);
        }

        if (nob_to_skip == 0) {         /* right at next packet boundary now */
                conn->ksnc_rx_started = 0;
                smp_mb();               /* racing with timeout thread */

                switch (conn->ksnc_proto->pro_version) {
                case KSOCK_PROTO_V2:
                case KSOCK_PROTO_V3:
                        conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
                        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                        conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

                        conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
                        conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
                        conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
                        break;

                case KSOCK_PROTO_V1:
                        /* Receiving bare struct lnet_hdr */
                        conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
                        conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
                        conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);

                        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                        conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
                        conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
                        break;

                default:
                        LBUG();
                }
                conn->ksnc_rx_niov = 1;

                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_nkiov = 0;
                conn->ksnc_rx_csum = ~0;
                return 1;
        }

        /* Set up to skip as much as possible now.  If there's more left
         * (ran out of iov entries) we'll get called again */

        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
        conn->ksnc_rx_nob_left = nob_to_skip;
        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
        skipped = 0;
        niov = 0;

        do {
                nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

                conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
                conn->ksnc_rx_iov[niov].iov_len = nob;
                niov++;
                skipped += nob;
                nob_to_skip -= nob;

        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

        conn->ksnc_rx_niov = niov;
        conn->ksnc_rx_kiov = NULL;
        conn->ksnc_rx_nkiov = 0;
        conn->ksnc_rx_nob_wanted = skipped;
        return 0;
}
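/*
 * Per-connection rx state machine: read the ksock_msg header, handle any
 * ZC-ACK cookie it carries, hand LNet headers to lnet_parse() (which calls
 * back into ksocknal_recv() to set up the payload buffers), then finalize
 * the message and start over.  Returns 0 on progress, -EAGAIN on a short
 * read, -ve error after closing the conn.
 */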
static int
ksocknal_process_receive(struct ksock_conn *conn,
                         struct page **rx_scratch_pgs,
                         struct kvec *scratch_iov)
{
        struct lnet_hdr *lhdr;
        struct lnet_process_id *id;
        int rc;

        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

        /* NB: sched lock NOT held */
        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
        LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
        if (conn->ksnc_rx_nob_wanted != 0) {
                rc = ksocknal_receive(conn, rx_scratch_pgs,
                                      scratch_iov);

                if (rc <= 0) {
                        struct lnet_process_id ksnp_id;

                        ksnp_id = conn->ksnc_peer->ksnp_id;

                        LASSERT(rc != -EAGAIN);

                        if (rc == 0)
                                CDEBUG(D_NET, "[%p] EOF from %s "
                                       "ip %pI4h:%d\n", conn,
                                       libcfs_id2str(ksnp_id),
                                       &conn->ksnc_ipaddr,
                                       conn->ksnc_port);
                        else if (!conn->ksnc_closing)
                                CERROR("[%p] Error %d on read from %s "
                                       "ip %pI4h:%d\n", conn, rc,
                                       libcfs_id2str(ksnp_id),
                                       &conn->ksnc_ipaddr,
                                       conn->ksnc_port);

                        /* it's not an error if conn is being closed */
                        ksocknal_close_conn_and_siblings(conn,
                                                         (conn->ksnc_closing) ? 0 : rc);
                        return (rc == 0 ? -ESHUTDOWN : rc);
                }

                if (conn->ksnc_rx_nob_wanted != 0) {
                        /* short read */
                        return -EAGAIN;
                }
        }
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_KSM_HEADER:
                if (conn->ksnc_flip) {
                        __swab32s(&conn->ksnc_msg.ksm_type);
                        __swab32s(&conn->ksnc_msg.ksm_csum);
                        __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
                        __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
                }

                if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
                    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
                        CERROR("%s: Unknown message type: %x\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_type);
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, -EPROTO);
                        return -EPROTO;
                }

                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
                    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        /* NOOP Checksum error */
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, -EPROTO);
                        return -EIO;
                }

                if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
                        __u64 cookie = 0;

                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

                        if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
                                cookie = conn->ksnc_msg.ksm_zc_cookies[0];

                        rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
                                        conn->ksnc_msg.ksm_zc_cookies[1]);

                        if (rc != 0) {
                                CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                                       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
                                ksocknal_new_packet(conn, 0);
                                ksocknal_close_conn_and_siblings(conn, -EPROTO);
                                return rc;
                        }
                }

                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
                        ksocknal_new_packet(conn, 0);
                        return 0;       /* NOOP is done and just return */
                }

                conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
                conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
                conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

                conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
                conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);

                conn->ksnc_rx_niov = 1;
                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_nkiov = 0;

                goto again;     /* read lnet header now */

        case SOCKNAL_RX_LNET_HEADER:
                /* unpack message header */
                conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

                if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
                        /* Userspace peer_ni */
                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;

                        /* Substitute process ID assigned at connection time */
                        lhdr->src_pid = cpu_to_le32(id->pid);
                        lhdr->src_nid = cpu_to_le64(id->nid);
                }

                conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
                ksocknal_conn_addref(conn);     /* ++ref while parsing */

                rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
                                &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
                                conn->ksnc_peer->ksnp_id.nid, conn, 0);
                if (rc < 0) {
                        /* I just received garbage: give up on this conn */
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        ksocknal_conn_decref(conn);
                        return -EPROTO;
                }

                /* I'm racing with ksocknal_recv() */
                LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
                        conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

                if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
                        return 0;

                /* ksocknal_recv() got called */
                goto again;

        case SOCKNAL_RX_LNET_PAYLOAD:
                /* payload all received */
                rc = 0;

                if (conn->ksnc_rx_nob_left == 0 &&      /* not truncating */
                    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
                        rc = -EIO;
                }

                if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;

                        rc = conn->ksnc_proto->pro_handle_zcreq(conn,
                                        conn->ksnc_msg.ksm_zc_cookies[0],
                                        *ksocknal_tunables.ksnd_nonblk_zcack ||
                                        le64_to_cpu(lhdr->src_nid) != id->nid);
                }

                if (rc && conn->ksnc_lnet_msg)
                        conn->ksnc_lnet_msg->msg_health_status =
                                LNET_MSG_STATUS_REMOTE_ERROR;
                lnet_finalize(conn->ksnc_lnet_msg, rc);

                if (rc != 0) {
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        return -EPROTO;
                }
                /* Fall through */

        case SOCKNAL_RX_SLOP:
                /* starting new packet? */
                if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
                        return 0;       /* come back later */
                goto again;             /* try to finish reading slop now */

        default:
                break;
        }

        /* Not Reached */
        LBUG();
        return -EINVAL;                 /* keep gcc happy */
}
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
              int delayed, unsigned int niov,
              struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
              unsigned int rlen)
{
        struct ksock_conn *conn = private;
        struct ksock_sched *sched = conn->ksnc_scheduler;

        LASSERT(mlen <= rlen);
        LASSERT(niov <= LNET_MAX_IOV);

        conn->ksnc_lnet_msg = msg;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;

        if (mlen == 0 || niov == 0) {
                conn->ksnc_rx_nkiov = 0;
                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
                conn->ksnc_rx_niov = 0;
        } else {
                conn->ksnc_rx_niov = 0;
                conn->ksnc_rx_iov = NULL;
                conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
                conn->ksnc_rx_nkiov =
                        lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
                                          niov, kiov, offset, mlen);
        }

        LASSERT(mlen ==
                lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
                lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

        LASSERT(conn->ksnc_rx_scheduled);

        spin_lock_bh(&sched->kss_lock);

        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_PARSE_WAIT:
                list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
                wake_up(&sched->kss_waitq);
                LASSERT(conn->ksnc_rx_ready);
                break;

        case SOCKNAL_RX_PARSE:
                /* scheduler hasn't noticed I'm parsing yet */
                break;
        }

        conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

        spin_unlock_bh(&sched->kss_lock);
        ksocknal_conn_decref(conn);
        return 0;
}
static int
ksocknal_sched_cansleep(struct ksock_sched *sched)
{
        int rc;

        spin_lock_bh(&sched->kss_lock);

        rc = (!ksocknal_data.ksnd_shuttingdown &&
              list_empty(&sched->kss_rx_conns) &&
              list_empty(&sched->kss_tx_conns));

        spin_unlock_bh(&sched->kss_lock);
        return rc;
}
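/*
 * Scheduler thread body.  Each scheduler owns per-CPT scratch buffers for
 * receives and round-robins between its rx and tx conn queues, dropping
 * kss_lock around the actual socket I/O and yielding every SOCKNAL_RESCHED
 * loops so it doesn't hog the CPU.
 */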
int ksocknal_scheduler(void *arg)
{
        struct ksock_sched *sched;
        struct ksock_conn *conn;
        struct ksock_tx *tx;
        int rc;
        int nloops = 0;
        long id = (long)arg;
        struct page **rx_scratch_pgs;
        struct kvec *scratch_iov;

        sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];

        LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
                         sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
        if (!rx_scratch_pgs) {
                CERROR("Unable to allocate scratch pages\n");
                return -ENOMEM;
        }

        LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
                         sizeof(*scratch_iov) * LNET_MAX_IOV);
        if (!scratch_iov) {
                CERROR("Unable to allocate scratch iov\n");
                /* don't leak the page array allocated above */
                CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
                return -ENOMEM;
        }

        rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
        if (rc != 0) {
                CWARN("Can't set CPU partition affinity to %d: %d\n",
                      sched->kss_cpt, rc);
        }

        spin_lock_bh(&sched->kss_lock);

        while (!ksocknal_data.ksnd_shuttingdown) {
                int did_something = 0;

                /* Ensure I progress everything semi-fairly */

                if (!list_empty(&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          struct ksock_conn, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);

                        LASSERT(conn->ksnc_rx_scheduled);
                        LASSERT(conn->ksnc_rx_ready);

                        /* clear rx_ready in case receive isn't complete.
                         * Do it BEFORE we call process_recv, since
                         * data_ready can set it any time after we release
                         * kss_lock. */
                        conn->ksnc_rx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);

                        rc = ksocknal_process_receive(conn, rx_scratch_pgs,
                                                      scratch_iov);

                        spin_lock_bh(&sched->kss_lock);

                        /* I'm the only one that can clear this flag */
                        LASSERT(conn->ksnc_rx_scheduled);

                        /* Did process_receive get everything it wanted? */
                        if (rc == 0)
                                conn->ksnc_rx_ready = 1;

                        if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
                                /* Conn blocked waiting for ksocknal_recv()
                                 * I change its state (under lock) to signal
                                 * it can be rescheduled */
                                conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
                        } else if (conn->ksnc_rx_ready) {
                                /* reschedule for rx */
                                list_add_tail(&conn->ksnc_rx_list,
                                              &sched->kss_rx_conns);
                        } else {
                                conn->ksnc_rx_scheduled = 0;
                                /* drop my ref */
                                ksocknal_conn_decref(conn);
                        }

                        did_something = 1;
                }

                if (!list_empty(&sched->kss_tx_conns)) {
                        LIST_HEAD(zlist);

                        list_splice_init(&sched->kss_zombie_noop_txs, &zlist);

                        conn = list_entry(sched->kss_tx_conns.next,
                                          struct ksock_conn, ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);

                        LASSERT(conn->ksnc_tx_scheduled);
                        LASSERT(conn->ksnc_tx_ready);
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));

                        tx = list_entry(conn->ksnc_tx_queue.next,
                                        struct ksock_tx, tx_list);

                        if (conn->ksnc_tx_carrier == tx)
                                ksocknal_next_tx_carrier(conn);

                        /* dequeue now so empty list => more to send */
                        list_del(&tx->tx_list);

                        /* Clear tx_ready in case send isn't complete.  Do
                         * it BEFORE we call process_transmit, since
                         * write_space can set it any time after we release
                         * kss_lock. */
                        conn->ksnc_tx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);

                        if (!list_empty(&zlist)) {
                                /* free zombie noop txs, it's fast because
                                 * noop txs are just put in freelist */
                                ksocknal_txlist_done(NULL, &zlist, 0);
                        }

                        rc = ksocknal_process_transmit(conn, tx, scratch_iov);

                        if (rc == -ENOMEM || rc == -EAGAIN) {
                                /* Incomplete send: replace tx on HEAD of tx_queue */
                                spin_lock_bh(&sched->kss_lock);
                                list_add(&tx->tx_list,
                                         &conn->ksnc_tx_queue);
                        } else {
                                /* Complete send; tx -ref */
                                ksocknal_tx_decref(tx);

                                spin_lock_bh(&sched->kss_lock);
                                /* assume space for more */
                                conn->ksnc_tx_ready = 1;
                        }

                        if (rc == -ENOMEM) {
                                /* Do nothing; after a short timeout, this
                                 * conn will be reposted on kss_tx_conns. */
                        } else if (conn->ksnc_tx_ready &&
                                   !list_empty(&conn->ksnc_tx_queue)) {
                                /* reschedule for tx */
                                list_add_tail(&conn->ksnc_tx_list,
                                              &sched->kss_tx_conns);
                        } else {
                                conn->ksnc_tx_scheduled = 0;
                                /* drop my ref */
                                ksocknal_conn_decref(conn);
                        }

                        did_something = 1;
                }

                if (!did_something ||           /* nothing to do */
                    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
                        spin_unlock_bh(&sched->kss_lock);

                        nloops = 0;

                        if (!did_something) {   /* wait for something to do */
                                rc = wait_event_interruptible_exclusive(
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
                                LASSERT(rc == 0);
                        } else {
                                cond_resched();
                        }

                        spin_lock_bh(&sched->kss_lock);
                }
        }

        spin_unlock_bh(&sched->kss_lock);
        CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
        CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
        ksocknal_thread_fini();
        return 0;
}
/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(struct ksock_conn *conn)
{
        struct ksock_sched *sched;

        sched = conn->ksnc_scheduler;

        spin_lock_bh(&sched->kss_lock);

        conn->ksnc_rx_ready = 1;

        if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
                list_add_tail(&conn->ksnc_rx_list,
                              &sched->kss_rx_conns);
                conn->ksnc_rx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up(&sched->kss_waitq);
        }
        spin_unlock_bh(&sched->kss_lock);
}
/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(struct ksock_conn *conn)
{
        struct ksock_sched *sched;

        sched = conn->ksnc_scheduler;

        spin_lock_bh(&sched->kss_lock);

        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled && /* not being progressed */
            !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
                list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up(&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);
}
static const struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
        __u32 version = 0;

        if (hello->kshm_magic == LNET_PROTO_MAGIC)
                version = hello->kshm_version;
        else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
                version = __swab32(hello->kshm_version);

        if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
                if (*ksocknal_tunables.ksnd_protocol == 1)
                        return NULL;

                if (*ksocknal_tunables.ksnd_protocol == 2 &&
                    version == KSOCK_PROTO_V3)
                        return NULL;
#endif
                if (version == KSOCK_PROTO_V2)
                        return &ksocknal_protocol_v2x;

                if (version == KSOCK_PROTO_V3)
                        return &ksocknal_protocol_v3x;

                return NULL;
        }

        if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                struct lnet_magicversion *hmv;

                BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
                             offsetof(struct ksock_hello_msg, kshm_src_nid));

                hmv = (struct lnet_magicversion *)hello;

                if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
                    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
                        return &ksocknal_protocol_v1x;
        }

        return NULL;
}
int
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
                    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
        /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
        struct ksock_net *net = (struct ksock_net *)ni->ni_data;

        LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);

        /* rely on caller to hold a ref on socket so it wouldn't disappear */
        LASSERT(conn->ksnc_proto != NULL);

        hello->kshm_src_nid = ni->ni_nid;
        hello->kshm_dst_nid = peer_nid;
        hello->kshm_src_pid = the_lnet.ln_pid;

        hello->kshm_src_incarnation = net->ksnn_incarnation;
        hello->kshm_ctype = conn->ksnc_type;

        return conn->ksnc_proto->pro_send_hello(conn, hello);
}
static int
ksocknal_invert_type(int type)
{
        switch (type) {
        case SOCKLND_CONN_ANY:
        case SOCKLND_CONN_CONTROL:
                return type;
        case SOCKLND_CONN_BULK_IN:
                return SOCKLND_CONN_BULK_OUT;
        case SOCKLND_CONN_BULK_OUT:
                return SOCKLND_CONN_BULK_IN;
        default:
                return SOCKLND_CONN_NONE;
        }
}
int
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
                    struct ksock_hello_msg *hello,
                    struct lnet_process_id *peerid,
                    __u64 *incarnation)
{
        /* Return < 0        fatal error
         *        0          success
         *        EALREADY   lost connection race
         *        EPROTO     protocol version mismatch
         */
        struct socket *sock = conn->ksnc_sock;
        int active = (conn->ksnc_proto != NULL);
        int timeout;
        int proto_match;
        int rc;
        const struct ksock_proto *proto;
        struct lnet_process_id recv_id;

        /* socket type set on active connections - not set on passive */
        LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

        timeout = active ? lnet_get_lnd_timeout() :
                           lnet_acceptor_timeout();

        rc = lnet_sock_read(sock, &hello->kshm_magic,
                            sizeof(hello->kshm_magic), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                return rc;
        }

        if (hello->kshm_magic != LNET_PROTO_MAGIC &&
            hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
            hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                /* Unexpected magic! */
                CERROR("Bad magic(1) %#08x (%#08x expected) from "
                       "%pI4h\n", __cpu_to_le32(hello->kshm_magic),
                       LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
                return -EPROTO;
        }

        rc = lnet_sock_read(sock, &hello->kshm_version,
                            sizeof(hello->kshm_version), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                return rc;
        }

        proto = ksocknal_parse_proto_version(hello);
        if (proto == NULL) {
                if (!active) {
                        /* unknown protocol from peer_ni, tell peer_ni my protocol */
                        conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                        if (*ksocknal_tunables.ksnd_protocol == 2)
                                conn->ksnc_proto = &ksocknal_protocol_v2x;
                        else if (*ksocknal_tunables.ksnd_protocol == 1)
                                conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
                }

                CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
                       conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);

                return -EPROTO;
        }

        proto_match = (conn->ksnc_proto == proto);
        conn->ksnc_proto = proto;

        /* receive the rest of hello message anyway */
        rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
        if (rc != 0) {
                CERROR("Error %d reading or checking hello from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                return rc;
        }

        *incarnation = hello->kshm_src_incarnation;

        if (hello->kshm_src_nid == LNET_NID_ANY) {
                CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
                       &conn->ksnc_ipaddr);
                return -EPROTO;
        }

        if (!active &&
            conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                /* Userspace NAL assigns peer_ni process ID from socket */
                recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
                recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
        } else {
                recv_id.nid = hello->kshm_src_nid;
                recv_id.pid = hello->kshm_src_pid;
        }

        if (!active) {
                *peerid = recv_id;

                /* peer_ni determines type */
                conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
                if (conn->ksnc_type == SOCKLND_CONN_NONE) {
                        CERROR("Unexpected type %d from %s ip %pI4h\n",
                               hello->kshm_ctype, libcfs_id2str(*peerid),
                               &conn->ksnc_ipaddr);
                        return -EPROTO;
                }

                return 0;
        }

        if (peerid->pid != recv_id.pid ||
            peerid->nid != recv_id.nid) {
                LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
                                   " %pI4h, but they claimed they were "
                                   "%s; please check your Lustre "
                                   "configuration.\n",
                                   libcfs_id2str(*peerid),
                                   &conn->ksnc_ipaddr,
                                   libcfs_id2str(recv_id));
                return -EPROTO;
        }

        if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                /* Possible protocol mismatch or I lost the connection race */
                return proto_match ? EALREADY : EPROTO;
        }

        if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
                CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
                       conn->ksnc_type, libcfs_id2str(*peerid),
                       &conn->ksnc_ipaddr,
                       hello->kshm_ctype);
                return -EPROTO;
        }

        return 0;
}
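/*
 * connd work item: open sockets for every connection type this route still
 * wants.  A connection race (positive rc from create_conn, or the peer
 * actively connecting to us) is requeued after a short delay; a real
 * failure backs off exponentially between min/max_reconnectms and fails
 * any tx blocked on the peer_ni.
 */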
static int
ksocknal_connect(struct ksock_route *route)
{
        LIST_HEAD(zombies);
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
        int type;
        int wanted;
        struct socket *sock;
        time64_t deadline;
        int retry_later = 0;
        int rc = 0;

        deadline = ktime_get_seconds() + lnet_get_lnd_timeout();

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        LASSERT(route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);

        route->ksnr_connecting = 1;

        for (;;) {
                wanted = ksocknal_route_mask() & ~route->ksnr_connected;

                /* stop connecting if peer_ni/route got closed under me, or
                 * route got connected while queued */
                if (peer_ni->ksnp_closing || route->ksnr_deleted ||
                    wanted == 0) {
                        retry_later = 0;
                        break;
                }

                /* reschedule if peer_ni is connecting to me */
                if (peer_ni->ksnp_accepting > 0) {
                        CDEBUG(D_NET,
                               "peer_ni %s(%d) already connecting to me, retry later.\n",
                               libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
                        retry_later = 1;
                }

                if (retry_later) /* needs reschedule */
                        break;

                if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
                        type = SOCKLND_CONN_ANY;
                } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
                        type = SOCKLND_CONN_CONTROL;
                } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
                        type = SOCKLND_CONN_BULK_IN;
                } else {
                        LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
                        type = SOCKLND_CONN_BULK_OUT;
                }

                write_unlock_bh(&ksocknal_data.ksnd_global_lock);

                if (ktime_get_seconds() >= deadline) {
                        rc = -ETIMEDOUT;
                        lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
                                                   route->ksnr_ipaddr,
                                                   route->ksnr_port);
                        goto failed;
                }

                sock = lnet_connect(peer_ni->ksnp_id.nid,
                                    route->ksnr_myiface,
                                    route->ksnr_ipaddr, route->ksnr_port,
                                    peer_ni->ksnp_ni->ni_net_ns);
                if (IS_ERR(sock)) {
                        rc = PTR_ERR(sock);
                        goto failed;
                }

                rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
                if (rc < 0) {
                        lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
                                                   route->ksnr_ipaddr,
                                                   route->ksnr_port);
                        goto failed;
                }

                /* A +ve RC means I have to retry because I lost the connection
                 * race or I have to renegotiate protocol version */
                retry_later = (rc != 0);
                if (retry_later)
                        CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
                               libcfs_nid2str(peer_ni->ksnp_id.nid));

                write_lock_bh(&ksocknal_data.ksnd_global_lock);
        }

        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;

        if (retry_later) {
                /* re-queue for attention; this frees me up to handle
                 * the peer_ni's incoming connection request */

                if (rc == EALREADY ||
                    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
                        /* We want to introduce a delay before next
                         * attempt to connect if we lost conn race,
                         * but the race is resolved quickly usually,
                         * so min_reconnectms should be good heuristic */
                        route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
                        route->ksnr_timeout = ktime_get_seconds() +
                                              route->ksnr_retry_interval;
                }

                ksocknal_launch_connection_locked(route);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
        return retry_later;

failed:
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;

        /* This is a retry rather than a new connection */
        route->ksnr_retry_interval *= 2;
        route->ksnr_retry_interval =
                max_t(time64_t, route->ksnr_retry_interval,
                      *ksocknal_tunables.ksnd_min_reconnectms / 1000);
        route->ksnr_retry_interval =
                min_t(time64_t, route->ksnr_retry_interval,
                      *ksocknal_tunables.ksnd_max_reconnectms / 1000);

        LASSERT(route->ksnr_retry_interval);
        route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;

        if (!list_empty(&peer_ni->ksnp_tx_queue) &&
            peer_ni->ksnp_accepting == 0 &&
            ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
                struct ksock_conn *conn;

                /* ksnp_tx_queue is queued on a conn on successful
                 * connection for V1.x and V2.x */
                if (!list_empty(&peer_ni->ksnp_conns)) {
                        conn = list_entry(peer_ni->ksnp_conns.next,
                                          struct ksock_conn, ksnc_list);
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
                }

                /* take all the blocked packets while I've got the lock and
                 * complete below... */
                list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_peer_failed(peer_ni);
        ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
        return 0;
}
/*
 * Check whether we need to create more connds.  Try to create a new
 * thread if necessary; @timeout may be updated if thread creation failed,
 * so the caller won't keep retrying while out of resources.
 */
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
        char name[16];
        int rc;
        int total = ksocknal_data.ksnd_connd_starting +
                    ksocknal_data.ksnd_connd_running;

        if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
                /* still in initializing */
                return 0;
        }

        if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
            total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
                /* can't create more connd, or still have enough
                 * threads to handle more connecting */
                return 0;
        }

        if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
                /* no pending connecting request */
                return 0;
        }

        if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
                /* may run out of resource, retry later */
                *timeout = cfs_time_seconds(1);
                return 0;
        }

        if (ksocknal_data.ksnd_connd_starting > 0) {
                /* serialize starting to avoid flood */
                return 0;
        }

        ksocknal_data.ksnd_connd_starting_stamp = sec;
        ksocknal_data.ksnd_connd_starting++;
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

        /* NB: total is the next id */
        snprintf(name, sizeof(name), "socknal_cd%02d", total);
        rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
        if (rc == 0)
                return 1;

        /* we tried ... */
        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
        ksocknal_data.ksnd_connd_starting--;
        ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

        return 1;
}
/*
 * Check whether the current thread can exit: returns 1 if there are too
 * many threads and none were created in the past 120 seconds.
 * May also update @timeout to make the caller come back
 * again to recheck these conditions.
 */
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
        int val;

        if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
                /* still in initializing */
                return 0;
        }

        if (ksocknal_data.ksnd_connd_starting > 0) {
                /* in progress of starting new thread */
                return 0;
        }

        if (ksocknal_data.ksnd_connd_running <=
            *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
                return 0;
        }

        /* created thread in past 120 seconds? */
        val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
                    SOCKNAL_CONND_TIMEOUT - sec);

        *timeout = (val > 0) ? cfs_time_seconds(val) :
                               cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
        if (val > 0)
                return 0;

        /* no creating in past 120 seconds */

        return ksocknal_data.ksnd_connd_running >
               ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
/* Go through the connd_routes queue looking for a route that we can process
 * right now; @timeout_p may be updated if we need to come back later */
static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
        time64_t now = ktime_get_seconds();
        struct ksock_route *route;

        /* connd_routes can contain both pending and ordinary routes */
        list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
                            ksnr_connd_list) {

                if (route->ksnr_retry_interval == 0 ||
                    now >= route->ksnr_timeout)
                        return route;

                if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
                    *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
                        *timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
        }

        return NULL;
}
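/*
 * connd thread body: alternates between accepting passively-created
 * connections (ksnd_connd_connreqs) and dialing queued routes, while the
 * check_start/check_stop helpers above grow and shrink the connd pool
 * between ksnd_nconnds and ksnd_nconnds_max.
 */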
int
ksocknal_connd(void *arg)
{
        spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
        struct ksock_connreq *cr;
        wait_queue_entry_t wait;
        int nloops = 0;
        int cons_retry = 0;

        init_waitqueue_entry(&wait, current);

        spin_lock_bh(connd_lock);

        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
        ksocknal_data.ksnd_connd_starting--;
        ksocknal_data.ksnd_connd_running++;

        while (!ksocknal_data.ksnd_shuttingdown) {
                struct ksock_route *route = NULL;
                time64_t sec = ktime_get_real_seconds();
                long timeout = MAX_SCHEDULE_TIMEOUT;
                int dropped_lock = 0;

                if (ksocknal_connd_check_stop(sec, &timeout)) {
                        /* explicitly wakeup another one to check stop */
                        wake_up(&ksocknal_data.ksnd_connd_waitq);
                        break;
                }

                if (ksocknal_connd_check_start(sec, &timeout)) {
                        /* created new thread */
                        dropped_lock = 1;
                }

                if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
                        /* Connection accepted by the listener */
                        cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
                                        struct ksock_connreq, ksncr_list);

                        list_del(&cr->ksncr_list);
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;

                        ksocknal_create_conn(cr->ksncr_ni, NULL,
                                             cr->ksncr_sock, SOCKLND_CONN_NONE);
                        lnet_ni_decref(cr->ksncr_ni);
                        LIBCFS_FREE(cr, sizeof(*cr));

                        spin_lock_bh(connd_lock);
                }

                /* Only handle an outgoing connection request if there
                 * is a thread left to handle incoming connections and
                 * create new connd */
                if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
                    ksocknal_data.ksnd_connd_running)
                        route = ksocknal_connd_get_route_locked(&timeout);

                if (route != NULL) {
                        list_del(&route->ksnr_connd_list);
                        ksocknal_data.ksnd_connd_connecting++;
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;

                        if (ksocknal_connect(route)) {
                                /* consecutive retry */
                                if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
                                        CWARN("massive consecutive "
                                              "re-connecting to %pI4h\n",
                                              &route->ksnr_ipaddr);
                                        cons_retry = 0;
                                }
                        } else {
                                cons_retry = 0;
                        }

                        ksocknal_route_decref(route);

                        spin_lock_bh(connd_lock);
                        ksocknal_data.ksnd_connd_connecting--;
                }

                if (dropped_lock) {
                        if (++nloops < SOCKNAL_RESCHED)
                                continue;
                        spin_unlock_bh(connd_lock);
                        nloops = 0;
                        cond_resched();
                        spin_lock_bh(connd_lock);
                        continue;
                }

                /* Nothing to do for 'timeout' */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_unlock_bh(connd_lock);

                nloops = 0;
                schedule_timeout(timeout);

                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
        ksocknal_data.ksnd_connd_running--;
        spin_unlock_bh(connd_lock);

        ksocknal_thread_fini();
        return 0;
}
static struct ksock_conn *
ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
{
        /* We're called with a shared lock on ksnd_global_lock */
        struct ksock_conn *conn;
        struct list_head *ctmp;
        struct ksock_tx *tx;

        list_for_each(ctmp, &peer_ni->ksnp_conns) {
                int error;

                conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

                /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                LASSERT(!conn->ksnc_closing);

                error = conn->ksnc_sock->sk->sk_err;
                if (error != 0) {
                        ksocknal_conn_addref(conn);

                        switch (error) {
                        case ECONNRESET:
                                CNETERR("A connection with %s "
                                        "(%pI4h:%d) was reset; "
                                        "it may have rebooted.\n",
                                        libcfs_id2str(peer_ni->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        case ETIMEDOUT:
                                CNETERR("A connection with %s "
                                        "(%pI4h:%d) timed out; the "
                                        "network or node may be down.\n",
                                        libcfs_id2str(peer_ni->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        default:
                                CNETERR("An unexpected network error %d "
                                        "occurred with %s "
                                        "(%pI4h:%d)\n", error,
                                        libcfs_id2str(peer_ni->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        }

                        return conn;
                }

                if (conn->ksnc_rx_started &&
                    ktime_get_seconds() >= conn->ksnc_rx_deadline) {
                        /* Timed out incomplete incoming message */
                        ksocknal_conn_addref(conn);
                        CNETERR("Timeout receiving from %s (%pI4h:%d), "
                                "state %d wanted %d left %d\n",
                                libcfs_id2str(peer_ni->ksnp_id),
                                &conn->ksnc_ipaddr,
                                conn->ksnc_port,
                                conn->ksnc_rx_state,
                                conn->ksnc_rx_nob_wanted,
                                conn->ksnc_rx_nob_left);
                        return conn;
                }

                if ((!list_empty(&conn->ksnc_tx_queue) ||
                     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
                    ktime_get_seconds() >= conn->ksnc_tx_deadline) {
                        /* Timed out messages queued for sending or
                         * buffered in the socket's send buffer */
                        ksocknal_conn_addref(conn);
                        list_for_each_entry(tx, &conn->ksnc_tx_queue,
                                            tx_list)
                                tx->tx_hstatus =
                                        LNET_MSG_STATUS_LOCAL_TIMEOUT;
                        CNETERR("Timeout sending data to %s (%pI4h:%d); "
                                "the network or that node may be down.\n",
                                libcfs_id2str(peer_ni->ksnp_id),
                                &conn->ksnc_ipaddr, conn->ksnc_port);
                        return conn;
                }
        }

        return NULL;
}
static void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
	struct ksock_tx *tx;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
		tx = list_entry(peer_ni->ksnp_tx_queue.next,
				struct ksock_tx, tx_list);

		if (ktime_get_seconds() < tx->tx_deadline)
			break;

		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;

		list_move_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
}
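/* Send a noop PING if the keepalive interval has elapsed with no traffic
 * from peer_ni.  Returns 0 if no keepalive was needed and the global lock
 * was never dropped, 1 if a keepalive was launched, or a negative errno on
 * failure; in the non-zero cases the lock was dropped and re-taken, so the
 * caller must restart its scan. */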
static int
ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer_ni->ksnp_conns))
		return 0;

	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
				  *ksocknal_tunables.ksnd_keepalive)
		return 0;

	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
		return 0;

	/* retry 10 secs later, so we won't put pressure on this
	 * peer_ni if we fail to send a keepalive this time */
	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;

	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK, don't need keepalive */
			return 0;
		}

		spin_unlock_bh(&sched->kss_lock);
	}
	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);
	return -EIO;
}
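/* Check every peer_ni on hash chain 'idx' for keepalives due, timed-out
 * connections, stale queued transmits and stale zero-copy requests.  Any
 * handling that requires dropping ksnd_global_lock restarts the scan from
 * the top of the chain, since peers may vanish while the lock is down. */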
static void
ksocknal_check_peer_timeouts(int idx)
{
	struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

again:
	/* NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look... */
	read_lock(&ksocknal_data.ksnd_global_lock);

	hlist_for_each_entry(peer_ni, peers, ksnp_list) {
		struct ksock_tx *tx_stale;
		time64_t deadline = 0;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}
		conn = ksocknal_find_timed_out_conn(peer_ni);

		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/* NB we won't find this one again, but we can't
			 * just proceed with the next peer_ni, since we dropped
			 * ksnd_global_lock and it might be dead already! */
			ksocknal_conn_decref(conn);
			goto again;
		}
		/* we can't process stale txs right here because we're
		 * holding only shared lock */
		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			struct ksock_tx *tx;

			tx = list_entry(peer_ni->ksnp_tx_queue.next,
					struct ksock_tx, tx_list);
			if (ktime_get_seconds() >= tx->tx_deadline) {
				ksocknal_peer_addref(peer_ni);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer_ni);

				ksocknal_peer_decref(peer_ni);
				goto again;
			}
		}
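		/* Zero-copy sends stay on ksnp_zc_req_list until the peer_ni
		 * returns the matching ZC_ACK cookie, so they can go stale
		 * even when nothing remains on the transmit queues. */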
		if (list_empty(&peer_ni->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer_ni->ksnp_lock);
		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
			if (ktime_get_seconds() < tx->tx_deadline)
				break;
			/* ignore the TX if connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (tx_stale == NULL)
				tx_stale = tx;
		}

		if (tx_stale == NULL) {
			spin_unlock(&peer_ni->ksnp_lock);
			continue;
		}
		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer_ni->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);
		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
		       "oldest(%p) timed out %lld secs ago, "
		       "resid: %d, wmem: %d\n",
		       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
		       ktime_get_seconds() - deadline,
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);
		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
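/* Reaper thread: retires dying connections in two stages (terminate
 * connections on the deathrow list, destroy them once they reach the
 * zombie list), reschedules connections that stalled on ENOMEM, and
 * periodically sweeps a chunk of the peer_ni table for timeouts. */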
int ksocknal_reaper(void *arg)
{
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	LIST_HEAD(enomem_conns);
	int nenomem_conns;
	time64_t timeout;
	int i;
	int peer_index = 0;
	time64_t deadline = ktime_get_seconds();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}
		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}
		list_splice_init(&ksocknal_data.ksnd_enomem_conns,
				 &enomem_conns);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}
		/* careful with the jiffy wrap... */
		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
			unsigned int lnd_timeout;

			/* Time to check for timeouts on a few more peers: I
			 * do checks every 'p' seconds on a proportion of the
			 * peer_ni table and I need to check every connection
			 * 'n' times within a timeout interval, to ensure I
			 * detect a timeout on any connection within (n+1)/n
			 * times the timeout interval. */
			lnd_timeout = lnet_get_lnd_timeout();
			if (lnd_timeout > n * p)
				chunk = (chunk * n * p) / lnd_timeout;
			if (chunk == 0)
				chunk = 1;
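			/* For example (numbers illustrative only): with 512
			 * hash buckets, n = 4, p = 1 and a 50s LND timeout,
			 * chunk = 512 * 4 * 1 / 50 = 40, so ~40 buckets are
			 * swept per second and the whole table is covered
			 * about 4 times per timeout interval. */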
			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     HASH_SIZE(ksocknal_data.ksnd_peers);
			}

			deadline += p;
		}
		if (nenomem_conns != 0) {
			/* Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list. */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
						     timeout;
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(cfs_time_seconds(timeout));

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}