 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017, Intel Corporation.
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 * This file is part of Lustre, https://wiki.whamcloud.com/
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#include <libcfs/linux/linux-mem.h>
ksocknal_alloc_tx(int type, int size)
struct ksock_tx *tx = NULL;
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
/* search for a noop tx in the free list */
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
struct ksock_tx, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
LIBCFS_ALLOC(tx, size);
atomic_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
tx->tx_hstatus = LNET_MSG_STATUS_OK;
tx->tx_desc_size = size;
atomic_inc(&ksocknal_data.ksnd_nactive_txs);
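/* NB (annotation): NOOP txs (keepalive pings and standalone ZC ACKs) are
 * constant-size and allocated at a high rate, so completed ones are parked
 * on ksnd_idle_noop_txs and reused here instead of going back through
 * LIBCFS_ALLOC/LIBCFS_FREE; see ksocknal_free_tx() below. */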
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
CERROR("Can't allocate noop tx desc\n");
tx->tx_lnetmsg = NULL;
tx->tx_nonblk = nonblk;
tx->tx_msg.ksm_csum = 0;
tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_msg.ksm_zc_cookies[1] = cookie;
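/* Usage sketch (annotation): the keepalive path below builds one of these
 * with the reserved cookie 1 and launches it like any other tx, roughly
 *
 *	tx = ksocknal_alloc_tx_noop(1, 1);
 *	if (tx != NULL)
 *		ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id);
 *
 * see ksocknal_send_keepalive_locked(). ksm_zc_cookies[1] carries the
 * cookie being ACKed; ksm_zc_cookies[0] stays 0 because a NOOP never
 * requests zero-copy itself. */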
ksocknal_free_tx(struct ksock_tx *tx)
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
spin_lock(&ksocknal_data.ksnd_tx_lock);
list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
LIBCFS_FREE(tx, tx->tx_desc_size);
ksocknal_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
struct kvec *iov = &tx->tx_hdr;
LASSERT(tx->tx_niov > 0);
/* Never touch tx->tx_hdr inside ksocknal_lib_send_hdr() */
rc = ksocknal_lib_send_hdr(conn, tx, scratch_iov);
if (rc <= 0) /* sent nothing? */
LASSERT(nob <= tx->tx_resid);
LASSERT(tx->tx_niov == 1);
if (nob < (int)iov->iov_len) {
iov->iov_base += nob;
LASSERT(nob == iov->iov_len);
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
struct bio_vec *kiov = tx->tx_kiov;
LASSERT(tx->tx_niov == 0);
LASSERT(tx->tx_nkiov > 0);
/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);
if (rc <= 0) /* sent nothing? */
LASSERT(nob <= tx->tx_resid);
LASSERT(tx->tx_nkiov > 0);
if (nob < (int)kiov->bv_len) {
kiov->bv_offset += nob;
nob -= (int)kiov->bv_len;
tx->tx_kiov = ++kiov;
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
if (ksocknal_data.ksnd_stall_tx != 0)
schedule_timeout_uninterruptible(
cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
LASSERT(tx->tx_resid != 0);
rc = ksocknal_connsock_addref(conn);
LASSERT(conn->ksnc_closing);
if (ksocknal_data.ksnd_enomem_tx > 0) {
ksocknal_data.ksnd_enomem_tx--;
} else if (tx->tx_niov != 0) {
rc = ksocknal_send_hdr(conn, tx, scratch_iov);
rc = ksocknal_send_kiov(conn, tx, scratch_iov);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (rc > 0) /* sent something? */
conn->ksnc_tx_bufnob += rc; /* account it */
if (bufnob < conn->ksnc_tx_bufnob) {
/* allocated send buffer bytes < computed; infer
 * something got ACKed */
conn->ksnc_tx_deadline = ktime_get_seconds() +
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = bufnob;
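/* NB (annotation): ksnc_tx_bufnob tracks the bytes we have pushed into the
 * socket since the deadline was last reset, while sk_wmem_queued only
 * shrinks when TCP frees skbs the peer has ACKed. So sk_wmem_queued
 * dropping below our running count is indirect evidence that the peer is
 * alive and making progress, and we push the tx deadline out. */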
if (rc <= 0) { /* Didn't write anything? */
/* some stacks return 0 instead of -EAGAIN */
/* Check if EAGAIN is due to memory pressure */
if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
/* socket's wmem_queued now includes 'rc' bytes */
atomic_sub(rc, &conn->ksnc_tx_nob);
} while (tx->tx_resid != 0);
ksocknal_connsock_decref(conn);
ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
struct kvec *iov = conn->ksnc_rx_iov;
LASSERT(conn->ksnc_rx_niov > 0);
/* Never touch conn->ksnc_rx_iov or change connection
 * status inside ksocknal_lib_recv_iov */
rc = ksocknal_lib_recv_iov(conn, scratchiov);
/* received something... */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
LASSERT(conn->ksnc_rx_niov > 0);
if (nob < (int)iov->iov_len) {
iov->iov_base += nob;
conn->ksnc_rx_iov = ++iov;
conn->ksnc_rx_niov--;
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
struct kvec *scratch_iov)
struct bio_vec *kiov = conn->ksnc_rx_kiov;
LASSERT(conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
 * status inside ksocknal_lib_recv_kiov */
rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);
/* received something... */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
LASSERT(conn->ksnc_rx_nkiov > 0);
if (nob < (int)kiov->bv_len) {
kiov->bv_offset += nob;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
struct kvec *scratch_iov)
/* Return 1 on success, 0 on EOF, < 0 on error.
 * Caller checks ksnc_rx_nob_wanted to determine
 * progress/completion. */
if (ksocknal_data.ksnd_stall_rx != 0)
schedule_timeout_uninterruptible(
cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
rc = ksocknal_connsock_addref(conn);
LASSERT(conn->ksnc_closing);
if (conn->ksnc_rx_niov != 0)
rc = ksocknal_recv_iov(conn, scratch_iov);
rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
/* error/EOF or partial receive */
} else if (rc == 0 && conn->ksnc_rx_started) {
/* EOF in the middle of a message */
/* Completed a fragment */
if (conn->ksnc_rx_nob_wanted == 0) {
ksocknal_connsock_decref(conn);
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
enum lnet_msg_hstatus hstatus = tx->tx_hstatus;
LASSERT(ni != NULL || tx->tx_conn != NULL);
if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
if (hstatus == LNET_MSG_STATUS_OK)
hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
if (tx->tx_conn != NULL)
ksocknal_conn_decref(tx->tx_conn);
ksocknal_free_tx(tx);
if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP goes without lnetmsg */
lnetmsg->msg_health_status = hstatus;
lnet_finalize(lnetmsg, rc);
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
while (!list_empty(txlist)) {
tx = list_entry(txlist->next, struct ksock_tx, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
CNETERR("Deleting noop packet\n");
list_del(&tx->tx_list);
if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
if (error == -ETIMEDOUT)
LNET_MSG_STATUS_LOCAL_TIMEOUT;
else if (error == -ENETDOWN ||
error == -EHOSTUNREACH ||
error == -ENETUNREACH ||
error == -ECONNREFUSED ||
error == -ECONNRESET)
tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
 * for all other errors we don't want to
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
LASSERT(atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
ksocknal_check_zc_req(struct ksock_tx *tx)
struct ksock_conn *conn = tx->tx_conn;
struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
 * to ksnp_zc_req_list if some fragment of this message should be sent
 * zero-copy. Our peer_ni will send an ACK containing this cookie when
 * she has received this message to tell us we can signal completion.
 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
 * ksnp_zc_req_list. */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
tx->tx_zc_checked = 1;
if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
!conn->ksnc_zc_capable)
/* assign a cookie and queue tx on the pending list; it will be
 * released when a matching ack is received. See ksocknal_handle_zcack() */
ksocknal_tx_addref(tx);
spin_lock(&peer_ni->ksnp_lock);
/* ZC_REQ is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;
if (peer_ni->ksnp_zc_next_cookie == 0)
peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
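/* NB (annotation): cookies must never wrap into the reserved range:
 * 0 means "no ZC request pending" and SOCKNAL_KEEPALIVE_PING (cookie 1,
 * see ksocknal_send_keepalive_locked() below) is the keepalive cookie,
 * so on overflow the counter restarts just above them. */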
list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);
spin_unlock(&peer_ni->ksnp_lock);
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
tx->tx_zc_checked = 0;
spin_lock(&peer_ni->ksnp_lock);
if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
/* Not waiting for an ACK */
spin_unlock(&peer_ni->ksnp_lock);
tx->tx_msg.ksm_zc_cookies[0] = 0;
list_del(&tx->tx_zc_list);
spin_unlock(&peer_ni->ksnp_lock);
ksocknal_tx_decref(tx);
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
bool error_sim = false;
if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
rc = ksocknal_transmit(conn, tx, scratch_iov);
CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
if (tx->tx_resid == 0) {
/* Sent everything OK */
counter++; /* exponential backoff on warnings */
if ((counter & (-counter)) == counter) /* true only for powers of two */
CWARN("%u ENOMEM tx %p (%u allocated)\n",
counter, conn, atomic_read(&libcfs_kmemory));
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
ksocknal_data.ksnd_reaper_waketime)
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 * set the health status of the message which determines
 * whether we should retry the transmit
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
 * set the health status of the message which determines
 * whether we should retry the transmit
if (rc == -ETIMEDOUT)
tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
if (!conn->ksnc_closing) {
LCONSOLE_WARN("Host %pI4h reset our connection "
"while we were sending data; it may have "
LCONSOLE_WARN("There was an unexpected network error "
"while writing to %pI4h: %d.\n",
&conn->ksnc_ipaddr, rc);
CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port);
if (tx->tx_zc_checked)
ksocknal_uncheck_zc_req(tx);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings(conn,
(conn->ksnc_closing) ? 0 : rc);
ksocknal_launch_connection_locked(struct ksock_route *route)
/* called holding write lock on ksnd_global_lock */
LASSERT(!route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
list_add_tail(&route->ksnr_connd_list,
&ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
struct ksock_route *route;
/* called holding write lock on ksnd_global_lock */
/* launch any/all connections that need it */
route = ksocknal_find_connectable_route_locked(peer_ni);
ksocknal_launch_connection_locked(route);
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
struct list_head *tmp;
struct ksock_conn *conn;
struct ksock_conn *typed = NULL;
struct ksock_conn *fallback = NULL;
list_for_each(tmp, &peer_ni->ksnp_conns) {
struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
LASSERT(!c->ksnc_closing);
LASSERT(c->ksnc_proto != NULL &&
c->ksnc_proto->pro_match_tx != NULL);
rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
case SOCKNAL_MATCH_YES: /* typed connection */
if (typed == NULL || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
case SOCKNAL_MATCH_MAY: /* fallback connection */
if (fallback == NULL || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
/* prefer the typed selection */
conn = (typed != NULL) ? typed : fallback;
conn->ksnc_tx_last_post = ktime_get_seconds();
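/* NB (annotation): selection policy as implemented above: among conns
 * whose protocol reports MATCH_YES (typed) or MATCH_MAY (fallback), pick
 * the one with the fewest bytes queued (ksnc_tx_nob + socket wmem); ties
 * go to the least recently used conn (oldest ksnc_tx_last_post) when
 * ksnd_round_robin is set, and a typed match always beats a fallback. */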
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
conn->ksnc_proto->pro_pack(tx);
atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
struct ksock_sched *sched = conn->ksnc_scheduler;
struct ksock_msg *msg = &tx->tx_msg;
struct ksock_tx *ztx = NULL;
/* called holding global lock (read or irq-write) and caller may
 * not have dropped this lock between finding conn and calling me,
 * so we don't need the {get,put}connsock dance to deref
LASSERT(!conn->ksnc_closing);
CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port);
ksocknal_tx_prep(conn, tx);
/* Ensure the frags we've been given EXACTLY match the number of
 * bytes we want to send. Many TCP/IP stacks disregard any total
 * size parameters passed to them and just look at the frags.
 * We always expect at least 1 mapped fragment containing the
 * complete ksocknal message header.
LASSERT(lnet_iov_nob(tx->tx_niov, &tx->tx_hdr) +
lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
(unsigned int)tx->tx_nob);
LASSERT(tx->tx_niov >= 1);
LASSERT(tx->tx_resid == tx->tx_nob);
CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
spin_lock_bh(&sched->kss_lock);
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline = ktime_get_seconds() +
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = 0;
smp_mb(); /* order with adding to tx_queue */
if (msg->ksm_type == KSOCK_MSG_NOOP) {
/* The packet is a noop ZC ACK; try to piggyback the ack cookie
 * on a normal packet so I don't need to send it */
LASSERT(msg->ksm_zc_cookies[1] != 0);
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */
/* It's a normal packet - can it piggyback a noop zc-ack that
 * has been queued already? */
LASSERT(msg->ksm_zc_cookies[1] == 0);
LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
/* ztx will be released later */
atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
if (conn->ksnc_tx_ready && /* able to send */
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
ksocknal_conn_addref(conn);
list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
time64_t now = ktime_get_seconds();
struct list_head *tmp;
struct ksock_route *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
if (route->ksnr_scheduled) /* connections being established */
/* all route types connected ? */
if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
now >= route->ksnr_timeout)) {
"Too soon to retry route %pI4h "
"(cnted %d, interval %lld, %lld secs later)\n",
route->ksnr_connected,
route->ksnr_retry_interval,
route->ksnr_timeout - now);
ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
struct list_head *tmp;
struct ksock_route *route;
list_for_each(tmp, &peer_ni->ksnp_routes) {
route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
if (route->ksnr_scheduled)
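/* NB (annotation): ksocknal_launch_packet() below follows the classic
 * read-lock fast path / write-lock slow path pattern: look up the peer_ni
 * and a usable conn under the shared ksnd_global_lock first; only when
 * that fails, retake the lock in write mode, create the peer_ni on the
 * first pass if it doesn't exist, kick off connects, and either queue the
 * tx on the peer_ni until a conn appears or fail it with -EHOSTUNREACH. */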
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
struct lnet_process_id id)
struct ksock_peer_ni *peer_ni;
struct ksock_conn *conn;
LASSERT(tx->tx_conn == NULL);
g_lock = &ksocknal_data.ksnd_global_lock;
for (retry = 0;; retry = 1) {
peer_ni = ksocknal_find_peer_locked(ni, id);
if (peer_ni != NULL) {
if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
/* I've got no routes that need to be
 * connecting and I do have an actual
ksocknal_queue_tx_locked(tx, conn);
/* I'll need a write lock... */
write_lock_bh(g_lock);
peer_ni = ksocknal_find_peer_locked(ni, id);
write_unlock_bh(g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
CERROR("Refusing to create a connection to "
"userspace process %s\n", libcfs_id2str(id));
return -EHOSTUNREACH;
CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
return -EHOSTUNREACH;
rc = ksocknal_add_peer(ni, id,
LNET_NIDADDR(id.nid),
lnet_acceptor_port());
CERROR("Can't add peer_ni %s: %d\n",
libcfs_id2str(id), rc);
ksocknal_launch_all_connections_locked(peer_ni);
conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
/* Connection exists; queue message on it */
ksocknal_queue_tx_locked(tx, conn);
write_unlock_bh(g_lock);
if (peer_ni->ksnp_accepting > 0 ||
ksocknal_find_connecting_route_locked(peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
write_unlock_bh(g_lock);
write_unlock_bh(g_lock);
/* NB Routes may be ignored if connections to them failed recently */
CNETERR("No usable routes to %s\n", libcfs_id2str(id));
tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
return (-EHOSTUNREACH);
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
/* '1' for consistency with code that checks !mpflag to restore */
unsigned int mpflag = 1;
int type = lntmsg->msg_type;
struct lnet_process_id target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
struct bio_vec *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
/* NB 'private' is different depending on what we're sending.
 * Just ignore it... */
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT(payload_nob == 0 || payload_niov > 0);
LASSERT(payload_niov <= LNET_MAX_IOV);
LASSERT(!in_interrupt());
desc_size = offsetof(struct ksock_tx,
tx_payload[payload_niov]);
if (lntmsg->msg_vmflush)
mpflag = memalloc_noreclaim_save();
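/* NB (annotation): msg_vmflush marks a message sent on behalf of memory
 * reclaim (e.g. flushing dirty pages); entering noreclaim context here
 * keeps allocations made in the send path from recursing back into
 * reclaim and deadlocking. */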
tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
CERROR("Can't allocate tx desc type %d size %d\n",
if (lntmsg->msg_vmflush)
memalloc_noreclaim_restore(mpflag);
tx->tx_conn = NULL; /* set when assigned a conn */
tx->tx_lnetmsg = lntmsg;
tx->tx_kiov = tx->tx_payload;
tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
payload_niov, payload_kiov,
payload_offset, payload_nob);
if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
tx->tx_zc_capable = 1;
tx->tx_msg.ksm_csum = 0;
tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_msg.ksm_zc_cookies[1] = 0;
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
 * We can't test lntmsg->msg_vmflush again as lntmsg may
memalloc_noreclaim_restore(mpflag);
lntmsg->msg_health_status = tx->tx_hstatus;
ksocknal_free_tx(tx);
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
struct task_struct *task = kthread_run(fn, arg, name);
return PTR_ERR(task);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads++;
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_thread_fini(void)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (--ksocknal_data.ksnd_nthreads == 0)
wake_up_var(&ksocknal_data.ksnd_nthreads);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
static char ksocknal_slop_buffer[4096];
LASSERT(conn->ksnc_proto != NULL);
if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
/* Remind the socket to ack eagerly... */
ksocknal_lib_eager_ack(conn);
if (nob_to_skip == 0) { /* right at next packet boundary now */
conn->ksnc_rx_started = 0;
smp_mb(); /* racing with timeout thread */
switch (conn->ksnc_proto->pro_version) {
case KSOCK_PROTO_V2:
case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
case KSOCK_PROTO_V1:
/* Receiving bare struct lnet_hdr */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_csum = ~0;
/* Set up to skip as much as possible now. If there's more left
 * (ran out of iov entries) we'll get called again */
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
conn->ksnc_rx_iov[niov].iov_len = nob;
} while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
conn->ksnc_rx_niov = niov;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_nob_wanted = skipped;
ksocknal_process_receive(struct ksock_conn *conn,
struct page **rx_scratch_pgs,
struct kvec *scratch_iov)
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
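/* NB (annotation): sketch of the rx state machine driven below:
 * KSM_HEADER (v2+ message header) -> done for NOOPs, or
 * LNET_HEADER -> PARSE (waiting for lnet_parse()/ksocknal_recv()) ->
 * LNET_PAYLOAD -> fresh packet via ksocknal_new_packet(), which uses
 * SLOP to discard the bytes of messages nobody wants. */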
if (conn->ksnc_rx_nob_wanted != 0) {
rc = ksocknal_receive(conn, rx_scratch_pgs,
struct lnet_process_id ksnp_id;
ksnp_id = conn->ksnc_peer->ksnp_id;
LASSERT(rc != -EAGAIN);
CDEBUG(D_NET, "[%p] EOF from %s "
"ip %pI4h:%d\n", conn,
libcfs_id2str(ksnp_id),
else if (!conn->ksnc_closing)
CERROR("[%p] Error %d on read from %s "
"ip %pI4h:%d\n", conn, rc,
libcfs_id2str(ksnp_id),
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings(conn,
(conn->ksnc_closing) ? 0 : rc);
return (rc == 0 ? -ESHUTDOWN : rc);
if (conn->ksnc_rx_nob_wanted != 0) {
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_KSM_HEADER:
if (conn->ksnc_flip) {
__swab32s(&conn->ksnc_msg.ksm_type);
__swab32s(&conn->ksnc_msg.ksm_csum);
__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
CERROR("%s: Unknown message type: %x\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
conn->ksnc_msg.ksm_type);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, -EPROTO);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
/* NOOP Checksum error */
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, -EPROTO);
if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
cookie = conn->ksnc_msg.ksm_zc_cookies[0];
rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
conn->ksnc_msg.ksm_zc_cookies[1]);
CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, -EPROTO);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
ksocknal_new_packet(conn, 0);
return 0; /* NOOP is done and just return */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_nkiov = 0;
goto again; /* read lnet header now */
case SOCKNAL_RX_LNET_HEADER:
/* unpack message header */
conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
/* Userspace peer_ni */
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
/* Substitute process ID assigned at connection time */
lhdr->src_pid = cpu_to_le32(id->pid);
lhdr->src_nid = cpu_to_le64(id->nid);
conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
ksocknal_conn_addref(conn); /* ++ref while parsing */
rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
conn->ksnc_peer->ksnp_id.nid, conn, 0);
/* I just received garbage: give up on this conn */
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, rc);
ksocknal_conn_decref(conn);
/* I'm racing with ksocknal_recv() */
LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
/* ksocknal_recv() got called */
case SOCKNAL_RX_LNET_PAYLOAD:
/* payload all received */
if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
rc = conn->ksnc_proto->pro_handle_zcreq(conn,
conn->ksnc_msg.ksm_zc_cookies[0],
*ksocknal_tunables.ksnd_nonblk_zcack ||
le64_to_cpu(lhdr->src_nid) != id->nid);
if (rc && conn->ksnc_lnet_msg)
conn->ksnc_lnet_msg->msg_health_status =
LNET_MSG_STATUS_REMOTE_ERROR;
lnet_finalize(conn->ksnc_lnet_msg, rc);
ksocknal_new_packet(conn, 0);
ksocknal_close_conn_and_siblings(conn, rc);
case SOCKNAL_RX_SLOP:
/* starting new packet? */
if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
return 0; /* come back later */
goto again; /* try to finish reading slop now */
return (-EINVAL); /* keep gcc happy */
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int niov,
struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
struct ksock_conn *conn = private;
struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT(mlen <= rlen);
LASSERT(niov <= LNET_MAX_IOV);
conn->ksnc_lnet_msg = msg;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
conn->ksnc_rx_niov = 0;
conn->ksnc_rx_niov = 0;
conn->ksnc_rx_iov = NULL;
conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
conn->ksnc_rx_nkiov =
lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
niov, kiov, offset, mlen);
lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
LASSERT(conn->ksnc_rx_scheduled);
spin_lock_bh(&sched->kss_lock);
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
wake_up(&sched->kss_waitq);
LASSERT(conn->ksnc_rx_ready);
case SOCKNAL_RX_PARSE:
/* scheduler hasn't noticed I'm parsing yet */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
spin_unlock_bh(&sched->kss_lock);
ksocknal_conn_decref(conn);
ksocknal_sched_cansleep(struct ksock_sched *sched)
spin_lock_bh(&sched->kss_lock);
rc = (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&sched->kss_rx_conns) &&
list_empty(&sched->kss_tx_conns));
spin_unlock_bh(&sched->kss_lock);
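/* NB (annotation): each scheduler thread below is bound to a CPT and
 * services one rx conn and one tx conn per loop iteration so neither
 * direction can starve the other; it drops kss_lock and yields every
 * SOCKNAL_RESCHED iterations to avoid hogging the CPU, and sleeps on
 * kss_waitq only while ksocknal_sched_cansleep() says both queues are
 * empty. */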
int ksocknal_scheduler(void *arg)
struct ksock_sched *sched;
struct ksock_conn *conn;
struct ksock_tx *tx;
long id = (long)arg;
struct page **rx_scratch_pgs;
struct kvec *scratch_iov;
sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];
LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
if (!rx_scratch_pgs) {
CERROR("Unable to allocate scratch pages\n");
LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
sizeof(*scratch_iov) * LNET_MAX_IOV);
CERROR("Unable to allocate scratch iov\n");
rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
CWARN("Can't set CPU partition affinity to %d: %d\n",
sched->kss_cpt, rc);
spin_lock_bh(&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
int did_something = 0;
/* Ensure I progress everything semi-fairly */
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
struct ksock_conn, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
/* clear rx_ready in case receive isn't complete.
 * Do it BEFORE we call process_recv, since
 * data_ready can set it any time after we release
conn->ksnc_rx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
rc = ksocknal_process_receive(conn, rx_scratch_pgs,
spin_lock_bh(&sched->kss_lock);
/* I'm the only one that can clear this flag */
LASSERT(conn->ksnc_rx_scheduled);
/* Did process_receive get everything it wanted? */
conn->ksnc_rx_ready = 1;
if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
/* Conn blocked waiting for ksocknal_recv();
 * I change its state (under lock) to signal
 * it can be rescheduled */
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
list_add_tail(&conn->ksnc_rx_list,
&sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 0;
ksocknal_conn_decref(conn);
if (!list_empty(&sched->kss_tx_conns)) {
list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
conn = list_entry(sched->kss_tx_conns.next,
struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
/* dequeue now so empty list => more to send */
list_del(&tx->tx_list);
/* Clear tx_ready in case send isn't complete. Do
 * it BEFORE we call process_transmit, since
 * write_space can set it any time after we release
conn->ksnc_tx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
if (!list_empty(&zlist)) {
/* free zombie noop txs; it's fast because
 * noop txs just go back on the freelist */
ksocknal_txlist_done(NULL, &zlist, 0);
rc = ksocknal_process_transmit(conn, tx, scratch_iov);
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
spin_lock_bh(&sched->kss_lock);
list_add(&tx->tx_list,
&conn->ksnc_tx_queue);
/* Complete send; tx -ref */
ksocknal_tx_decref(tx);
spin_lock_bh(&sched->kss_lock);
/* assume space for more */
conn->ksnc_tx_ready = 1;
if (rc == -ENOMEM) {
/* Do nothing; after a short timeout, this
 * conn will be reposted on kss_tx_conns. */
} else if (conn->ksnc_tx_ready &&
!list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 0;
ksocknal_conn_decref(conn);
if (!did_something || /* nothing to do */
++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
spin_unlock_bh(&sched->kss_lock);
if (!did_something) { /* wait for something to do */
rc = wait_event_interruptible_exclusive(
!ksocknal_sched_cansleep(sched));
spin_lock_bh(&sched->kss_lock);
spin_unlock_bh(&sched->kss_lock);
CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
ksocknal_thread_fini();
 * Add connection to kss_rx_conns of scheduler
 * and wake up the scheduler.
void ksocknal_read_callback(struct ksock_conn *conn)
struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
conn->ksnc_rx_ready = 1;
if (!conn->ksnc_rx_scheduled) { /* not being progressed */
list_add_tail(&conn->ksnc_rx_list,
&sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
 * Add connection to kss_tx_conns of scheduler
 * and wake up the scheduler.
void ksocknal_write_callback(struct ksock_conn *conn)
struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
conn->ksnc_tx_ready = 1;
if (!conn->ksnc_tx_scheduled && /* not being progressed */
!list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
static const struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
if (hello->kshm_magic == LNET_PROTO_MAGIC)
version = hello->kshm_version;
else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
version = __swab32(hello->kshm_version);
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 1)
if (*ksocknal_tunables.ksnd_protocol == 2 &&
version == KSOCK_PROTO_V3)
if (version == KSOCK_PROTO_V2)
return &ksocknal_protocol_v2x;
if (version == KSOCK_PROTO_V3)
return &ksocknal_protocol_v3x;
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
struct lnet_magicversion *hmv;
BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
offsetof(struct ksock_hello_msg, kshm_src_nid));
hmv = (struct lnet_magicversion *)hello;
if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
return &ksocknal_protocol_v1x;
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
struct ksock_net *net = (struct ksock_net *)ni->ni_data;
LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);
/* rely on caller to hold a ref on socket so it wouldn't disappear */
LASSERT(conn->ksnc_proto != NULL);
hello->kshm_src_nid = ni->ni_nid;
hello->kshm_dst_nid = peer_nid;
hello->kshm_src_pid = the_lnet.ln_pid;
hello->kshm_src_incarnation = net->ksnn_incarnation;
hello->kshm_ctype = conn->ksnc_type;
return conn->ksnc_proto->pro_send_hello(conn, hello);
ksocknal_invert_type(int type)
case SOCKLND_CONN_ANY:
case SOCKLND_CONN_CONTROL:
case SOCKLND_CONN_BULK_IN:
return SOCKLND_CONN_BULK_OUT;
case SOCKLND_CONN_BULK_OUT:
return SOCKLND_CONN_BULK_IN;
return (SOCKLND_CONN_NONE);
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
struct ksock_hello_msg *hello,
struct lnet_process_id *peerid,
/* Return < 0 fatal error
 * EALREADY lost connection race
 * EPROTO protocol version mismatch
struct socket *sock = conn->ksnc_sock;
int active = (conn->ksnc_proto != NULL);
const struct ksock_proto *proto;
struct lnet_process_id recv_id;
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
timeout = active ? ksocknal_timeout() :
lnet_acceptor_timeout();
rc = lnet_sock_read(sock, &hello->kshm_magic,
sizeof(hello->kshm_magic), timeout);
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
if (hello->kshm_magic != LNET_PROTO_MAGIC &&
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
CERROR("Bad magic(1) %#08x (%#08x expected) from "
"%pI4h\n", __cpu_to_le32(hello->kshm_magic),
LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
rc = lnet_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
proto = ksocknal_parse_proto_version(hello);
if (proto == NULL) {
/* unknown protocol from peer_ni, tell peer_ni my protocol */
conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 2)
conn->ksnc_proto = &ksocknal_protocol_v2x;
else if (*ksocknal_tunables.ksnd_protocol == 1)
conn->ksnc_proto = &ksocknal_protocol_v1x;
hello->kshm_nips = 0;
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);
proto_match = (conn->ksnc_proto == proto);
conn->ksnc_proto = proto;
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
CERROR("Error %d reading or checking hello from %pI4h\n",
rc, &conn->ksnc_ipaddr);
*incarnation = hello->kshm_src_incarnation;
if (hello->kshm_src_nid == LNET_NID_ANY) {
CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
&conn->ksnc_ipaddr);
conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
/* Userspace NAL assigns peer_ni process ID from socket */
recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
recv_id.nid = hello->kshm_src_nid;
recv_id.pid = hello->kshm_src_pid;
/* peer_ni determines type */
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
CERROR("Unexpected type %d from %s ip %pI4h\n",
hello->kshm_ctype, libcfs_id2str(*peerid),
&conn->ksnc_ipaddr);
if (peerid->pid != recv_id.pid ||
peerid->nid != recv_id.nid) {
LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
" %pI4h, but they claimed they were "
"%s; please check your Lustre "
libcfs_id2str(*peerid),
libcfs_id2str(recv_id));
if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
/* Possible protocol mismatch or I lost the connection race */
return proto_match ? EALREADY : EPROTO;
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
conn->ksnc_type, libcfs_id2str(*peerid),
ksocknal_connect(struct ksock_route *route)
struct ksock_peer_ni *peer_ni = route->ksnr_peer;
struct socket *sock;
int retry_later = 0;
deadline = ktime_get_seconds() + ksocknal_timeout();
write_lock_bh(&ksocknal_data.ksnd_global_lock);
LASSERT(route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
route->ksnr_connecting = 1;
wanted = ksocknal_route_mask() & ~route->ksnr_connected;
/* stop connecting if peer_ni/route got closed under me, or
 * route got connected while queued */
if (peer_ni->ksnp_closing || route->ksnr_deleted ||
/* reschedule if peer_ni is connecting to me */
if (peer_ni->ksnp_accepting > 0) {
"peer_ni %s(%d) already connecting to me, retry later.\n",
libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
if (retry_later) /* needs reschedule */
if ((wanted & BIT(SOCKLND_CONN_ANY)) != 0) {
type = SOCKLND_CONN_ANY;
} else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
type = SOCKLND_CONN_CONTROL;
} else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
type = SOCKLND_CONN_BULK_IN;
LASSERT((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
type = SOCKLND_CONN_BULK_OUT;
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (ktime_get_seconds() >= deadline) {
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
sock = lnet_connect(peer_ni->ksnp_id.nid,
route->ksnr_myiface,
route->ksnr_ipaddr, route->ksnr_port,
peer_ni->ksnp_ni->ni_net_ns);
rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
/* A +ve RC means I have to retry because I lost the connection
 * race or I have to renegotiate protocol version */
retry_later = (rc != 0);
CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
libcfs_nid2str(peer_ni->ksnp_id.nid));
write_lock_bh(&ksocknal_data.ksnd_global_lock);
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
/* re-queue for attention; this frees me up to handle
 * the peer_ni's incoming connection request */
if (rc == EALREADY ||
(rc == 0 && peer_ni->ksnp_accepting > 0)) {
/* We want to introduce a delay before the next
 * attempt to connect if we lost the conn race,
 * but the race is usually resolved quickly,
 * so min_reconnectms should be a good heuristic */
route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
route->ksnr_timeout = ktime_get_seconds() +
route->ksnr_retry_interval;
ksocknal_launch_connection_locked(route);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
/* This is a retry rather than a new connection */
route->ksnr_retry_interval *= 2;
route->ksnr_retry_interval =
max_t(time64_t, route->ksnr_retry_interval,
*ksocknal_tunables.ksnd_min_reconnectms / 1000);
route->ksnr_retry_interval =
min_t(time64_t, route->ksnr_retry_interval,
*ksocknal_tunables.ksnd_max_reconnectms / 1000);
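/* NB (annotation): classic exponential backoff: the retry interval
 * doubles on every failed attempt and is clamped to the
 * [min_reconnectms, max_reconnectms] tunable window (converted from
 * milliseconds to seconds). */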
LASSERT(route->ksnr_retry_interval);
route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;
if (!list_empty(&peer_ni->ksnp_tx_queue) &&
peer_ni->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
struct ksock_conn *conn;
/* ksnp_tx_queue is queued on a conn on successful
 * connection for V1.x and V2.x */
if (!list_empty(&peer_ni->ksnp_conns)) {
conn = list_entry(peer_ni->ksnp_conns.next,
struct ksock_conn, ksnc_list);
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
/* take all the blocked packets while I've got the lock and
 * complete below... */
list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_peer_failed(peer_ni);
ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
 * Check whether we need to create more connds.
 * Try to create a new thread if necessary; @timeout may be updated
 * when creation fails, so the caller doesn't keep retrying while
 * running out of resources.
ksocknal_connd_check_start(time64_t sec, long *timeout)
int total = ksocknal_data.ksnd_connd_starting +
ksocknal_data.ksnd_connd_running;
if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
/* still initializing */
if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
/* can't create more connds, or still have enough
 * threads to handle the pending connects */
if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
/* no pending connecting request */
if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
/* may run out of resources, retry later */
*timeout = cfs_time_seconds(1);
if (ksocknal_data.ksnd_connd_starting > 0) {
/* serialize starting to avoid flood */
ksocknal_data.ksnd_connd_starting_stamp = sec;
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
/* NB: total is the next id */
snprintf(name, sizeof(name), "socknal_cd%02d", total);
rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
LASSERT(ksocknal_data.ksnd_connd_starting > 0);
ksocknal_data.ksnd_connd_starting--;
ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
 * Check whether the current thread can exit; returns 1 if there are
 * too many threads and none were created in the past 120 seconds.
 * This function may also update @timeout so the caller comes back
 * to recheck these conditions.
ksocknal_connd_check_stop(time64_t sec, long *timeout)
if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
/* still initializing */
if (ksocknal_data.ksnd_connd_starting > 0) {
/* in the process of starting a new thread */
if (ksocknal_data.ksnd_connd_running <=
*ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
/* created a thread in the past 120 seconds? */
val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
SOCKNAL_CONND_TIMEOUT - sec);
*timeout = (val > 0) ? cfs_time_seconds(val) :
cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
/* nothing created in the past 120 seconds */
return ksocknal_data.ksnd_connd_running >
ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
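/* NB (annotation): together, check_start/check_stop implement an elastic
 * connd pool: threads are added (up to ksnd_nconnds_max) while routes
 * queue up, idle threads retire after SOCKNAL_CONND_TIMEOUT, and
 * SOCKNAL_CONND_RESV threads are held in reserve so incoming connection
 * requests can always be serviced. */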
/* Go through connd_routes queue looking for a route that we can process
 * right now, @timeout_p can be updated if we need to come back later */
static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
time64_t now = ktime_get_seconds();
struct ksock_route *route;
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
if (route->ksnr_retry_interval == 0 ||
now >= route->ksnr_timeout)
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
*timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
*timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
ksocknal_connd(void *arg)
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
struct ksock_connreq *cr;
wait_queue_entry_t wait;
init_waitqueue_entry(&wait, current);
spin_lock_bh(connd_lock);
LASSERT(ksocknal_data.ksnd_connd_starting > 0);
ksocknal_data.ksnd_connd_starting--;
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
struct ksock_route *route = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
if (ksocknal_connd_check_stop(sec, &timeout)) {
/* wake up another one to check stop */
wake_up(&ksocknal_data.ksnd_connd_waitq);
if (ksocknal_connd_check_start(sec, &timeout)) {
/* created a new thread */
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
struct ksock_connreq, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
ksocknal_create_conn(cr->ksncr_ni, NULL,
cr->ksncr_sock, SOCKLND_CONN_NONE);
lnet_ni_decref(cr->ksncr_ni);
LIBCFS_FREE(cr, sizeof(*cr));
spin_lock_bh(connd_lock);
/* Only handle an outgoing connection request if there
 * is a thread left to handle incoming connections and
 * create a new connd */
if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
ksocknal_data.ksnd_connd_running) {
route = ksocknal_connd_get_route_locked(&timeout);
if (route != NULL) {
list_del(&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
if (ksocknal_connect(route)) {
/* consecutive retry */
if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
CWARN("massive consecutive "
"re-connecting to %pI4h\n",
&route->ksnr_ipaddr);
ksocknal_route_decref(route);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_connecting--;
if (++nloops < SOCKNAL_RESCHED)
spin_unlock_bh(connd_lock);
spin_lock_bh(connd_lock);
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
schedule_timeout(timeout);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
ksocknal_data.ksnd_connd_running--;
spin_unlock_bh(connd_lock);
ksocknal_thread_fini();
static struct ksock_conn *
ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
/* We're called with a shared lock on ksnd_global_lock */
struct ksock_conn *conn;
struct list_head *ctmp;
struct ksock_tx *tx;
list_for_each(ctmp, &peer_ni->ksnp_conns) {
conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT(!conn->ksnc_closing);
error = conn->ksnc_sock->sk->sk_err;
ksocknal_conn_addref(conn);
CNETERR("A connection with %s "
"(%pI4h:%d) was reset; "
"it may have rebooted.\n",
libcfs_id2str(peer_ni->ksnp_id),
CNETERR("A connection with %s "
"(%pI4h:%d) timed out; the "
"network or node may be down.\n",
libcfs_id2str(peer_ni->ksnp_id),
CNETERR("An unexpected network error %d "
"(%pI4h:%d)\n", error,
libcfs_id2str(peer_ni->ksnp_id),
if (conn->ksnc_rx_started &&
ktime_get_seconds() >= conn->ksnc_rx_deadline) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
CNETERR("Timeout receiving from %s (%pI4h:%d), "
"state %d wanted %d left %d\n",
libcfs_id2str(peer_ni->ksnp_id),
conn->ksnc_rx_state,
conn->ksnc_rx_nob_wanted,
conn->ksnc_rx_nob_left);
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
ktime_get_seconds() >= conn->ksnc_tx_deadline) {
/* Timed out messages queued for sending or
 * buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
list_for_each_entry(tx, &conn->ksnc_tx_queue,
LNET_MSG_STATUS_LOCAL_TIMEOUT;
CNETERR("Timeout sending data to %s (%pI4h:%d); "
"the network or that node may be down.\n",
libcfs_id2str(peer_ni->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port);
static void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
	struct ksock_tx *tx;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
		tx = list_entry(peer_ni->ksnp_tx_queue.next,
				struct ksock_tx, tx_list);

		if (ktime_get_seconds() < tx->tx_deadline)
			break;

		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;

		list_move_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
}
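
/* Returns 0 if no keepalive is needed, 1 if a keepalive PING was launched,
 * or a negative errno on failure.  May drop and re-take ksnd_global_lock
 * (shared), so on a non-zero return the caller must restart its scan. */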
static int
ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer_ni->ksnp_conns))
		return 0;

	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
				  *ksocknal_tunables.ksnd_keepalive)
		return 0;

	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
		return 0;

	/* retry 10 secs later, so we wouldn't put pressure
	 * on this peer_ni if we failed to send keepalive this time */
	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;

	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK, don't need a keepalive */
			return 0;
		}
		spin_unlock_bh(&sched->kss_lock);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx,
				   peer_ni->ksnp_id) == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);
	return -EIO;
}
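
/* Sweep one bucket of the peer_ni hash table for dead or stalled
 * connections, stale txs and stale zero-copy requests.  The scan restarts
 * from the top of the bucket whenever ksnd_global_lock has to be dropped. */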
static void
ksocknal_check_peer_timeouts(int idx)
{
	struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

again:
	/* NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look...
	 */
	read_lock(&ksocknal_data.ksnd_global_lock);

	hlist_for_each_entry(peer_ni, peers, ksnp_list) {
		struct ksock_tx *tx_stale;
		time64_t deadline = 0;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}

		conn = ksocknal_find_timed_out_conn(peer_ni);
		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/* NB we won't find this one again, but we can't
			 * just proceed with the next peer_ni, since we dropped
			 * ksnd_global_lock and it might be dead already!
			 */
			ksocknal_conn_decref(conn);
			goto again;
		}

		/* we can't process stale txs right here because we're
		 * holding only a shared lock
		 */
		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			struct ksock_tx *tx;

			tx = list_entry(peer_ni->ksnp_tx_queue.next,
					struct ksock_tx, tx_list);
			if (ktime_get_seconds() >= tx->tx_deadline) {
				ksocknal_peer_addref(peer_ni);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer_ni);

				ksocknal_peer_decref(peer_ni);
				goto again;
			}
		}

		if (list_empty(&peer_ni->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer_ni->ksnp_lock);
		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list,
				    tx_zc_list) {
			if (ktime_get_seconds() < tx->tx_deadline)
				break;
			/* ignore the TX if the connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (tx_stale == NULL)
				tx_stale = tx;
		}

		if (tx_stale == NULL) {
			spin_unlock(&peer_ni->ksnp_lock);
			continue;
		}

		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer_ni->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);

		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the oldest(%p) timed out %lld secs ago, resid: %d, wmem: %d\n",
		       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
		       ktime_get_seconds() - deadline,
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);

		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
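
/* Reaper thread: terminates connections on the deathrow list, destroys
 * zombie connections, reschedules connections that stalled on ENOMEM and
 * periodically sweeps the peer_ni table for timeouts. */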
int ksocknal_reaper(void *arg)
{
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	LIST_HEAD(enomem_conns);
	int nenomem_conns;
	time64_t timeout;
	int i;
	int peer_index = 0;
	time64_t deadline = ktime_get_seconds();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		list_splice_init(&ksocknal_data.ksnd_enomem_conns,
				 &enomem_conns);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}

		/* careful with the jiffy wrap... */
		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
			unsigned int lnd_timeout;

			/* Time to check for timeouts on a few more peers: I
			 * do checks every 'p' seconds on a proportion of the
			 * peer_ni table and I need to check every connection
			 * 'n' times within a timeout interval, to ensure I
			 * detect a timeout on any connection within (n+1)/n
			 * times the timeout interval.
			 */
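			/* For example (illustrative numbers, not values
			 * guaranteed by this file): with the n = 4, p = 1
			 * above, say lnd_timeout = 50s and a 128-bucket
			 * peer_ni hash, chunk = 128 * 4 * 1 / 50 = 10, so
			 * about 10 buckets are swept per second and the
			 * whole table is covered roughly 4 times per
			 * timeout interval. */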
			lnd_timeout = ksocknal_timeout();
			if (lnd_timeout > n * p)
				chunk = (chunk * n * p) / lnd_timeout;
			if (chunk == 0)
				chunk = 1;

			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     HASH_SIZE(ksocknal_data.ksnd_peers);
			}
			deadline += p;
		}

		if (nenomem_conns != 0) {
			/* Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list. */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
						     timeout;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(cfs_time_seconds(timeout));

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}
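
/* NB: presumably spawned at startup via something like
 * ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper") -- assumed
 * from the ksocknal_thread_fini() pairing above -- and exits once
 * ksnd_shuttingdown is set, as checked at the top of the main loop. */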