/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.hpdd.intel.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
	struct ksock_tx *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					struct ksock_tx, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_hstatus = LNET_MSG_STATUS_OK;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}
struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	struct ksock_tx *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_lnetmsg = NULL;
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_nonblk = nonblk;
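
	/* NB: a NOOP is pure protocol traffic: ksm_zc_cookies[0] stays 0
	 * below because a NOOP never requests zero-copy itself, while
	 * ksm_zc_cookies[1] carries the cookie being acknowledged
	 * (or 1 for a keepalive ping). */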
	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}
void
ksocknal_free_tx(struct ksock_tx *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx: return it to the freelist */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}
static int
ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	struct kvec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
	rc = ksocknal_lib_send_iov(conn, tx);
	if (rc <= 0)			/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	do {
		LASSERT(tx->tx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_base += nob;
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	lnet_kiov_t *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov == 0);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx);
	if (rc <= 0)			/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return rc;
		}

		nob -= (int)kiov->kiov_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
	}

	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			rc = ksocknal_send_iov(conn, tx);
		} else {
			rc = ksocknal_send_kiov(conn, tx);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0)			/* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/* allocated send buffer bytes < computed; infer
			 * something got ACKed */
			conn->ksnc_tx_deadline = ktime_get_seconds() +
						 lnet_get_lnd_timeout();
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
			conn->ksnc_tx_bufnob = bufnob;
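			/* (TCP only releases sk_wmem_queued space as data is
			 * ACKed, so this doubles as a cheap liveness check
			 * on the peer.) */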
			smp_mb();
		}

		if (rc <= 0) { /* Didn't write anything? */
			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;
	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}
static int
ksocknal_recv_iov(struct ksock_conn *conn)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn);
	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 lnet_get_lnd_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;
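	/* NB: the timeout thread tests rx_started together with
	 * ksnc_rx_deadline (see ksocknal_find_timed_out_conn), so the
	 * barrier above keeps it from seeing rx_started set before the
	 * refreshed deadline. */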
	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_recv_kiov(struct ksock_conn *conn)
{
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);

	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_kiov */
	rc = ksocknal_lib_recv_kiov(conn);
	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 lnet_get_lnd_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return -EAGAIN;
		}

		nob -= (int)kiov->kiov_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_receive(struct ksock_conn *conn)
{
	/* Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion. */
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
	}

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov(conn);
		else
			rc = ksocknal_recv_kiov(conn);

		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (rc == 0 && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */

		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}
void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
	struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
	enum lnet_msg_hstatus hstatus = tx->tx_hstatus;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
		rc = -EIO;
		hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	ksocknal_free_tx(tx);
	if (lnetmsg != NULL) { /* KSOCK_MSG_NOOPs go without lnetmsg */
		if (rc)
			CERROR("tx failure rc = %d, hstatus = %d\n", rc,
			       hstatus);
		lnetmsg->msg_health_status = hstatus;
		lnet_finalize(lnetmsg, rc);
	}
}
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
	struct ksock_tx *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, struct ksock_tx, tx_list);

		if (error && tx->tx_lnetmsg != NULL) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
			if (error == -ETIMEDOUT)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			else if (error == -ENETDOWN ||
				 error == -EHOSTUNREACH ||
				 error == -ENETUNREACH)
				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
			else
				/*
				 * for all other errors we don't want to
				 * retransmit
				 */
				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		}

		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx, error);
	}
}
static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
	struct ksock_conn *conn = tx->tx_conn;
	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy.  Our peer_ni will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/* assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack() */

	ksocknal_tx_addref(tx);

	spin_lock(&peer_ni->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer_ni */
	tx->tx_deadline = ktime_get_seconds() +
			  lnet_get_lnd_timeout();

	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

	if (peer_ni->ksnp_zc_next_cookie == 0)
		peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
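	/* (cookie 0 means "no ZC request" and SOCKNAL_KEEPALIVE_PING is
	 * reserved for keepalives, so skip both if the counter wraps) */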
	list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

	spin_unlock(&peer_ni->ksnp_lock);
}
static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
	struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer_ni->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		/* Not waiting for an ACK */
		spin_unlock(&peer_ni->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer_ni->ksnp_lock);

	ksocknal_tx_decref(tx);
}
static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
	int rc;

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		/* Sent everything OK */
		LASSERT(rc == 0);
		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++;	/* exponential backoff warnings */
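		/* (counter & -counter) isolates the lowest set bit, so the
		 * test below passes only when counter is a power of two:
		 * warn on the 1st, 2nd, 4th, 8th... ENOMEM */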
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%u allocated)\n",
			      counter, conn, atomic_read(&libcfs_kmemory));

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
		    ksocknal_data.ksnd_reaper_waketime)
			wake_up(&ksocknal_data.ksnd_reaper_waitq);
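		/* (no need to wake the reaper unless it plans to sleep past
		 * our retry time; otherwise it will find this conn on its
		 * next pass anyway) */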
		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		return rc;
	}

	/* Actual error */
	LASSERT(rc < 0);

	/*
	 * set the health status of the message which determines
	 * whether we should retry the transmit
	 */
	if (rc == -ETIMEDOUT)
		tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
	else
		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection "
				      "while we were sending data; it may have "
				      "rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error "
				      "while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
		       conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr, conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn,
					 (conn->ksnc_closing) ? 0 : rc);

	return rc;
}
void
ksocknal_launch_connection_locked(struct ksock_route *route)
{
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

	route->ksnr_scheduled = 1;	/* scheduling conn for connd */
	ksocknal_route_addref(route);	/* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
	struct ksock_route *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		route = ksocknal_find_connectable_route_locked(peer_ni);
		if (route == NULL)
			return;

		ksocknal_launch_connection_locked(route);
	}
}
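
/*
 * Pick the conn with the fewest bytes already queued against it (our own
 * accounting plus the socket's send buffer).  SOCKNAL_MATCH_YES conns are
 * preferred over SOCKNAL_MATCH_MAY fallbacks; with the round_robin tunable
 * set, ties go to the conn that sent least recently.
 */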
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
{
	struct list_head *tmp;
	struct ksock_conn *conn;
	struct ksock_conn *typed = NULL;
	struct ksock_conn *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer_ni->ksnp_conns) {
		struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
						  ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto != NULL &&
			c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = ktime_get_seconds();

	return conn;
}
void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}
void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
	struct ksock_sched *sched = conn->ksnc_scheduler;
	struct ksock_msg *msg = &tx->tx_msg;
	struct ksock_tx *ztx = NULL;
	int bufnob = 0;

	/* called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr, conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send.  Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header. */
	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
					      KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline = ktime_get_seconds() +
					 lnet_get_lnd_timeout();
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
		conn->ksnc_tx_bufnob = 0;
		smp_mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it */
		LASSERT(msg->ksm_zc_cookies[1] != 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */
	} else {
		/* It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already? */
		LASSERT(msg->ksm_zc_cookies[1] == 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx != NULL) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
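		/* (a piggybacked ZC ACK makes its standalone NOOP redundant;
		 * the scheduler retires zombie NOOPs in batches via
		 * ksocknal_txlist_done(), which just refiles them on the
		 * freelist) */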
	}

	if (conn->ksnc_tx_ready &&	/* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
static struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
{
	time64_t now = ktime_get_seconds();
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)	/* connections being established */
			continue;

		/* all route types connected ? */
		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
			continue;

		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
		      now >= route->ksnr_timeout)) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h "
			       "(cnted %d, interval %lld, %lld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       route->ksnr_timeout - now);
			continue;
		}

		return route;
	}

	return NULL;
}
static struct ksock_route *
ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
{
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}
int
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
		       struct lnet_process_id id)
{
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL) {
			if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
				conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection... */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id.pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to "
			       "userspace process %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc != 0) {
			CERROR("Can't add peer_ni %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer_ni);

	conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer_ni->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer_ni) != NULL) {
		/* the message is going to be pinned to the peer_ni */
		tx->tx_deadline = ktime_get_seconds() +
				  lnet_get_lnd_timeout();

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	return -EHOSTUNREACH;
}
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
	int mpflag = 1;
	int type = lntmsg->msg_type;
	struct lnet_process_id target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	struct ksock_tx *tx;
	int desc_size;
	int rc;

	/* NB 'private' is different depending on what we're sending.
	 * Just ignore it... */

	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
	LASSERT(!in_interrupt());

	if (payload_iov != NULL)
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.paged.kiov[payload_niov]);
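
	/* NB: both layouts keep a mapped fragment free for the socklnd
	 * message header: virt reserves iov[0] and puts the payload at
	 * iov[1..], while paged keeps a single header iov beside the kiov
	 * array.  pro_pack() fills that first fragment in later. */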
	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL;	/* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov != NULL) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			      lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					       payload_niov, payload_iov,
					       payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = 0;

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (rc == 0)
		return 0;

	ksocknal_free_tx(tx);
	return -EIO;
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return 0;
}
void
ksocknal_thread_fini(void)
{
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads--;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];
	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (nob_to_skip == 0) {		/* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		smp_mb();		/* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare struct lnet_hdr */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/* Set up to skip as much as possible now.  If there's more left
	 * (ran out of iov entries) we'll get called again */

	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
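
	/* (every slop iov slot can point at the same static buffer: the
	 * bytes are being discarded, so overlapping destinations are fine) */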
	skipped = 0;
	niov = 0;

	do {
		nob = MIN(nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;
	} while (nob_to_skip != 0 &&	/* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}
static int
ksocknal_process_receive(struct ksock_conn *conn)
{
	struct lnet_hdr *lhdr;
	struct lnet_process_id *id;
	int rc;

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn);

		if (rc <= 0) {
			struct lnet_process_id ksnp_id;

			ksnp_id = conn->ksnc_peer->ksnp_id;

			LASSERT(rc != -EAGAIN);

			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s "
				       "ip %pI4h:%d\n", conn,
				       libcfs_id2str(ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s "
				       "ip %pI4h:%d\n", conn, rc,
				       libcfs_id2str(ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP Checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
					conn->ksnc_msg.ksm_zc_cookies[1]);
			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet(conn, 0);
			return 0;	/* NOOP is done and just return */
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;

		goto again;	/* read lnet header now */
	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			/* Userspace peer_ni */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn);	/* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;
		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 &&	/* not truncating */
		    conn->ksnc_msg.ksm_csum != 0 &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;
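
			/* NB: ask for a non-blocking conn for the ZC ACK if
			 * the tunable requests it, or if the header's source
			 * NID isn't the peer itself (i.e. a routed message) */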
			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}
		lnet_finalize(conn->ksnc_cookie, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}
		/* Fall through */

	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0;	/* come back later */
		goto again;		/* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG();
	return -EINVAL;			/* keep gcc happy */
}
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	      int delayed, unsigned int niov, struct kvec *iov,
	      lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
	      unsigned int rlen)
{
	struct ksock_conn *conn = private;
	struct ksock_sched *sched = conn->ksnc_scheduler;

	LASSERT(mlen <= rlen);
	LASSERT(niov <= LNET_MAX_IOV);

	conn->ksnc_cookie = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	if (mlen == 0 || iov != NULL) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov =
			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
					 niov, iov, offset, mlen);
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT(mlen ==
		lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}
static int
ksocknal_sched_cansleep(struct ksock_sched *sched)
{
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = (!ksocknal_data.ksnd_shuttingdown &&
	      list_empty(&sched->kss_rx_conns) &&
	      list_empty(&sched->kss_tx_conns));

	spin_unlock_bh(&sched->kss_lock);
	return rc;
}
int ksocknal_scheduler(void *arg)
{
	struct ksock_sched_info *info;
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;
	int rc;
	int nloops = 0;
	long id = (long)arg;

	info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
	sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

	cfs_block_allsigs();

	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
	if (rc != 0)
		CWARN("Can't set CPU partition affinity to %d: %d\n",
		      info->ksi_cpt, rc);

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		int did_something = 0;

		/* Ensure I progress everything semi-fairly */
		if (!list_empty(&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  struct ksock_conn, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/* clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock. */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/* Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}
		if (!list_empty(&sched->kss_tx_conns)) {
			struct list_head zlist = LIST_HEAD_INIT(zlist);

			if (!list_empty(&sched->kss_zombie_noop_txs)) {
				list_add(&zlist,
					 &sched->kss_zombie_noop_txs);
				list_del_init(&sched->kss_zombie_noop_txs);
			}

			conn = list_entry(sched->kss_tx_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					struct ksock_tx, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete.  Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock. */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}
			rc = ksocknal_process_transmit(conn, tx);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail(&conn->ksnc_tx_list,
					      &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}
		if (!did_something ||		   /* nothing to do */
		    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			nloops = 0;

			if (!did_something) {	/* wait for something to do */
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT(rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_thread_fini();
	return 0;
}
/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) { /* not being progressed */
		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}
	spin_unlock_bh(&sched->kss_lock);
}
/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&	/* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
static struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		struct lnet_magicversion *hmv;

		CLASSERT(sizeof(struct lnet_magicversion) ==
			 offsetof(struct ksock_hello_msg, kshm_src_nid));

		hmv = (struct lnet_magicversion *)hello;

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}
int
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	struct ksock_net *net = (struct ksock_net *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT(conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}
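
/*
 * Bulk sockets are unidirectional, so my BULK_IN is the peer's BULK_OUT and
 * vice versa; ANY and CONTROL pair with themselves.  SOCKLND_CONN_NONE marks
 * a type that can't legally appear in a HELLO.
 */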
static int
ksocknal_invert_type(int type)
{
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
}
static int
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    struct ksock_hello_msg *hello,
		    struct lnet_process_id *peerid,
		    __u64 *incarnation)
{
	/* Return < 0        fatal error
	 *        0          success
	 *        EALREADY   lost connection race
	 *        EPROTO     protocol version mismatch
	 */
	struct socket *sock = conn->ksnc_sock;
	int active = (conn->ksnc_proto != NULL);
	int timeout;
	int proto_match;
	int rc;
	struct ksock_proto *proto;
	struct lnet_process_id recv_id;

	/* socket type set on active connections - not set on passive */
	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? lnet_get_lnd_timeout() :
			   lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic,
			    sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Bad magic(1) %#08x (%#08x expected) from "
		       "%pI4h\n", __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	rc = lnet_sock_read(sock, &hello->kshm_version,
			    sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
			/* unknown protocol from peer_ni, tell peer_ni my protocol */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

		CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
		       conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
		CERROR("Error %d reading or checking hello from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}
	*incarnation = hello->kshm_src_incarnation;

	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
		       "from %pI4h\n", &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (!active &&
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer_ni process ID from socket */
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		/* peer_ni determines type */
		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pI4h\n",
			       hello->kshm_ctype, libcfs_id2str(*peerid),
			       &conn->ksnc_ipaddr);
			return -EPROTO;
		}

		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
				   " %pI4h, but they claimed they were "
				   "%s; please check your Lustre "
				   "configuration.\n",
				   libcfs_id2str(*peerid),
				   &conn->ksnc_ipaddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
		/* Possible protocol mismatch or I lost the connection race */
		return proto_match ? EALREADY : EPROTO;
	}

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
		       conn->ksnc_type, libcfs_id2str(*peerid),
		       &conn->ksnc_ipaddr,
		       hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
}
static int
ksocknal_connect(struct ksock_route *route)
{
	struct list_head zombies = LIST_HEAD_INIT(zombies);
	struct ksock_peer_ni *peer_ni = route->ksnr_peer;
	int type;
	int wanted;
	struct socket *sock;
	time64_t deadline;
	int retry_later = 0;
	int rc = 0;

	deadline = ktime_get_seconds() + lnet_get_lnd_timeout();

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	LASSERT(route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);

	route->ksnr_connecting = 1;

	for (;;) {
		wanted = ksocknal_route_mask() & ~route->ksnr_connected;

		/* stop connecting if peer_ni/route got closed under me, or
		 * route got connected while queued */
		if (peer_ni->ksnp_closing || route->ksnr_deleted ||
		    wanted == 0) {
			retry_later = 0;
			break;
		}

		/* reschedule if peer_ni is connecting to me */
		if (peer_ni->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer_ni %s(%d) already connecting to me, retry later.\n",
			       libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
			retry_later = 1;
		}

		if (retry_later) /* needs reschedule */
			break;

		if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
			type = SOCKLND_CONN_ANY;
		} else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
			type = SOCKLND_CONN_CONTROL;
		} else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
			type = SOCKLND_CONN_BULK_IN;
		} else {
			LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
			type = SOCKLND_CONN_BULK_OUT;
		}

		write_unlock_bh(&ksocknal_data.ksnd_global_lock);

		if (ktime_get_seconds() >= deadline) {
			rc = -ETIMEDOUT;
			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
				  route->ksnr_myipaddr,
				  route->ksnr_ipaddr, route->ksnr_port);
		if (rc != 0)
			goto failed;

		rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
		if (rc < 0) {
			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		/* A +ve RC means I have to retry because I lost the connection
		 * race or I have to renegotiate protocol version */
		retry_later = (rc != 0);
		if (retry_later)
			CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
			       libcfs_nid2str(peer_ni->ksnp_id.nid));

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	if (retry_later) {
		/* re-queue for attention; this frees me up to handle
		 * the peer_ni's incoming connection request */

		if (rc == EALREADY ||
		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
			/* We want to introduce a delay before next
			 * attempt to connect if we lost conn race,
			 * but the race is resolved quickly usually,
			 * so min_reconnectms should be good heuristic */
			route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
			route->ksnr_timeout = ktime_get_seconds() +
					      route->ksnr_retry_interval;
		}

		ksocknal_launch_connection_locked(route);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return retry_later;

 failed:
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	/* This is a retry rather than a new connection */
	route->ksnr_retry_interval *= 2;
	route->ksnr_retry_interval =
		max_t(time64_t, route->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_min_reconnectms / 1000);
	route->ksnr_retry_interval =
		min_t(time64_t, route->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_max_reconnectms / 1000);
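
	/* (binary exponential backoff: the interval doubles per consecutive
	 * failure and is clamped between the min/max reconnect tunables,
	 * which are expressed in milliseconds) */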
	LASSERT(route->ksnr_retry_interval);
	route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;

	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
	    peer_ni->ksnp_accepting == 0 &&
	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
		struct ksock_conn *conn;

		/* ksnp_tx_queue is queued on a conn on successful
		 * connection for V1.x and V2.x */
		if (!list_empty(&peer_ni->ksnp_conns)) {
			conn = list_entry(peer_ni->ksnp_conns.next,
					  struct ksock_conn, ksnc_list);
			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
		}

		/* take all the blocked packets while I've got the lock and
		 * complete below... */
		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_peer_failed(peer_ni);
	ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
	return 0;
}
/*
 * Check whether we need to create more connds.
 * It will try to create a new thread if necessary; @timeout can be
 * updated if we failed to create one, so the caller won't keep
 * retrying while running out of resources.
 */
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
	char name[16];
	int rc;
	int total = ksocknal_data.ksnd_connd_starting +
		    ksocknal_data.ksnd_connd_running;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
		/* can't create more connd, or still have enough
		 * threads to handle more connecting */
		return 0;
	}

	if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
		/* no pending connecting request */
		return 0;
	}

	if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
		/* may run out of resource, retry later */
		*timeout = cfs_time_seconds(1);
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* serialize starting to avoid flood */
		return 0;
	}

	ksocknal_data.ksnd_connd_starting_stamp = sec;
	ksocknal_data.ksnd_connd_starting++;
	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

	/* NB: total is the next id */
	snprintf(name, sizeof(name), "socknal_cd%02d", total);
	rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
	if (rc == 0)
		return 1;

	/* we tried... */
	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

	return -1;
}
/*
 * Check whether the current thread can exit: return 1 if there are too
 * many threads and none were created in the past 120 seconds.
 * This function may also update @timeout to make the caller come back
 * to recheck these conditions.
 */
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
	int val;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* in progress of starting new thread */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_running <=
	    *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
		return 0;
	}

	/* created thread in past 120 seconds? */
	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
		    SOCKNAL_CONND_TIMEOUT - sec);

	*timeout = (val > 0) ? cfs_time_seconds(val) :
			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
	if (val > 0)
		return 0;

	/* no creating in past 120 seconds */

	return ksocknal_data.ksnd_connd_running >
	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
/* Go through connd_routes queue looking for a route that we can process
 * right now; @timeout_p can be updated if we need to come back later */
static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
	time64_t now = ktime_get_seconds();
	struct ksock_route *route;

	/* connd_routes can contain both pending and ordinary routes */
	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {

		if (route->ksnr_retry_interval == 0 ||
		    now >= route->ksnr_timeout)
			return route;

		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
		    *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
			*timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
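		/* (i.e. remember the earliest pending retry so the caller
		 * sleeps no longer than necessary) */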
	}

	return NULL;
}

int
ksocknal_connd(void *arg)
{
	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
	struct ksock_connreq *cr;
	wait_queue_entry_t wait;
	int nloops = 0;
	int cons_retry = 0;

	cfs_block_allsigs();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(connd_lock);

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_running++;

	while (!ksocknal_data.ksnd_shuttingdown) {
		struct ksock_route *route = NULL;
		time64_t sec = ktime_get_real_seconds();
		long timeout = MAX_SCHEDULE_TIMEOUT;
		int dropped_lock = 0;

		if (ksocknal_connd_check_stop(sec, &timeout)) {
			/* wakeup another one to check stop */
			wake_up(&ksocknal_data.ksnd_connd_waitq);
			break;
		}

		if (ksocknal_connd_check_start(sec, &timeout)) {
			/* created new thread */
			dropped_lock = 1;
		}

		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
			/* Connection accepted by the listener */
			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
					struct ksock_connreq, ksncr_list);

			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			ksocknal_create_conn(cr->ksncr_ni, NULL,
					     cr->ksncr_sock, SOCKLND_CONN_NONE);
			lnet_ni_decref(cr->ksncr_ni);
			LIBCFS_FREE(cr, sizeof(*cr));

			spin_lock_bh(connd_lock);
		}

		/* Only handle an outgoing connection request if there
		 * is a thread left to handle incoming connections and
		 * create new connd */
		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
		    ksocknal_data.ksnd_connd_running)
			route = ksocknal_connd_get_route_locked(&timeout);

		if (route != NULL) {
			list_del(&route->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			if (ksocknal_connect(route)) {
				/* consecutive retry */
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive "
					      "re-connecting to %pI4h\n",
					      &route->ksnr_ipaddr);
					cons_retry = 0;
				}
			} else {
				cons_retry = 0;
			}

			ksocknal_route_decref(route);

			spin_lock_bh(connd_lock);
			ksocknal_data.ksnd_connd_connecting--;
		}

		if (dropped_lock) {
			if (++nloops < SOCKNAL_RESCHED)
				continue;
			spin_unlock_bh(connd_lock);
			nloops = 0;
			cond_resched();
			spin_lock_bh(connd_lock);
			continue;
		}

		/* Nothing to do for 'timeout' */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_unlock_bh(connd_lock);

		nloops = 0;
		schedule_timeout(timeout);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_lock_bh(connd_lock);
	}
	ksocknal_data.ksnd_connd_running--;
	spin_unlock_bh(connd_lock);

	ksocknal_thread_fini();
	return 0;
}
static struct ksock_conn *
ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
{
	/* We're called with a shared lock on ksnd_global_lock */
	struct ksock_conn *conn;
	struct list_head *ctmp;
	struct ksock_tx *tx;

	list_for_each(ctmp, &peer_ni->ksnp_conns) {
		int error;

		conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
		LASSERT(!conn->ksnc_closing);

		error = conn->ksnc_sock->sk->sk_err;
		if (error != 0) {
			ksocknal_conn_addref(conn);

			switch (error) {
			case ECONNRESET:
				CNETERR("A connection with %s "
					"(%pI4h:%d) was reset; "
					"it may have rebooted.\n",
					libcfs_id2str(peer_ni->ksnp_id),
					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			case ETIMEDOUT:
				CNETERR("A connection with %s "
					"(%pI4h:%d) timed out; the "
					"network or node may be down.\n",
					libcfs_id2str(peer_ni->ksnp_id),
					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			default:
				CNETERR("An unexpected network error %d "
					"occurred with %s "
					"(%pI4h:%d)\n", error,
					libcfs_id2str(peer_ni->ksnp_id),
					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			}

			return conn;
		}

		if (conn->ksnc_rx_started &&
		    ktime_get_seconds() >= conn->ksnc_rx_deadline) {
			/* Timed out incomplete incoming message */
			ksocknal_conn_addref(conn);
			CNETERR("Timeout receiving from %s (%pI4h:%d), "
				"state %d wanted %d left %d\n",
				libcfs_id2str(peer_ni->ksnp_id),
				&conn->ksnc_ipaddr,
				conn->ksnc_port,
				conn->ksnc_rx_state,
				conn->ksnc_rx_nob_wanted,
				conn->ksnc_rx_nob_left);
			return conn;
		}

		if ((!list_empty(&conn->ksnc_tx_queue) ||
		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
		    ktime_get_seconds() >= conn->ksnc_tx_deadline) {
			/* Timed out messages queued for sending or
			 * buffered in the socket's send buffer */
			ksocknal_conn_addref(conn);
			list_for_each_entry(tx, &conn->ksnc_tx_queue,
					    tx_list)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			CNETERR("Timeout sending data to %s (%pI4h:%d); "
				"the network or that node may be down.\n",
				libcfs_id2str(peer_ni->ksnp_id),
				&conn->ksnc_ipaddr, conn->ksnc_port);
			return conn;
		}
	}

	return NULL;
}
static void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
	struct ksock_tx *tx;
	struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
		tx = list_entry(peer_ni->ksnp_tx_queue.next,
				struct ksock_tx, tx_list);

		if (ktime_get_seconds() < tx->tx_deadline)
			break;

		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;

		list_del(&tx->tx_list);
		list_add_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
}
static int
ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer_ni->ksnp_conns))
		return 0;

	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
				  *ksocknal_tunables.ksnd_keepalive)
		return 0;

	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
		return 0;

	/* retry 10 seconds later, so we don't put pressure on this
	 * peer_ni if we failed to send a keepalive this time */
	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;

	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK; no keepalive needed */
			return 0;
		}
		spin_unlock_bh(&sched->kss_lock);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);
	return -EIO;
}
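
/*
 * Editor's note: the return-value contract of the function above, as a
 * hedged caller sketch mirroring ksocknal_check_peer_timeouts() below.
 * Zero means no keepalive was needed and ksnd_global_lock was never
 * dropped; any non-zero value (1 = ping queued, -ENOMEM / -EIO =
 * failure) means the lock was dropped and re-taken, so a list walk in
 * progress is no longer safe and must be restarted:
 *
 *	if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
 *		read_unlock(&ksocknal_data.ksnd_global_lock);
 *		goto again;	// rescan: the lock was dropped
 *	}
 */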
static void
ksocknal_check_peer_timeouts(int idx)
{
	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

 again:
	/* NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look... */
	read_lock(&ksocknal_data.ksnd_global_lock);

	list_for_each_entry(peer_ni, peers, ksnp_list) {
		struct ksock_tx *tx_stale;
		time64_t deadline = 0;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}

		conn = ksocknal_find_timed_out_conn(peer_ni);

		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/* NB we won't find this one again, but we can't
			 * just proceed with the next peer_ni, since we dropped
			 * ksnd_global_lock and it might be dead already! */
			ksocknal_conn_decref(conn);
			goto again;
		}

		/* we can't process stale txs right here because we're
		 * holding only a shared lock */
		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			struct ksock_tx *tx;

			tx = list_entry(peer_ni->ksnp_tx_queue.next,
					struct ksock_tx, tx_list);
			if (ktime_get_seconds() >= tx->tx_deadline) {
				ksocknal_peer_addref(peer_ni);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer_ni);

				ksocknal_peer_decref(peer_ni);
				goto again;
			}
		}

		if (list_empty(&peer_ni->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer_ni->ksnp_lock);
		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
			if (ktime_get_seconds() < tx->tx_deadline)
				break;
			/* ignore the TX if connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (tx_stale == NULL)
				tx_stale = tx;
		}

		if (tx_stale == NULL) {
			spin_unlock(&peer_ni->ksnp_lock);
			continue;
		}

		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer_ni->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);

		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the oldest (%p) timed out %lld secs ago, resid: %d, wmem: %d\n",
		       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
		       ktime_get_seconds() - deadline,
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);

		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
int ksocknal_reaper(void *arg)
{
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	struct list_head enomem_conns;
	int nenomem_conns;
	time64_t timeout;
	int i;
	int peer_index = 0;
	time64_t deadline = ktime_get_seconds();

	cfs_block_allsigs();

	INIT_LIST_HEAD(&enomem_conns);
	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
			/* take over the whole ksnd_enomem_conns list */
			list_add(&enomem_conns,
				 &ksocknal_data.ksnd_enomem_conns);
			list_del_init(&ksocknal_data.ksnd_enomem_conns);
		}

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}
		/* careful with the wrap: catch up in 'p'-second steps
		 * if we fell behind the deadline */
		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = ksocknal_data.ksnd_peer_hash_size;
			unsigned int lnd_timeout;

			/* Time to check for timeouts on a few more peers: I
			 * do checks every 'p' seconds on a proportion of the
			 * peer_ni table and I need to check every connection
			 * 'n' times within a timeout interval, to ensure I
			 * detect a timeout on any connection within (n+1)/n
			 * times the timeout interval. */
			lnd_timeout = lnet_get_lnd_timeout();
			if (lnd_timeout > n * p)
				chunk = (chunk * n * p) / lnd_timeout;
			if (chunk == 0)
				chunk = 1;

			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     ksocknal_data.ksnd_peer_hash_size;
			}
			deadline += p;
		}
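
		/*
		 * Editor's note: a worked example of the chunk arithmetic
		 * above, with n = 4 and p = 1 as defined in that loop and
		 * assuming illustrative values lnd_timeout = 50s and a
		 * peer hash table of 100 chains:
		 *
		 *	chunk = (100 * 4 * 1) / 50 = 8 chains per pass
		 *
		 * One pass runs every p = 1 second, so the whole table is
		 * covered every 100 / 8 = 12.5s = lnd_timeout / n, and a
		 * dead connection is detected within at most
		 * 50s + 12.5s = (n + 1) / n times the timeout interval.
		 */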
		if (nenomem_conns != 0) {
			/* Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list. */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
						     timeout;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(cfs_time_seconds(timeout));

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}
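
/*
 * Editor's note: a minimal sketch of how this thread is expected to be
 * started, assuming the ksocknal_thread_start() helper that the socklnd
 * startup code in this tree wraps around kthread_run(); the helper name,
 * signature and thread name below are that code's conventions, not
 * introduced here:
 *
 *	rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
 *	if (rc != 0)
 *		CERROR("Can't spawn socknal reaper: %d\n", rc);
 */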