/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.hpdd.intel.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
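
/*
 * NOOP txs (stand-alone zero-copy ACKs and keepalive pings) are all the
 * same size, so instead of being allocated and freed individually they
 * are recycled through the ksnd_idle_noop_txs freelist; see
 * ksocknal_free_tx() below, which returns them to that list.
 */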
ksock_tx_t *
ksocknal_alloc_tx(int type, int size)
{
        ksock_tx_t *tx = NULL;

        if (type == KSOCK_MSG_NOOP) {
                LASSERT(size == KSOCK_NOOP_TX_SIZE);

                /* searching for a noop tx in free list */
                spin_lock(&ksocknal_data.ksnd_tx_lock);

                if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                        tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
                                        ksock_tx_t, tx_list);
                        LASSERT(tx->tx_desc_size == size);
                        list_del(&tx->tx_list);
                }

                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        }

        if (tx == NULL)
                LIBCFS_ALLOC(tx, size);

        if (tx == NULL)
                return NULL;

        atomic_set(&tx->tx_refcount, 1);
        tx->tx_zc_aborted = 0;
        tx->tx_zc_capable = 0;
        tx->tx_zc_checked = 0;
        tx->tx_desc_size = size;

        atomic_inc(&ksocknal_data.ksnd_nactive_txs);

        return tx;
}
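
/*
 * ksocknal_alloc_tx_noop() builds a tx carrying only a ksock_msg header,
 * used for stand-alone ZC ACKs and keepalive pings.  A minimal usage
 * sketch, mirroring ksocknal_send_keepalive_locked() below:
 *
 *      ksock_tx_t *tx = ksocknal_alloc_tx_noop(cookie, nonblk);
 *
 *      if (tx == NULL)
 *              return -ENOMEM;
 *      if (ksocknal_launch_packet(ni, tx, id) != 0)
 *              ksocknal_free_tx(tx);
 */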
ksock_tx_t *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
        ksock_tx_t *tx;

        tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
        if (tx == NULL) {
                CERROR("Can't allocate noop tx desc\n");
                return NULL;
        }

        tx->tx_conn = NULL;
        tx->tx_lnetmsg = NULL;
        tx->tx_kiov = NULL;
        tx->tx_nkiov = 0;
        tx->tx_iov = tx->tx_frags.virt.iov;
        tx->tx_niov = 1;
        tx->tx_nonblk = nonblk;

        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = cookie;

        return tx;
}
void
ksocknal_free_tx(ksock_tx_t *tx)
{
        atomic_dec(&ksocknal_data.ksnd_nactive_txs);

        if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
                /* it's a noop tx - return it to the freelist */
                spin_lock(&ksocknal_data.ksnd_tx_lock);

                list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

                spin_unlock(&ksocknal_data.ksnd_tx_lock);
        } else {
                LIBCFS_FREE(tx, tx->tx_desc_size);
        }
}
static int
ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct kvec *iov = tx->tx_iov;
        int nob;
        int rc;

        LASSERT(tx->tx_niov > 0);

        /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
        rc = ksocknal_lib_send_iov(conn, tx);

        if (rc <= 0)                            /* sent nothing? */
                return rc;

        nob = rc;
        LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;

        /* "consume" iov */
        do {
                LASSERT(tx->tx_niov > 0);

                if (nob < (int)iov->iov_len) {
                        iov->iov_base += nob;
                        iov->iov_len -= nob;
                        return rc;
                }

                nob -= iov->iov_len;
                tx->tx_iov = ++iov;
                tx->tx_niov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
        lnet_kiov_t *kiov = tx->tx_kiov;
        int nob;
        int rc;

        LASSERT(tx->tx_niov == 0);
        LASSERT(tx->tx_nkiov > 0);

        /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
        rc = ksocknal_lib_send_kiov(conn, tx);

        if (rc <= 0)                            /* sent nothing? */
                return rc;

        nob = rc;
        LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;

        /* "consume" kiov */
        do {
                LASSERT(tx->tx_nkiov > 0);

                if (nob < (int)kiov->kiov_len) {
                        kiov->kiov_offset += nob;
                        kiov->kiov_len -= nob;
                        return rc;
                }

                nob -= (int)kiov->kiov_len;
                tx->tx_kiov = ++kiov;
                tx->tx_nkiov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
        int rc;
        int bufnob;

        if (ksocknal_data.ksnd_stall_tx != 0) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
        }

        LASSERT(tx->tx_resid != 0);

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }

        do {
                if (ksocknal_data.ksnd_enomem_tx > 0) {
                        /* testing... */
                        ksocknal_data.ksnd_enomem_tx--;
                        rc = -EAGAIN;
                } else if (tx->tx_niov != 0) {
                        rc = ksocknal_send_iov(conn, tx);
                } else {
                        rc = ksocknal_send_kiov(conn, tx);
                }

                bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
                if (rc > 0)                     /* sent something? */
                        conn->ksnc_tx_bufnob += rc; /* account it */

                if (bufnob < conn->ksnc_tx_bufnob) {
                        /* allocated send buffer bytes < computed; infer
                         * something got ACKed */
                        conn->ksnc_tx_deadline =
                                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                        conn->ksnc_tx_bufnob = bufnob;
                        smp_mb();
                }

                if (rc <= 0) { /* Didn't write anything? */

                        if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
                                rc = -EAGAIN;

                        /* Check if EAGAIN is due to memory pressure */
                        if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
                                rc = -ENOMEM;

                        break;
                }

                /* socket's wmem_queued now includes 'rc' bytes */
                atomic_sub(rc, &conn->ksnc_tx_nob);
                rc = 0;

        } while (tx->tx_resid != 0);

        ksocknal_connsock_decref(conn);
        return rc;
}
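
/*
 * ksocknal_transmit() returns 0 when the whole tx has been sent, -EAGAIN
 * for a partial send to be retried when the socket has space, -ENOMEM when
 * the -EAGAIN was caused by memory pressure, and other negatives for fatal
 * errors; ksocknal_process_transmit() below dispatches on exactly these
 * values.
 */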
static int
ksocknal_recv_iov(ksock_conn_t *conn)
{
        struct kvec *iov = conn->ksnc_rx_iov;
        int nob;
        int rc;

        LASSERT(conn->ksnc_rx_niov > 0);

        /* Never touch conn->ksnc_rx_iov or change connection
         * status inside ksocknal_lib_recv_iov */
        rc = ksocknal_lib_recv_iov(conn);

        if (rc <= 0)
                return rc;

        /* received something... */
        nob = rc;

        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
        conn->ksnc_rx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
        smp_mb();                       /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;

        do {
                LASSERT(conn->ksnc_rx_niov > 0);

                if (nob < (int)iov->iov_len) {
                        iov->iov_len -= nob;
                        iov->iov_base += nob;
                        return -EAGAIN;
                }

                nob -= iov->iov_len;
                conn->ksnc_rx_iov = ++iov;
                conn->ksnc_rx_niov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_recv_kiov(ksock_conn_t *conn)
{
        lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
        int nob;
        int rc;

        LASSERT(conn->ksnc_rx_nkiov > 0);

        /* Never touch conn->ksnc_rx_kiov or change connection
         * status inside ksocknal_lib_recv_kiov */
        rc = ksocknal_lib_recv_kiov(conn);

        if (rc <= 0)
                return rc;

        /* received something... */
        nob = rc;

        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
        conn->ksnc_rx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
        smp_mb();                       /* order with setting rx_started */
        conn->ksnc_rx_started = 1;

        conn->ksnc_rx_nob_wanted -= nob;
        conn->ksnc_rx_nob_left -= nob;

        do {
                LASSERT(conn->ksnc_rx_nkiov > 0);

                if (nob < (int)kiov->kiov_len) {
                        kiov->kiov_offset += nob;
                        kiov->kiov_len -= nob;
                        return -EAGAIN;
                }

                nob -= kiov->kiov_len;
                conn->ksnc_rx_kiov = ++kiov;
                conn->ksnc_rx_nkiov--;
        } while (nob != 0);

        return rc;
}
static int
ksocknal_receive(ksock_conn_t *conn)
{
        /* Return 1 on success, 0 on EOF, < 0 on error.
         * Caller checks ksnc_rx_nob_wanted to determine
         * progress/completion. */
        int rc;

        if (ksocknal_data.ksnd_stall_rx != 0) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
        }

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }

        for (;;) {
                if (conn->ksnc_rx_niov != 0)
                        rc = ksocknal_recv_iov(conn);
                else
                        rc = ksocknal_recv_kiov(conn);

                if (rc <= 0) {
                        /* error/EOF or partial receive */
                        if (rc == -EAGAIN) {
                                rc = 1;
                        } else if (rc == 0 && conn->ksnc_rx_started) {
                                /* EOF in the middle of a message */
                                rc = -EPROTO;
                        }
                        break;
                }

                /* Completed a fragment */

                if (conn->ksnc_rx_nob_wanted == 0) {
                        rc = 1;
                        break;
                }
        }

        ksocknal_connsock_decref(conn);
        return rc;
}
void
ksocknal_tx_done(struct lnet_ni *ni, ksock_tx_t *tx)
{
        struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
        int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;

        LASSERT(ni != NULL || tx->tx_conn != NULL);

        /* pick up ni from the conn BEFORE dropping our ref on it */
        if (ni == NULL && tx->tx_conn != NULL)
                ni = tx->tx_conn->ksnc_peer->ksnp_ni;

        if (tx->tx_conn != NULL)
                ksocknal_conn_decref(tx->tx_conn);

        ksocknal_free_tx(tx);
        if (lnetmsg != NULL) /* KSOCK_MSG_NOOP txs go without lnetmsg */
                lnet_finalize(ni, lnetmsg, rc);
}
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
        ksock_tx_t *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, ksock_tx_t, tx_list);

                if (error && tx->tx_lnetmsg != NULL) {
                        CNETERR("Deleting packet type %d len %d %s->%s\n",
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
                } else if (error) {
                        CNETERR("Deleting noop packet\n");
                }

                list_del(&tx->tx_list);

                LASSERT(atomic_read(&tx->tx_refcount) == 1);
                ksocknal_tx_done(ni, tx);
        }
}
static void
ksocknal_check_zc_req(ksock_tx_t *tx)
{
        ksock_conn_t *conn = tx->tx_conn;
        ksock_peer_ni_t *peer_ni = conn->ksnc_peer;

        /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
         * to ksnp_zc_req_list if some fragment of this message should be sent
         * zero-copy. Our peer_ni will send an ACK containing this cookie when
         * she has received this message to tell us we can signal completion.
         * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
         * ksnp_zc_req_list. */
        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);

        tx->tx_zc_checked = 1;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
            !conn->ksnc_zc_capable)
                return;

        /* assign cookie and queue tx to pending list, it will be released when
         * a matching ack is received. See ksocknal_handle_zcack() */

        ksocknal_tx_addref(tx);

        spin_lock(&peer_ni->ksnp_lock);

        /* ZC_REQ is going to be pinned to the peer_ni */
        tx->tx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

        LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

        tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

        if (peer_ni->ksnp_zc_next_cookie == 0)
                peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

        spin_unlock(&peer_ni->ksnp_lock);
}
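
/*
 * Lifecycle of a ZC request cookie, as implemented above and in
 * ksocknal_process_receive():
 *
 *   1. ksocknal_check_zc_req() assigns ksm_zc_cookies[0], takes an extra
 *      ref on the tx and queues it on ksnp_zc_req_list.
 *   2. The peer_ni echoes the cookie back in a ZC ACK (possibly
 *      piggybacked on a normal message).
 *   3. pro_handle_zcack() matches the cookie, removes the tx from
 *      ksnp_zc_req_list and drops the extra ref, completing the tx.
 *
 * ksocknal_uncheck_zc_req() below unwinds step 1 if the send fails before
 * the ACK arrives.
 */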
static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
        ksock_peer_ni_t *peer_ni = tx->tx_conn->ksnc_peer;

        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);

        tx->tx_zc_checked = 0;

        spin_lock(&peer_ni->ksnp_lock);

        if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
                /* Not waiting for an ACK */
                spin_unlock(&peer_ni->ksnp_lock);
                return;
        }

        tx->tx_msg.ksm_zc_cookies[0] = 0;
        list_del(&tx->tx_zc_list);

        spin_unlock(&peer_ni->ksnp_lock);

        ksocknal_tx_decref(tx);
}
static int
ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
        int rc;

        if (tx->tx_zc_capable && !tx->tx_zc_checked)
                ksocknal_check_zc_req(tx);

        rc = ksocknal_transmit(conn, tx);

        CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

        if (tx->tx_resid == 0) {
                /* Sent everything OK */
                LASSERT(rc == 0);
                return 0;
        }

        if (rc == -EAGAIN)
                return rc;

        if (rc == -ENOMEM) {
                static int counter;

                counter++;   /* exponential backoff warnings: the test below
                              * is true only when counter is a power of 2 */
                if ((counter & (-counter)) == counter)
                        CWARN("%u ENOMEM tx %p (%u allocated)\n",
                              counter, conn, atomic_read(&libcfs_kmemory));

                /* Queue on ksnd_enomem_conns for retry after a timeout */
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

                /* enomem list takes over scheduler's ref... */
                LASSERT(conn->ksnc_tx_scheduled);
                list_add_tail(&conn->ksnc_tx_list,
                              &ksocknal_data.ksnd_enomem_conns);
                if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
                                                   SOCKNAL_ENOMEM_RETRY),
                                      ksocknal_data.ksnd_reaper_waketime))
                        wake_up(&ksocknal_data.ksnd_reaper_waitq);

                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
                return rc;
        }

        /* Actual error */
        LASSERT(rc < 0);

        if (!conn->ksnc_closing) {
                switch (rc) {
                case -ECONNRESET:
                        LCONSOLE_WARN("Host %pI4h reset our connection "
                                      "while we were sending data; it may have "
                                      "rebooted.\n", &conn->ksnc_ipaddr);
                        break;
                default:
                        LCONSOLE_WARN("There was an unexpected network error "
                                      "while writing to %pI4h: %d.\n",
                                      &conn->ksnc_ipaddr, rc);
                        break;
                }
                CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
                       conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
                       &conn->ksnc_ipaddr, conn->ksnc_port);
        }

        if (tx->tx_zc_checked)
                ksocknal_uncheck_zc_req(tx);

        /* it's not an error if conn is being closed */
        ksocknal_close_conn_and_siblings(conn,
                                         (conn->ksnc_closing) ? 0 : rc);

        return rc;
}
void
ksocknal_launch_connection_locked(ksock_route_t *route)
{
        /* called holding write lock on ksnd_global_lock */

        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

        route->ksnr_scheduled = 1;              /* scheduling conn for connd */
        ksocknal_route_addref(route);           /* extra ref for connd */

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

        list_add_tail(&route->ksnr_connd_list,
                      &ksocknal_data.ksnd_connd_routes);
        wake_up(&ksocknal_data.ksnd_connd_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked(ksock_peer_ni_t *peer_ni)
{
        ksock_route_t *route;

        /* called holding write lock on ksnd_global_lock */
        for (;;) {
                /* launch any/all connections that need it */
                route = ksocknal_find_connectable_route_locked(peer_ni);
                if (route == NULL)
                        return;

                ksocknal_launch_connection_locked(route);
        }
}
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_ni_t *peer_ni, ksock_tx_t *tx, int nonblk)
{
        struct list_head *tmp;
        ksock_conn_t *conn;
        ksock_conn_t *typed = NULL;
        ksock_conn_t *fallback = NULL;
        int tnob = 0;
        int fnob = 0;

        list_for_each(tmp, &peer_ni->ksnp_conns) {
                ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
                int nob = atomic_read(&c->ksnc_tx_nob) +
                          c->ksnc_sock->sk->sk_wmem_queued;
                int rc;

                LASSERT(!c->ksnc_closing);
                LASSERT(c->ksnc_proto != NULL &&
                        c->ksnc_proto->pro_match_tx != NULL);

                rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

                switch (rc) {
                default:
                        LBUG();
                case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
                        continue;

                case SOCKNAL_MATCH_YES: /* typed connection */
                        if (typed == NULL || tnob > nob ||
                            (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
                             cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
                                typed = c;
                                tnob = nob;
                        }
                        break;

                case SOCKNAL_MATCH_MAY: /* fallback connection */
                        if (fallback == NULL || fnob > nob ||
                            (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
                             cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
                                fallback = c;
                                fnob = nob;
                        }
                        break;
                }
        }

        /* prefer the typed selection */
        conn = (typed != NULL) ? typed : fallback;

        if (conn != NULL)
                conn->ksnc_tx_last_post = cfs_time_current();

        return conn;
}
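
/*
 * Selection above prefers a "typed" match but falls back to a MAY match,
 * and within each class picks the connection with the smallest backlog
 * (queued ksnc_tx_nob plus socket wmem).  For example, given two typed
 * connections with backlogs of 4096 and 1024 bytes, the 1024-byte one is
 * chosen; with equal backlogs and the round_robin tunable set, the least
 * recently posted-to connection wins.
 */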
void
ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
{
        conn->ksnc_proto->pro_pack(tx);

        atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
        ksocknal_conn_addref(conn); /* +1 ref for tx */
        tx->tx_conn = conn;
}
void
ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
{
        ksock_sched_t *sched = conn->ksnc_scheduler;
        struct ksock_msg *msg = &tx->tx_msg;
        ksock_tx_t *ztx = NULL;
        int bufnob = 0;

        /* called holding global lock (read or irq-write) and caller may
         * not have dropped this lock between finding conn and calling me,
         * so we don't need the {get,put}connsock dance to deref
         * ksnc_sock... */
        LASSERT(!conn->ksnc_closing);

        CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
               libcfs_id2str(conn->ksnc_peer->ksnp_id),
               &conn->ksnc_ipaddr, conn->ksnc_port);

        ksocknal_tx_prep(conn, tx);

        /* Ensure the frags we've been given EXACTLY match the number of
         * bytes we want to send.  Many TCP/IP stacks disregard any total
         * size parameters passed to them and just look at the frags.
         *
         * We always expect at least 1 mapped fragment containing the
         * complete ksocknal message header. */
        LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
                lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
                (unsigned int)tx->tx_nob);
        LASSERT(tx->tx_niov >= 1);
        LASSERT(tx->tx_resid == tx->tx_nob);

        CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
               tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
                                              KSOCK_MSG_NOOP,
               tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
        spin_lock_bh(&sched->kss_lock);

        if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                /* First packet starts the timeout */
                conn->ksnc_tx_deadline =
                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
                        conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                conn->ksnc_tx_bufnob = 0;
                smp_mb(); /* order with adding to tx_queue */
        }

        if (msg->ksm_type == KSOCK_MSG_NOOP) {
                /* The packet is noop ZC ACK, try to piggyback the ack_cookie
                 * on a normal packet so I don't need to send it */
                LASSERT(msg->ksm_zc_cookies[1] != 0);
                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

                if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
                        ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */

        } else {
                /* It's a normal packet - can it piggyback a noop zc-ack that
                 * has been queued already? */
                LASSERT(msg->ksm_zc_cookies[1] == 0);
                LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

                ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
                /* ztx will be released later */
        }

        if (ztx != NULL) {
                atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
                list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
        }

        if (conn->ksnc_tx_ready &&      /* able to send */
            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
                /* +1 ref for scheduler */
                ksocknal_conn_addref(conn);
                list_add_tail(&conn->ksnc_tx_list,
                              &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                wake_up(&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);
}
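
/*
 * ZC ACK piggybacking above works in both directions: a NOOP tx tries to
 * attach its ack cookie to an already-queued normal message
 * (pro_queue_tx_zcack), and a normal message tries to absorb an
 * already-queued NOOP (pro_queue_tx_msg).  Either way the displaced NOOP
 * ends up on kss_zombie_noop_txs, where the scheduler frees it cheaply
 * back to the freelist.
 */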
ksock_route_t *
ksocknal_find_connectable_route_locked(ksock_peer_ni_t *peer_ni)
{
        cfs_time_t now = cfs_time_current();
        struct list_head *tmp;
        ksock_route_t *route;

        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);

                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

                if (route->ksnr_scheduled)      /* connections being established */
                        continue;

                /* all route types connected ? */
                if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
                        continue;

                if (!(route->ksnr_retry_interval == 0 || /* first attempt */
                      cfs_time_aftereq(now, route->ksnr_timeout))) {
                        CDEBUG(D_NET,
                               "Too soon to retry route %pI4h "
                               "(cnted %d, interval %ld, %ld secs later)\n",
                               &route->ksnr_ipaddr,
                               route->ksnr_connected,
                               route->ksnr_retry_interval,
                               cfs_duration_sec(route->ksnr_timeout - now));
                        continue;
                }

                return route;
        }

        return NULL;
}
ksock_route_t *
ksocknal_find_connecting_route_locked(ksock_peer_ni_t *peer_ni)
{
        struct list_head *tmp;
        ksock_route_t *route;

        list_for_each(tmp, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);

                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

                if (route->ksnr_scheduled)
                        return route;
        }

        return NULL;
}
int
ksocknal_launch_packet(struct lnet_ni *ni, ksock_tx_t *tx,
                       struct lnet_process_id id)
{
        ksock_peer_ni_t *peer_ni;
        ksock_conn_t *conn;
        rwlock_t *g_lock;
        int retry;
        int rc;

        LASSERT(tx->tx_conn == NULL);

        g_lock = &ksocknal_data.ksnd_global_lock;

        for (retry = 0;; retry = 1) {
                read_lock(g_lock);
                peer_ni = ksocknal_find_peer_locked(ni, id);
                if (peer_ni != NULL) {
                        if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
                                conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
                                if (conn != NULL) {
                                        /* I've got no routes that need to be
                                         * connecting and I do have an actual
                                         * connection... */
                                        ksocknal_queue_tx_locked(tx, conn);
                                        read_unlock(g_lock);
                                        return 0;
                                }
                        }
                }

                /* I'll need a write lock... */
                read_unlock(g_lock);

                write_lock_bh(g_lock);

                peer_ni = ksocknal_find_peer_locked(ni, id);
                if (peer_ni != NULL)
                        break;

                write_unlock_bh(g_lock);

                if ((id.pid & LNET_PID_USERFLAG) != 0) {
                        CERROR("Refusing to create a connection to "
                               "userspace process %s\n", libcfs_id2str(id));
                        return -EHOSTUNREACH;
                }

                if (retry) {
                        CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
                        return -EHOSTUNREACH;
                }

                rc = ksocknal_add_peer(ni, id,
                                       LNET_NIDADDR(id.nid),
                                       lnet_acceptor_port());
                if (rc != 0) {
                        CERROR("Can't add peer_ni %s: %d\n",
                               libcfs_id2str(id), rc);
                        return rc;
                }
        }

        ksocknal_launch_all_connections_locked(peer_ni);

        conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
        if (conn != NULL) {
                /* Connection exists; queue message on it */
                ksocknal_queue_tx_locked(tx, conn);
                write_unlock_bh(g_lock);
                return 0;
        }

        if (peer_ni->ksnp_accepting > 0 ||
            ksocknal_find_connecting_route_locked(peer_ni) != NULL) {
                /* the message is going to be pinned to the peer_ni */
                tx->tx_deadline =
                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

                /* Queue the message until a connection is established */
                list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
                write_unlock_bh(g_lock);
                return 0;
        }

        write_unlock_bh(g_lock);

        /* NB Routes may be ignored if connections to them failed recently */
        CNETERR("No usable routes to %s\n", libcfs_id2str(id));
        return -EHOSTUNREACH;
}
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
        int mpflag = 1;
        int type = lntmsg->msg_type;
        struct lnet_process_id target = lntmsg->msg_target;
        unsigned int payload_niov = lntmsg->msg_niov;
        struct kvec *payload_iov = lntmsg->msg_iov;
        lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
        unsigned int payload_offset = lntmsg->msg_offset;
        unsigned int payload_nob = lntmsg->msg_len;
        ksock_tx_t *tx;
        int desc_size;
        int rc;

        /* NB 'private' is different depending on what we're sending.
         * Just ignore it... */

        CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT(payload_nob == 0 || payload_niov > 0);
        LASSERT(payload_niov <= LNET_MAX_IOV);
        /* payload is either all vaddrs or all pages */
        LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
        LASSERT(!in_interrupt());

        if (payload_iov != NULL)
                desc_size = offsetof(ksock_tx_t,
                                     tx_frags.virt.iov[1 + payload_niov]);
        else
                desc_size = offsetof(ksock_tx_t,
                                     tx_frags.paged.kiov[payload_niov]);

        if (lntmsg->msg_vmflush)
                mpflag = cfs_memory_pressure_get_and_set();
        tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
        if (tx == NULL) {
                CERROR("Can't allocate tx desc type %d size %d\n",
                       type, desc_size);
                if (lntmsg->msg_vmflush)
                        cfs_memory_pressure_restore(mpflag);
                return -ENOMEM;
        }

        tx->tx_conn = NULL;                     /* set when assigned a conn */
        tx->tx_lnetmsg = lntmsg;

        if (payload_iov != NULL) {
                tx->tx_kiov = NULL;
                tx->tx_nkiov = 0;
                tx->tx_iov = tx->tx_frags.virt.iov;
                tx->tx_niov = 1 +
                              lnet_extract_iov(payload_niov, &tx->tx_iov[1],
                                               payload_niov, payload_iov,
                                               payload_offset, payload_nob);
        } else {
                tx->tx_niov = 1;
                tx->tx_iov = &tx->tx_frags.paged.iov;
                tx->tx_kiov = tx->tx_frags.paged.kiov;
                tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
                                                 payload_niov, payload_kiov,
                                                 payload_offset, payload_nob);

                if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
                        tx->tx_zc_capable = 1;
        }

        tx->tx_msg.ksm_csum = 0;
        tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
        tx->tx_msg.ksm_zc_cookies[0] = 0;
        tx->tx_msg.ksm_zc_cookies[1] = 0;

        /* The first fragment will be set later in pro_pack */
        rc = ksocknal_launch_packet(ni, tx, target);
        if (!mpflag)
                cfs_memory_pressure_restore(mpflag);

        if (rc == 0)
                return 0;

        ksocknal_free_tx(tx);
        return -EIO;
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
        struct task_struct *task = kthread_run(fn, arg, name);

        if (IS_ERR(task))
                return PTR_ERR(task);

        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        ksocknal_data.ksnd_nthreads++;
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
        return 0;
}
void
ksocknal_thread_fini(void)
{
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        ksocknal_data.ksnd_nthreads--;
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
{
        static char ksocknal_slop_buffer[4096];
        int nob;
        unsigned int niov;
        int skipped;

        LASSERT(conn->ksnc_proto != NULL);

        if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
                /* Remind the socket to ack eagerly... */
                ksocknal_lib_eager_ack(conn);
        }

        if (nob_to_skip == 0) {         /* right at next packet boundary now */
                conn->ksnc_rx_started = 0;
                smp_mb();               /* racing with timeout thread */

                switch (conn->ksnc_proto->pro_version) {
                case KSOCK_PROTO_V2:
                case KSOCK_PROTO_V3:
                        conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
                        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                        conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

                        conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
                        conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
                        conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
                        break;

                case KSOCK_PROTO_V1:
                        /* Receiving bare struct lnet_hdr */
                        conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
                        conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
                        conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);

                        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                        conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
                        conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
                        break;

                default:
                        LBUG();
                }
                conn->ksnc_rx_niov = 1;

                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_nkiov = 0;
                conn->ksnc_rx_csum = ~0;
                return 1;
        }

        /* Set up to skip as much as possible now.  If there's more left
         * (ran out of iov entries) we'll get called again */

        conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
        conn->ksnc_rx_nob_left = nob_to_skip;
        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
        skipped = 0;
        niov = 0;

        do {
                nob = MIN(nob_to_skip, sizeof(ksocknal_slop_buffer));

                conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
                conn->ksnc_rx_iov[niov].iov_len = nob;
                niov++;
                skipped += nob;
                nob_to_skip -= nob;

        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

        conn->ksnc_rx_niov = niov;
        conn->ksnc_rx_kiov = NULL;
        conn->ksnc_rx_nkiov = 0;
        conn->ksnc_rx_nob_wanted = skipped;
        return 0;
}
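
/*
 * Worked example of the slop path above: skipping nob_to_skip = 10000
 * bytes with the 4096-byte slop buffer builds rx iovs of 4096, 4096 and
 * 1808 bytes (all aliasing ksocknal_slop_buffer).  If nob_to_skip were
 * larger than the whole rx iov space could map, the function returns 0
 * and is simply called again for the remainder.
 */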
static int
ksocknal_process_receive(ksock_conn_t *conn)
{
        struct lnet_hdr *lhdr;
        struct lnet_process_id *id;
        int rc;

        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

        /* NB: sched lock NOT held */
        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
        LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
        if (conn->ksnc_rx_nob_wanted != 0) {
                rc = ksocknal_receive(conn);

                if (rc <= 0) {
                        struct lnet_process_id ksnp_id;

                        ksnp_id = conn->ksnc_peer->ksnp_id;

                        LASSERT(rc != -EAGAIN);

                        if (rc == 0)
                                CDEBUG(D_NET, "[%p] EOF from %s "
                                       "ip %pI4h:%d\n", conn,
                                       libcfs_id2str(ksnp_id),
                                       &conn->ksnc_ipaddr,
                                       conn->ksnc_port);
                        else if (!conn->ksnc_closing)
                                CERROR("[%p] Error %d on read from %s "
                                       "ip %pI4h:%d\n", conn, rc,
                                       libcfs_id2str(ksnp_id),
                                       &conn->ksnc_ipaddr,
                                       conn->ksnc_port);

                        /* it's not an error if conn is being closed */
                        ksocknal_close_conn_and_siblings(conn,
                                                         (conn->ksnc_closing) ? 0 : rc);
                        return (rc == 0 ? -ESHUTDOWN : rc);
                }

                if (conn->ksnc_rx_nob_wanted != 0) {
                        /* short read */
                        return -EAGAIN;
                }
        }
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_KSM_HEADER:
                if (conn->ksnc_flip) {
                        __swab32s(&conn->ksnc_msg.ksm_type);
                        __swab32s(&conn->ksnc_msg.ksm_csum);
                        __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
                        __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
                }

                if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
                    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
                        CERROR("%s: Unknown message type: %x\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_type);
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, -EPROTO);
                        return -EPROTO;
                }

                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
                    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        /* NOOP Checksum error */
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, -EPROTO);
                        return -EIO;
                }

                if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
                        __u64 cookie = 0;

                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

                        if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
                                cookie = conn->ksnc_msg.ksm_zc_cookies[0];

                        rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
                                        conn->ksnc_msg.ksm_zc_cookies[1]);

                        if (rc != 0) {
                                CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                                       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
                                ksocknal_new_packet(conn, 0);
                                ksocknal_close_conn_and_siblings(conn, -EPROTO);
                                return rc;
                        }
                }

                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
                        ksocknal_new_packet(conn, 0);
                        return 0;       /* NOOP is done and just return */
                }

                conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
                conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
                conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

                conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
                conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);

                conn->ksnc_rx_niov = 1;
                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_nkiov = 0;

                goto again;     /* read lnet header now */

        case SOCKNAL_RX_LNET_HEADER:
                /* unpack message header */
                conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

                if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
                        /* Userspace peer_ni */
                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;

                        /* Substitute process ID assigned at connection time */
                        lhdr->src_pid = cpu_to_le32(id->pid);
                        lhdr->src_nid = cpu_to_le64(id->nid);
                }

                conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
                ksocknal_conn_addref(conn);     /* ++ref while parsing */

                rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
                                &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
                                conn->ksnc_peer->ksnp_id.nid, conn, 0);
                if (rc < 0) {
                        /* I just received garbage: give up on this conn */
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        ksocknal_conn_decref(conn);
                        return -EPROTO;
                }

                /* I'm racing with ksocknal_recv() */
                LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
                        conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

                if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
                        return 0;

                /* ksocknal_recv() got called */
                goto again;

        case SOCKNAL_RX_LNET_PAYLOAD:
                /* payload all received */
                rc = 0;

                if (conn->ksnc_rx_nob_left == 0 &&      /* not truncating */
                    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                               conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
                        rc = -EIO;
                }

                if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;

                        rc = conn->ksnc_proto->pro_handle_zcreq(conn,
                                        conn->ksnc_msg.ksm_zc_cookies[0],
                                        *ksocknal_tunables.ksnd_nonblk_zcack ||
                                        le64_to_cpu(lhdr->src_nid) != id->nid);
                }

                lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);

                if (rc != 0) {
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        return -EPROTO;
                }
                /* Fall through */

        case SOCKNAL_RX_SLOP:
                /* starting new packet? */
                if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
                        return 0;       /* come back later */
                goto again;             /* try to finish reading slop now */

        default:
                break;
        }

        /* Not Reached */
        LBUG();
        return -EINVAL;                         /* keep gcc happy */
}
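
/*
 * Receive-side state machine driven above:
 *
 *   SOCKNAL_RX_KSM_HEADER   -> read ksock_msg header (V2/V3)
 *   SOCKNAL_RX_LNET_HEADER  -> read lnet header, hand it to lnet_parse()
 *   SOCKNAL_RX_PARSE        -> waiting for ksocknal_recv() to attach buffers
 *   SOCKNAL_RX_LNET_PAYLOAD -> read payload, then lnet_finalize()
 *   SOCKNAL_RX_SLOP         -> discard bytes of a truncated/skipped packet
 *
 * ksocknal_new_packet() resets the machine at each packet boundary.
 */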
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
              int delayed, unsigned int niov, struct kvec *iov,
              lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
              unsigned int rlen)
{
        ksock_conn_t *conn = (ksock_conn_t *)private;
        ksock_sched_t *sched = conn->ksnc_scheduler;

        LASSERT(mlen <= rlen);
        LASSERT(niov <= LNET_MAX_IOV);

        conn->ksnc_cookie = msg;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;

        if (mlen == 0 || iov != NULL) {
                conn->ksnc_rx_nkiov = 0;
                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
                conn->ksnc_rx_niov =
                        lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
                                         niov, iov, offset, mlen);
        } else {
                conn->ksnc_rx_niov = 0;
                conn->ksnc_rx_iov = NULL;
                conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
                conn->ksnc_rx_nkiov =
                        lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
                                          niov, kiov, offset, mlen);
        }

        LASSERT(mlen ==
                lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
                lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

        LASSERT(conn->ksnc_rx_scheduled);

        spin_lock_bh(&sched->kss_lock);

        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_PARSE_WAIT:
                list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
                wake_up(&sched->kss_waitq);
                LASSERT(conn->ksnc_rx_ready);
                break;

        case SOCKNAL_RX_PARSE:
                /* scheduler hasn't noticed I'm parsing yet */
                break;
        }

        conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

        spin_unlock_bh(&sched->kss_lock);
        ksocknal_conn_decref(conn);
        return 0;
}
static int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
        int rc;

        spin_lock_bh(&sched->kss_lock);

        rc = (!ksocknal_data.ksnd_shuttingdown &&
              list_empty(&sched->kss_rx_conns) &&
              list_empty(&sched->kss_tx_conns));

        spin_unlock_bh(&sched->kss_lock);
        return rc;
}
int ksocknal_scheduler(void *arg)
{
        struct ksock_sched_info *info;
        ksock_sched_t *sched;
        ksock_conn_t *conn;
        ksock_tx_t *tx;
        int rc;
        int nloops = 0;
        long id = (long)arg;

        info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
        sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

        cfs_block_allsigs();

        rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
        if (rc != 0)
                CWARN("Can't set CPU partition affinity to %d: %d\n",
                      info->ksi_cpt, rc);

        spin_lock_bh(&sched->kss_lock);

        while (!ksocknal_data.ksnd_shuttingdown) {
                int did_something = 0;

                /* Ensure I progress everything semi-fairly */

                if (!list_empty(&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          ksock_conn_t, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);

                        LASSERT(conn->ksnc_rx_scheduled);
                        LASSERT(conn->ksnc_rx_ready);

                        /* clear rx_ready in case receive isn't complete.
                         * Do it BEFORE we call process_recv, since
                         * data_ready can set it any time after we release
                         * kss_lock. */
                        conn->ksnc_rx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);

                        rc = ksocknal_process_receive(conn);

                        spin_lock_bh(&sched->kss_lock);

                        /* I'm the only one that can clear this flag */
                        LASSERT(conn->ksnc_rx_scheduled);

                        /* Did process_receive get everything it wanted? */
                        if (rc == 0)
                                conn->ksnc_rx_ready = 1;

                        if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
                                /* Conn blocked waiting for ksocknal_recv()
                                 * I change its state (under lock) to signal
                                 * it can be rescheduled */
                                conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
                        } else if (conn->ksnc_rx_ready) {
                                /* reschedule for rx */
                                list_add_tail(&conn->ksnc_rx_list,
                                              &sched->kss_rx_conns);
                        } else {
                                conn->ksnc_rx_scheduled = 0;
                                /* drop my ref */
                                ksocknal_conn_decref(conn);
                        }

                        did_something = 1;
                }

                if (!list_empty(&sched->kss_tx_conns)) {
                        struct list_head zlist = LIST_HEAD_INIT(zlist);

                        if (!list_empty(&sched->kss_zombie_noop_txs)) {
                                list_add(&zlist,
                                         &sched->kss_zombie_noop_txs);
                                list_del_init(&sched->kss_zombie_noop_txs);
                        }

                        conn = list_entry(sched->kss_tx_conns.next,
                                          ksock_conn_t, ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);

                        LASSERT(conn->ksnc_tx_scheduled);
                        LASSERT(conn->ksnc_tx_ready);
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));

                        tx = list_entry(conn->ksnc_tx_queue.next,
                                        ksock_tx_t, tx_list);

                        if (conn->ksnc_tx_carrier == tx)
                                ksocknal_next_tx_carrier(conn);

                        /* dequeue now so empty list => more to send */
                        list_del(&tx->tx_list);

                        /* Clear tx_ready in case send isn't complete.  Do
                         * it BEFORE we call process_transmit, since
                         * write_space can set it any time after we release
                         * kss_lock. */
                        conn->ksnc_tx_ready = 0;
                        spin_unlock_bh(&sched->kss_lock);

                        if (!list_empty(&zlist)) {
                                /* free zombie noop txs, it's fast because
                                 * noop txs are just put in freelist */
                                ksocknal_txlist_done(NULL, &zlist, 0);
                        }

                        rc = ksocknal_process_transmit(conn, tx);

                        if (rc == -ENOMEM || rc == -EAGAIN) {
                                /* Incomplete send: replace tx on HEAD of tx_queue */
                                spin_lock_bh(&sched->kss_lock);
                                list_add(&tx->tx_list,
                                         &conn->ksnc_tx_queue);
                        } else {
                                /* Complete send; tx -ref */
                                ksocknal_tx_decref(tx);

                                spin_lock_bh(&sched->kss_lock);
                                /* assume space for more */
                                conn->ksnc_tx_ready = 1;
                        }

                        if (rc == -ENOMEM) {
                                /* Do nothing; after a short timeout, this
                                 * conn will be reposted on kss_tx_conns. */
                        } else if (conn->ksnc_tx_ready &&
                                   !list_empty(&conn->ksnc_tx_queue)) {
                                /* reschedule for tx */
                                list_add_tail(&conn->ksnc_tx_list,
                                              &sched->kss_tx_conns);
                        } else {
                                conn->ksnc_tx_scheduled = 0;
                                /* drop my ref */
                                ksocknal_conn_decref(conn);
                        }

                        did_something = 1;
                }

                if (!did_something ||           /* nothing to do */
                    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
                        spin_unlock_bh(&sched->kss_lock);

                        nloops = 0;

                        if (!did_something) {   /* wait for something to do */
                                rc = wait_event_interruptible_exclusive(
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
                                LASSERT(rc == 0);
                        } else {
                                cond_resched();
                        }

                        spin_lock_bh(&sched->kss_lock);
                }
        }

        spin_unlock_bh(&sched->kss_lock);
        ksocknal_thread_fini();
        return 0;
}
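
/*
 * Each scheduler pass above handles at most one rx event and one tx event
 * before re-checking, so neither direction can starve the other; after
 * SOCKNAL_RESCHED consecutive busy passes the thread drops the lock and
 * yields the CPU.
 */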
/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(ksock_conn_t *conn)
{
        ksock_sched_t *sched;

        sched = conn->ksnc_scheduler;

        spin_lock_bh(&sched->kss_lock);

        conn->ksnc_rx_ready = 1;

        if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
                list_add_tail(&conn->ksnc_rx_list,
                              &sched->kss_rx_conns);
                conn->ksnc_rx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up(&sched->kss_waitq);
        }
        spin_unlock_bh(&sched->kss_lock);
}
/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(ksock_conn_t *conn)
{
        ksock_sched_t *sched;

        sched = conn->ksnc_scheduler;

        spin_lock_bh(&sched->kss_lock);

        conn->ksnc_tx_ready = 1;

        if (!conn->ksnc_tx_scheduled &&              /* not being progressed */
            !list_empty(&conn->ksnc_tx_queue)) {     /* packets to send */
                list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);

                wake_up(&sched->kss_waitq);
        }

        spin_unlock_bh(&sched->kss_lock);
}
static ksock_proto_t *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
        __u32 version = 0;

        if (hello->kshm_magic == LNET_PROTO_MAGIC)
                version = hello->kshm_version;
        else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
                version = __swab32(hello->kshm_version);

        if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
                if (*ksocknal_tunables.ksnd_protocol == 1)
                        return NULL;

                if (*ksocknal_tunables.ksnd_protocol == 2 &&
                    version == KSOCK_PROTO_V3)
                        return NULL;
#endif
                if (version == KSOCK_PROTO_V2)
                        return &ksocknal_protocol_v2x;

                if (version == KSOCK_PROTO_V3)
                        return &ksocknal_protocol_v3x;

                return NULL;
        }

        if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                struct lnet_magicversion *hmv;

                CLASSERT(sizeof(struct lnet_magicversion) ==
                         offsetof(struct ksock_hello_msg, kshm_src_nid));

                hmv = (struct lnet_magicversion *)hello;

                if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
                    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
                        return &ksocknal_protocol_v1x;
        }

        return NULL;
}
int
ksocknal_send_hello(struct lnet_ni *ni, ksock_conn_t *conn,
                    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
        /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
        ksock_net_t *net = (ksock_net_t *)ni->ni_data;

        LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);

        /* rely on caller to hold a ref on socket so it wouldn't disappear */
        LASSERT(conn->ksnc_proto != NULL);

        hello->kshm_src_nid = ni->ni_nid;
        hello->kshm_dst_nid = peer_nid;
        hello->kshm_src_pid = the_lnet.ln_pid;

        hello->kshm_src_incarnation = net->ksnn_incarnation;
        hello->kshm_ctype = conn->ksnc_type;

        return conn->ksnc_proto->pro_send_hello(conn, hello);
}
static int
ksocknal_invert_type(int type)
{
        switch (type) {
        case SOCKLND_CONN_ANY:
        case SOCKLND_CONN_CONTROL:
                return type;
        case SOCKLND_CONN_BULK_IN:
                return SOCKLND_CONN_BULK_OUT;
        case SOCKLND_CONN_BULK_OUT:
                return SOCKLND_CONN_BULK_IN;
        default:
                return SOCKLND_CONN_NONE;
        }
}
int
ksocknal_recv_hello(struct lnet_ni *ni, ksock_conn_t *conn,
                    struct ksock_hello_msg *hello,
                    struct lnet_process_id *peerid,
                    __u64 *incarnation)
{
        /* Return < 0        fatal error
         *        0          success
         *        EALREADY   lost connection race
         *        EPROTO     protocol version mismatch
         */
        struct socket *sock = conn->ksnc_sock;
        int active = (conn->ksnc_proto != NULL);
        int timeout;
        int proto_match;
        int rc;
        ksock_proto_t *proto;
        struct lnet_process_id recv_id;

        /* socket type set on active connections - not set on passive */
        LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

        timeout = active ? *ksocknal_tunables.ksnd_timeout :
                           lnet_acceptor_timeout();

        rc = lnet_sock_read(sock, &hello->kshm_magic,
                            sizeof(hello->kshm_magic), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                return rc;
        }

        if (hello->kshm_magic != LNET_PROTO_MAGIC &&
            hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
            hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                /* Unexpected magic! */
                CERROR("Bad magic(1) %#08x (%#08x expected) from "
                       "%pI4h\n", __cpu_to_le32(hello->kshm_magic),
                       LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
                return -EPROTO;
        }

        rc = lnet_sock_read(sock, &hello->kshm_version,
                            sizeof(hello->kshm_version), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                return rc;
        }

        proto = ksocknal_parse_proto_version(hello);
        if (proto == NULL) {
                if (!active) {
                        /* unknown protocol from peer_ni, tell peer_ni my protocol */
                        conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                        if (*ksocknal_tunables.ksnd_protocol == 2)
                                conn->ksnc_proto = &ksocknal_protocol_v2x;
                        else if (*ksocknal_tunables.ksnd_protocol == 1)
                                conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
                }

                CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
                       conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);

                return -EPROTO;
        }

        proto_match = (conn->ksnc_proto == proto);
        conn->ksnc_proto = proto;

        /* receive the rest of hello message anyway */
        rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
        if (rc != 0) {
                CERROR("Error %d reading or checking hello from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                return rc;
        }

        *incarnation = hello->kshm_src_incarnation;

        if (hello->kshm_src_nid == LNET_NID_ANY) {
                CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
                       "from %pI4h\n", &conn->ksnc_ipaddr);
                return -EPROTO;
        }

        if (!active &&
            conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                /* Userspace NAL assigns peer_ni process ID from socket */
                recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
                recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
        } else {
                recv_id.nid = hello->kshm_src_nid;
                recv_id.pid = hello->kshm_src_pid;
        }

        if (!active) {
                *peerid = recv_id;

                /* peer_ni determines type */
                conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
                if (conn->ksnc_type == SOCKLND_CONN_NONE) {
                        CERROR("Unexpected type %d from %s ip %pI4h\n",
                               hello->kshm_ctype, libcfs_id2str(*peerid),
                               &conn->ksnc_ipaddr);
                        return -EPROTO;
                }

                return 0;
        }

        if (peerid->pid != recv_id.pid ||
            peerid->nid != recv_id.nid) {
                LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
                                   " %pI4h, but they claimed they were "
                                   "%s; please check your Lustre "
                                   "configuration.\n",
                                   libcfs_id2str(*peerid),
                                   &conn->ksnc_ipaddr,
                                   libcfs_id2str(recv_id));
                return -EPROTO;
        }

        if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                /* Possible protocol mismatch or I lost the connection race */
                return proto_match ? EALREADY : EPROTO;
        }

        if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
                CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
                       conn->ksnc_type, libcfs_id2str(*peerid),
                       &conn->ksnc_ipaddr,
                       hello->kshm_ctype);
                return -EPROTO;
        }

        return 0;
}
static int
ksocknal_connect(ksock_route_t *route)
{
        struct list_head zombies = LIST_HEAD_INIT(zombies);
        ksock_peer_ni_t *peer_ni = route->ksnr_peer;
        int type;
        int wanted;
        struct socket *sock;
        cfs_time_t deadline;
        int retry_later = 0;
        int rc = 0;

        deadline = cfs_time_add(cfs_time_current(),
                                cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        LASSERT(route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);

        route->ksnr_connecting = 1;

        for (;;) {
                wanted = ksocknal_route_mask() & ~route->ksnr_connected;

                /* stop connecting if peer_ni/route got closed under me, or
                 * route got connected while queued */
                if (peer_ni->ksnp_closing || route->ksnr_deleted ||
                    wanted == 0) {
                        retry_later = 0;
                        break;
                }

                /* reschedule if peer_ni is connecting to me */
                if (peer_ni->ksnp_accepting > 0) {
                        CDEBUG(D_NET,
                               "peer_ni %s(%d) already connecting to me, retry later.\n",
                               libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
                        retry_later = 1;
                }

                if (retry_later) /* needs reschedule */
                        break;

                if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
                        type = SOCKLND_CONN_ANY;
                } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
                        type = SOCKLND_CONN_CONTROL;
                } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
                        type = SOCKLND_CONN_BULK_IN;
                } else {
                        LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
                        type = SOCKLND_CONN_BULK_OUT;
                }

                write_unlock_bh(&ksocknal_data.ksnd_global_lock);

                if (cfs_time_aftereq(cfs_time_current(), deadline)) {
                        rc = -ETIMEDOUT;
                        lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
                                                   route->ksnr_ipaddr,
                                                   route->ksnr_port);
                        goto failed;
                }

                rc = lnet_connect(&sock, peer_ni->ksnp_id.nid,
                                  route->ksnr_myipaddr,
                                  route->ksnr_ipaddr, route->ksnr_port);
                if (rc != 0)
                        goto failed;

                rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
                if (rc < 0) {
                        lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
                                                   route->ksnr_ipaddr,
                                                   route->ksnr_port);
                        goto failed;
                }

                /* A +ve RC means I have to retry because I lost the connection
                 * race or I have to renegotiate protocol version */
                retry_later = (rc != 0);
                if (retry_later)
                        CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
                               libcfs_nid2str(peer_ni->ksnp_id.nid));

                write_lock_bh(&ksocknal_data.ksnd_global_lock);
        }

        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;

        if (retry_later) {
                /* re-queue for attention; this frees me up to handle
                 * the peer_ni's incoming connection request */

                if (rc == EALREADY ||
                    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
                        /* We want to introduce a delay before next
                         * attempt to connect if we lost conn race,
                         * but the race is resolved quickly usually,
                         * so min_reconnectms should be good heuristic */
                        route->ksnr_retry_interval =
                                cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
                        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                                           route->ksnr_retry_interval);
                }

                ksocknal_launch_connection_locked(route);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
        return retry_later;

 failed:
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;

        /* This is a retry rather than a new connection */
        route->ksnr_retry_interval *= 2;
        route->ksnr_retry_interval =
                MAX(route->ksnr_retry_interval,
                    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
        route->ksnr_retry_interval =
                MIN(route->ksnr_retry_interval,
                    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);

        LASSERT(route->ksnr_retry_interval != 0);
        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                           route->ksnr_retry_interval);

        if (!list_empty(&peer_ni->ksnp_tx_queue) &&
            peer_ni->ksnp_accepting == 0 &&
            ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
                ksock_conn_t *conn;

                /* ksnp_tx_queue is queued on a conn on successful
                 * connection for V1.x and V2.x */
                if (!list_empty(&peer_ni->ksnp_conns)) {
                        conn = list_entry(peer_ni->ksnp_conns.next,
                                          ksock_conn_t, ksnc_list);
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
                }

                /* take all the blocked packets while I've got the lock and
                 * complete below... */
                list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_peer_failed(peer_ni);
        ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, 1);
        return 0;
}
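
/*
 * Failed connection attempts back off exponentially: ksnr_retry_interval
 * doubles on each failure and is clamped between the min_reconnectms and
 * max_reconnectms tunables (converted from ms to ticks).  Assuming the
 * shipped defaults of 1000ms and 60000ms, retries come after roughly 1,
 * 2, 4, ... seconds, capped at one minute.
 */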
/*
 * Check whether we need to create more connds.
 * It will try to create a new thread if necessary; @timeout can
 * be updated if the create fails, so the caller won't keep retrying
 * while short of resources.
 */
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
        char name[16];
        int rc;
        int total = ksocknal_data.ksnd_connd_starting +
                    ksocknal_data.ksnd_connd_running;

        if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
                /* still initializing */
                return 0;
        }

        if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
            total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
                /* can't create more connd, or still have enough
                 * threads to handle more connecting */
                return 0;
        }

        if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
                /* no pending connecting request */
                return 0;
        }

        if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
                /* may run out of resource, retry later */
                *timeout = cfs_time_seconds(1);
                return 0;
        }

        if (ksocknal_data.ksnd_connd_starting > 0) {
                /* serialize starting to avoid flood */
                return 0;
        }

        ksocknal_data.ksnd_connd_starting_stamp = sec;
        ksocknal_data.ksnd_connd_starting++;
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

        /* NB: total is the next id */
        snprintf(name, sizeof(name), "socknal_cd%02d", total);
        rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
        if (rc == 0)
                return 1;

        /* we tried... */
        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
        ksocknal_data.ksnd_connd_starting--;
        ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

        return 1;
}
/*
 * Check whether the current thread can exit; it returns 1 if there are too
 * many threads and none have been created in the past 120 seconds.
 * This function may also update @timeout to make the caller come back
 * to recheck these conditions.
 */
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
        int val;

        if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
                /* still initializing */
                return 0;
        }

        if (ksocknal_data.ksnd_connd_starting > 0) {
                /* in progress of starting new thread */
                return 0;
        }

        if (ksocknal_data.ksnd_connd_running <=
            *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
                return 0;
        }

        /* created thread in past 120 seconds? */
        val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
                    SOCKNAL_CONND_TIMEOUT - sec);

        *timeout = (val > 0) ? cfs_time_seconds(val) :
                               cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
        if (val > 0)
                return 0;

        /* no thread created in the past 120 seconds */

        return ksocknal_data.ksnd_connd_running >
               ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
/* Go through connd_routes queue looking for a route that we can process
 * right now; @timeout_p can be updated if we need to come back later */
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
        ksock_route_t *route;
        cfs_time_t now;

        now = cfs_time_current();

        /* connd_routes can contain both pending and ordinary routes */
        list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
                            ksnr_connd_list) {

                if (route->ksnr_retry_interval == 0 ||
                    cfs_time_aftereq(now, route->ksnr_timeout))
                        return route;

                if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
                    (int)*timeout_p > (int)(route->ksnr_timeout - now))
                        *timeout_p = (int)(route->ksnr_timeout - now);
        }

        return NULL;
}
int
ksocknal_connd(void *arg)
{
        spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
        ksock_connreq_t *cr;
        wait_queue_entry_t wait;
        int nloops = 0;
        int cons_retry = 0;

        cfs_block_allsigs();

        init_waitqueue_entry(&wait, current);

        spin_lock_bh(connd_lock);

        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
        ksocknal_data.ksnd_connd_starting--;
        ksocknal_data.ksnd_connd_running++;

        while (!ksocknal_data.ksnd_shuttingdown) {
                ksock_route_t *route = NULL;
                time64_t sec = ktime_get_real_seconds();
                long timeout = MAX_SCHEDULE_TIMEOUT;
                int dropped_lock = 0;

                if (ksocknal_connd_check_stop(sec, &timeout)) {
                        /* exiting; wake up another one to check stop */
                        wake_up(&ksocknal_data.ksnd_connd_waitq);
                        break;
                }

                if (ksocknal_connd_check_start(sec, &timeout)) {
                        /* created new thread */
                        dropped_lock = 1;
                }

                if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
                        /* Connection accepted by the listener */
                        cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
                                        ksock_connreq_t, ksncr_list);

                        list_del(&cr->ksncr_list);
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;

                        ksocknal_create_conn(cr->ksncr_ni, NULL,
                                             cr->ksncr_sock, SOCKLND_CONN_NONE);
                        lnet_ni_decref(cr->ksncr_ni);
                        LIBCFS_FREE(cr, sizeof(*cr));

                        spin_lock_bh(connd_lock);
                }

                /* Only handle an outgoing connection request if there
                 * is a thread left to handle incoming connections and
                 * create new connd */
                if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
                    ksocknal_data.ksnd_connd_running) {
                        route = ksocknal_connd_get_route_locked(&timeout);
                }
                if (route != NULL) {
                        list_del(&route->ksnr_connd_list);
                        ksocknal_data.ksnd_connd_connecting++;
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;

                        if (ksocknal_connect(route)) {
                                /* consecutive retry */
                                if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
                                        CWARN("massive consecutive "
                                              "re-connecting to %pI4h\n",
                                              &route->ksnr_ipaddr);
                                        cons_retry = 0;
                                }
                        } else {
                                cons_retry = 0;
                        }

                        ksocknal_route_decref(route);

                        spin_lock_bh(connd_lock);
                        ksocknal_data.ksnd_connd_connecting--;
                }

                if (dropped_lock) {
                        if (++nloops < SOCKNAL_RESCHED)
                                continue;
                        spin_unlock_bh(connd_lock);
                        nloops = 0;
                        cond_resched();
                        spin_lock_bh(connd_lock);
                        continue;
                }

                /* Nothing to do for 'timeout' */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_unlock_bh(connd_lock);

                nloops = 0;
                schedule_timeout(timeout);

                set_current_state(TASK_RUNNING);
                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
        ksocknal_data.ksnd_connd_running--;
        spin_unlock_bh(connd_lock);

        ksocknal_thread_fini();
        return 0;
}
static ksock_conn_t *
ksocknal_find_timed_out_conn(ksock_peer_ni_t *peer_ni)
{
        /* We're called with a shared lock on ksnd_global_lock */
        ksock_conn_t *conn;
        struct list_head *ctmp;

        list_for_each(ctmp, &peer_ni->ksnp_conns) {
                int error;

                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

                /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                LASSERT(!conn->ksnc_closing);

                error = conn->ksnc_sock->sk->sk_err;
                if (error != 0) {
                        ksocknal_conn_addref(conn);

                        switch (error) {
                        case ECONNRESET:
                                CNETERR("A connection with %s "
                                        "(%pI4h:%d) was reset; "
                                        "it may have rebooted.\n",
                                        libcfs_id2str(peer_ni->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        case ETIMEDOUT:
                                CNETERR("A connection with %s "
                                        "(%pI4h:%d) timed out; the "
                                        "network or node may be down.\n",
                                        libcfs_id2str(peer_ni->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        default:
                                CNETERR("An unexpected network error %d "
                                        "occurred with %s "
                                        "(%pI4h:%d)\n", error,
                                        libcfs_id2str(peer_ni->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        }

                        return conn;
                }

                if (conn->ksnc_rx_started &&
                    cfs_time_aftereq(cfs_time_current(),
                                     conn->ksnc_rx_deadline)) {
                        /* Timed out incomplete incoming message */
                        ksocknal_conn_addref(conn);
                        CNETERR("Timeout receiving from %s (%pI4h:%d), "
                                "state %d wanted %d left %d\n",
                                libcfs_id2str(peer_ni->ksnp_id),
                                &conn->ksnc_ipaddr,
                                conn->ksnc_port,
                                conn->ksnc_rx_state,
                                conn->ksnc_rx_nob_wanted,
                                conn->ksnc_rx_nob_left);
                        return conn;
                }

                if ((!list_empty(&conn->ksnc_tx_queue) ||
                     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
                    cfs_time_aftereq(cfs_time_current(),
                                     conn->ksnc_tx_deadline)) {
                        /* Timed out messages queued for sending or
                         * buffered in the socket's send buffer */
                        ksocknal_conn_addref(conn);
                        CNETERR("Timeout sending data to %s (%pI4h:%d); "
                                "the network or that node may be down.\n",
                                libcfs_id2str(peer_ni->ksnp_id),
                                &conn->ksnc_ipaddr, conn->ksnc_port);
                        return conn;
                }
        }

        return NULL;
}
static void
ksocknal_flush_stale_txs(ksock_peer_ni_t *peer_ni)
{
        ksock_tx_t *tx;
        struct list_head stale_txs = LIST_HEAD_INIT(stale_txs);

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        while (!list_empty(&peer_ni->ksnp_tx_queue)) {
                tx = list_entry(peer_ni->ksnp_tx_queue.next,
                                ksock_tx_t, tx_list);

                if (!cfs_time_aftereq(cfs_time_current(),
                                      tx->tx_deadline))
                        break;

                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &stale_txs);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, 1);
}
static int
ksocknal_send_keepalive_locked(ksock_peer_ni_t *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
        ksock_sched_t *sched;
        ksock_conn_t *conn;
        ksock_tx_t *tx;

        /* last_alive will be updated by create_conn */
        if (list_empty(&peer_ni->ksnp_conns))
                return 0;

        if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
                return 0;

        if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
            cfs_time_before(cfs_time_current(),
                            cfs_time_add(peer_ni->ksnp_last_alive,
                                         cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
                return 0;

        if (cfs_time_before(cfs_time_current(),
                            peer_ni->ksnp_send_keepalive))
                return 0;

        /* retry 10 secs later, so we don't put pressure
         * on this peer_ni if we failed to send a keepalive this time */
        peer_ni->ksnp_send_keepalive = cfs_time_shift(10);

        conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
        if (conn != NULL) {
                sched = conn->ksnc_scheduler;

                spin_lock_bh(&sched->kss_lock);
                if (!list_empty(&conn->ksnc_tx_queue)) {
                        spin_unlock_bh(&sched->kss_lock);
                        /* there is a queued ACK, don't need keepalive */
                        return 0;
                }

                spin_unlock_bh(&sched->kss_lock);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);

        /* cookie = 1 is reserved for keepalive PING */
        tx = ksocknal_alloc_tx_noop(1, 1);
        if (tx == NULL) {
                read_lock(&ksocknal_data.ksnd_global_lock);
                return -ENOMEM;
        }

        if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
                read_lock(&ksocknal_data.ksnd_global_lock);
                return 1;
        }

        ksocknal_free_tx(tx);
        read_lock(&ksocknal_data.ksnd_global_lock);

        return -EIO;
}
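
/*
 * Keepalives re-use the ZC ACK machinery: cookie 1 (SOCKNAL_KEEPALIVE_PING)
 * is reserved for pings, which is why ksocknal_check_zc_req() above restarts
 * ksnp_zc_next_cookie at SOCKNAL_KEEPALIVE_PING + 1 when the counter wraps.
 */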
static void
ksocknal_check_peer_timeouts(int idx)
{
        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
        ksock_peer_ni_t *peer_ni;
        ksock_conn_t *conn;
        ksock_tx_t *tx;

 again:
        /* NB. We expect to have a look at all the peers and not find any
         * connections to time out, so we just use a shared lock while we
         * take a look... */
        read_lock(&ksocknal_data.ksnd_global_lock);

        list_for_each_entry(peer_ni, peers, ksnp_list) {
                ksock_tx_t *tx_stale;
                cfs_time_t deadline = 0;
                int resid = 0;
                int n = 0;

                if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        goto again;
                }

                conn = ksocknal_find_timed_out_conn(peer_ni);

                if (conn != NULL) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);

                        ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

                        /* NB we won't find this one again, but we can't
                         * just proceed with the next peer_ni, since we dropped
                         * ksnd_global_lock and it might be dead already! */
                        ksocknal_conn_decref(conn);
                        goto again;
                }

                /* we can't process stale txs right here because we're
                 * holding only shared lock */
                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
                        ksock_tx_t *tx =
                                list_entry(peer_ni->ksnp_tx_queue.next,
                                           ksock_tx_t, tx_list);

                        if (cfs_time_aftereq(cfs_time_current(),
                                             tx->tx_deadline)) {
                                ksocknal_peer_addref(peer_ni);
                                read_unlock(&ksocknal_data.ksnd_global_lock);

                                ksocknal_flush_stale_txs(peer_ni);

                                ksocknal_peer_decref(peer_ni);
                                goto again;
                        }
                }

                if (list_empty(&peer_ni->ksnp_zc_req_list))
                        continue;

                tx_stale = NULL;
                spin_lock(&peer_ni->ksnp_lock);
                list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
                        if (!cfs_time_aftereq(cfs_time_current(),
                                              tx->tx_deadline))
                                break;
                        /* ignore the TX if connection is being closed */
                        if (tx->tx_conn->ksnc_closing)
                                continue;
                        n++;
                        if (tx_stale == NULL)
                                tx_stale = tx;
                }

                if (tx_stale == NULL) {
                        spin_unlock(&peer_ni->ksnp_lock);
                        continue;
                }

                deadline = tx_stale->tx_deadline;
                resid = tx_stale->tx_resid;
                conn = tx_stale->tx_conn;
                ksocknal_conn_addref(conn);

                spin_unlock(&peer_ni->ksnp_lock);
                read_unlock(&ksocknal_data.ksnd_global_lock);

                CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the "
                       "oldest(%p) timed out %ld secs ago, "
                       "resid: %d, wmem: %d\n",
                       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
                       cfs_duration_sec(cfs_time_current() - deadline),
                       resid, conn->ksnc_sock->sk->sk_wmem_queued);

                ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
                ksocknal_conn_decref(conn);
                goto again;
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}
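
/*
 * The reaper below spreads peer_ni timeout checks over time: with check
 * period p and required checks n per timeout interval (p = 1, n = 4 in
 * the code), each hash chain is visited n times per timeout, so any dead
 * connection is detected within (n+1)/n times the timeout.  For example,
 * with a 50 second timeout a peer_ni is checked about every 12.5 seconds
 * and a timeout is noticed within ~62.5 seconds.
 */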
int ksocknal_reaper(void *arg)
{
        wait_queue_entry_t wait;
        ksock_conn_t *conn;
        ksock_sched_t *sched;
        struct list_head enomem_conns;
        int nenomem_conns;
        cfs_duration_t timeout;
        int i;
        int peer_index = 0;
        cfs_time_t deadline = cfs_time_current();

        cfs_block_allsigs();

        INIT_LIST_HEAD(&enomem_conns);
        init_waitqueue_entry(&wait, current);

        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        while (!ksocknal_data.ksnd_shuttingdown) {

                if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
                        conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
                                          ksock_conn_t, ksnc_list);
                        list_del(&conn->ksnc_list);

                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                        ksocknal_terminate_conn(conn);
                        ksocknal_conn_decref(conn);

                        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
                        continue;
                }

                if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
                        conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
                                          ksock_conn_t, ksnc_list);
                        list_del(&conn->ksnc_list);

                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                        ksocknal_destroy_conn(conn);

                        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
                        continue;
                }

                if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
                        list_add(&enomem_conns,
                                 &ksocknal_data.ksnd_enomem_conns);
                        list_del_init(&ksocknal_data.ksnd_enomem_conns);
                }

                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                /* reschedule all the connections that stalled with ENOMEM... */
                nenomem_conns = 0;
                while (!list_empty(&enomem_conns)) {
                        conn = list_entry(enomem_conns.next,
                                          ksock_conn_t, ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);

                        sched = conn->ksnc_scheduler;

                        spin_lock_bh(&sched->kss_lock);

                        LASSERT(conn->ksnc_tx_scheduled);
                        conn->ksnc_tx_ready = 1;
                        list_add_tail(&conn->ksnc_tx_list,
                                      &sched->kss_tx_conns);
                        wake_up(&sched->kss_waitq);

                        spin_unlock_bh(&sched->kss_lock);
                        nenomem_conns++;
                }

                /* careful with the jiffy wrap... */
                while ((timeout = cfs_time_sub(deadline,
                                               cfs_time_current())) <= 0) {
                        const int n = 4;
                        const int p = 1;
                        int chunk = ksocknal_data.ksnd_peer_hash_size;

                        /* Time to check for timeouts on a few more peers: I do
                         * checks every 'p' seconds on a proportion of the peer_ni
                         * table and I need to check every connection 'n' times
                         * within a timeout interval, to ensure I detect a
                         * timeout on any connection within (n+1)/n times the
                         * timeout interval. */

                        if (*ksocknal_tunables.ksnd_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        *ksocknal_tunables.ksnd_timeout;
                        if (chunk == 0)
                                chunk = 1;

                        for (i = 0; i < chunk; i++) {
                                ksocknal_check_peer_timeouts(peer_index);
                                peer_index = (peer_index + 1) %
                                             ksocknal_data.ksnd_peer_hash_size;
                        }

                        deadline = cfs_time_add(deadline, cfs_time_seconds(p));
                }

                if (nenomem_conns != 0) {
                        /* Reduce my timeout if I rescheduled ENOMEM conns.
                         * This also prevents me getting woken immediately
                         * if any go back on my enomem list. */
                        timeout = SOCKNAL_ENOMEM_RETRY;
                }
                ksocknal_data.ksnd_reaper_waketime =
                        cfs_time_add(cfs_time_current(), timeout);

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

                if (!ksocknal_data.ksnd_shuttingdown &&
                    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
                    list_empty(&ksocknal_data.ksnd_zombie_conns))
                        schedule_timeout(timeout);

                set_current_state(TASK_RUNNING);
                remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
        }

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

        ksocknal_thread_fini();
        return 0;
}