/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
	struct ksock_tx *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					struct ksock_tx, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_hstatus = LNET_MSG_STATUS_OK;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}
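
/*
 * Usage note (illustrative, not part of the original source): NOOP txs are
 * recycled through the ksnd_idle_noop_txs freelist above rather than
 * freed.  A caller pairs ksocknal_alloc_tx(KSOCK_MSG_NOOP,
 * KSOCK_NOOP_TX_SIZE) with ksocknal_free_tx(); ksocknal_free_tx() files
 * descriptors of exactly KSOCK_NOOP_TX_SIZE back on the freelist, while
 * every other descriptor size goes back to the allocator via LIBCFS_FREE().
 */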
struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	struct ksock_tx *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_NOOP;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}
void
ksocknal_free_tx(struct ksock_tx *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}
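
/*
 * The two helpers below push the mapped (kvec) and paged (bio_vec)
 * fragments of a tx respectively.  Both return the byte count from the
 * underlying socket send, consume the fragments that were fully sent, and
 * leave a partially-sent fragment trimmed so the next call resumes at the
 * right offset.
 */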
static int
ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx,
		  struct kvec *scratch_iov)
{
	struct kvec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
	rc = ksocknal_lib_send_iov(conn, tx, scratch_iov);

	if (rc <= 0)			/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	do {
		LASSERT(tx->tx_niov > 0);

		if (nob < (int) iov->iov_len) {
			iov->iov_base += nob;
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
		   struct kvec *scratch_iov)
{
	struct bio_vec *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov == 0);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx, scratch_iov);

	if (rc <= 0)			/* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return rc;
		}

		nob -= (int)kiov->bv_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}
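
/*
 * ksocknal_transmit() below loops over the mapped fragments first
 * (tx_niov) and then the paged ones, refreshing the tx deadline whenever
 * the socket's queued-byte count shows the peer ACKed something.  It
 * returns 0 once tx_resid reaches zero, or a negative errno (-EAGAIN,
 * -ENOMEM, ...) that the caller maps to a requeue-or-close decision.
 */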
static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
		  struct kvec *scratch_iov)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0)
		schedule_timeout_uninterruptible(
			cfs_time_seconds(ksocknal_data.ksnd_stall_tx));

	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			rc = ksocknal_send_iov(conn, tx, scratch_iov);
		} else {
			rc = ksocknal_send_kiov(conn, tx, scratch_iov);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0)			/* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/* allocated send buffer bytes < computed; infer
			 * something got ACKed */
			conn->ksnc_tx_deadline = ktime_get_seconds() +
						 lnet_get_lnd_timeout();
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
			conn->ksnc_tx_bufnob = bufnob;
			smp_mb();
		}

		if (rc <= 0) { /* Didn't write anything? */
			/* some stacks return 0 instead of -EAGAIN */
			if (rc == 0)
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}
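
/*
 * The receive-side mirrors of the send helpers: they consume
 * conn->ksnc_rx_iov / conn->ksnc_rx_kiov as bytes arrive, refresh the rx
 * deadline and the peer's last_alive stamp on any progress, and return
 * -EAGAIN while a fragment is only partially filled.
 */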
static int
ksocknal_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn, scratchiov);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 lnet_get_lnd_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
		   struct kvec *scratch_iov)
{
	struct bio_vec *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);

	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_kiov */
	rc = ksocknal_lib_recv_kiov(conn, rx_scratch_pgs, scratch_iov);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
	conn->ksnc_rx_deadline = ktime_get_seconds() +
				 lnet_get_lnd_timeout();
	smp_mb();			/* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int) kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
			return -EAGAIN;
		}

		nob -= kiov->bv_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return rc;
}
static int
ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
		 struct kvec *scratch_iov)
{
	/* Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion. */
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0)
		schedule_timeout_uninterruptible(
			cfs_time_seconds(ksocknal_data.ksnd_stall_rx));

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov(conn, scratch_iov);
		else
			rc = ksocknal_recv_kiov(conn, rx_scratch_pgs,
						scratch_iov);

		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (rc == 0 && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */
		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}
void
ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int rc)
{
	struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
	enum lnet_msg_hstatus hstatus = tx->tx_hstatus;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (!rc && (tx->tx_resid != 0 || tx->tx_zc_aborted)) {
		rc = -EIO;
		if (hstatus == LNET_MSG_STATUS_OK)
			hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	ksocknal_free_tx(tx);
	if (lnetmsg != NULL) { /* KSOCK_MSG_NOOP go without lnetmsg */
		lnetmsg->msg_health_status = hstatus;
		lnet_finalize(lnetmsg, rc);
	}
}
void
ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
	struct ksock_tx *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, struct ksock_tx, tx_list);

		if (error && tx->tx_lnetmsg != NULL) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		if (tx->tx_hstatus == LNET_MSG_STATUS_OK) {
			if (error == -ETIMEDOUT)
				tx->tx_hstatus =
					LNET_MSG_STATUS_LOCAL_TIMEOUT;
			else if (error == -ENETDOWN ||
				 error == -EHOSTUNREACH ||
				 error == -ENETUNREACH ||
				 error == -ECONNREFUSED ||
				 error == -ECONNRESET)
				tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
			/*
			 * for all other errors we don't want to
			 * retransmit
			 */
			else if (error)
				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		}

		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx, error);
	}
}
static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
	struct ksock_conn *conn = tx->tx_conn;
	struct ksock_peer_ni *peer_ni = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer_ni will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/* assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack() */
	ksocknal_tx_addref(tx);

	spin_lock(&peer_ni->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer_ni */
	tx->tx_deadline = ktime_get_seconds() +
			  lnet_get_lnd_timeout();

	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer_ni->ksnp_zc_next_cookie++;

	if (peer_ni->ksnp_zc_next_cookie == 0)
		peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer_ni->ksnp_zc_req_list);

	spin_unlock(&peer_ni->ksnp_lock);
}
static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
	struct ksock_peer_ni *peer_ni = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer_ni->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		/* Not waiting for an ACK */
		spin_unlock(&peer_ni->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer_ni->ksnp_lock);

	ksocknal_tx_decref(tx);
}
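
/*
 * ksocknal_process_transmit() interprets ksocknal_transmit()'s result:
 * 0 means the whole tx went out, -EAGAIN means the socket filled and the
 * caller should requeue the tx, -ENOMEM parks the conn on
 * ksnd_enomem_conns for the reaper to retry after a delay, and any other
 * error closes the conn and its siblings.
 */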
static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
			  struct kvec *scratch_iov)
{
	int rc;
	bool error_sim = false;

	if (lnet_send_error_simulation(tx->tx_lnetmsg, &tx->tx_hstatus)) {
		error_sim = true;
		rc = -EINVAL;
		goto simulate_error;
	}

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx, scratch_iov);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		/* Sent everything OK */
		LASSERT(rc == 0);
		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++;   /* exponential backoff warnings */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%u allocated)\n",
			      counter, conn, atomic_read(&libcfs_kmemory));

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (ktime_get_seconds() + SOCKNAL_ENOMEM_RETRY <
		    ksocknal_data.ksnd_reaper_waketime)
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		return rc;
	}

simulate_error:
	/* Actual error */
	LASSERT(rc < 0);

	if (!error_sim) {
		/*
		 * set the health status of the message which determines
		 * whether we should retry the transmit
		 */
		if (rc == -ETIMEDOUT)
			tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_TIMEOUT;
		else
			tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
	}

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection "
				      "while we were sending data; it may have "
				      "rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error "
				      "while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
		       conn, rc, libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr, conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn,
					 (conn->ksnc_closing) ? 0 : rc);

	return rc;
}
static void
ksocknal_launch_connection_locked(struct ksock_route *route)
{
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

	route->ksnr_scheduled = 1;		/* scheduling conn for connd */
	ksocknal_route_addref(route);		/* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
{
	struct ksock_route *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		route = ksocknal_find_connectable_route_locked(peer_ni);
		if (route == NULL)
			return;

		ksocknal_launch_connection_locked(route);
	}
}
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx,
			  int nonblk)
{
	struct list_head *tmp;
	struct ksock_conn *conn;
	struct ksock_conn *typed = NULL;
	struct ksock_conn *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer_ni->ksnp_conns) {
		struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
						  ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto != NULL &&
			c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();

		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     typed->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     fallback->ksnc_tx_last_post > c->ksnc_tx_last_post)) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = ktime_get_seconds();

	return conn;
}
void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}
void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
	struct ksock_sched *sched = conn->ksnc_scheduler;
	struct ksock_msg *msg = &tx->tx_msg;
	struct ksock_tx *ztx = NULL;
	int bufnob = 0;

	/* called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr, conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send.  Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header. */
	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
					      KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline = ktime_get_seconds() +
					 lnet_get_lnd_timeout();
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
		conn->ksnc_tx_bufnob = 0;
		smp_mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it */
		LASSERT(msg->ksm_zc_cookies[1] != 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
	} else {
		/* It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already? */
		LASSERT(msg->ksm_zc_cookies[1] == 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx != NULL) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready &&      /* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
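
/*
 * Route selection below skips routes that are already being connected,
 * routes whose connection types are all established, and routes still
 * inside their retry backoff window (ksnr_timeout).
 */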
static struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer_ni *peer_ni)
{
	time64_t now = ktime_get_seconds();
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)	/* connections being established */
			continue;

		/* all route types connected ? */
		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
			continue;

		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
		      now >= route->ksnr_timeout)) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h "
			       "(cnted %d, interval %lld, %lld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       route->ksnr_timeout - now);
			continue;
		}

		return route;
	}

	return NULL;
}
static struct ksock_route *
ksocknal_find_connecting_route_locked(struct ksock_peer_ni *peer_ni)
{
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer_ni->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}
int
ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
		       struct lnet_process_id id)
{
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL) {
			if (ksocknal_find_connectable_route_locked(peer_ni) == NULL) {
				conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection... */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer_ni = ksocknal_find_peer_locked(ni, id);
		if (peer_ni != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id.pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to "
			       "userspace process %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer_ni %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc != 0) {
			CERROR("Can't add peer_ni %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer_ni);

	conn = ksocknal_find_conn_locked(peer_ni, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer_ni->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer_ni) != NULL) {
		/* the message is going to be pinned to the peer_ni */
		tx->tx_deadline = ktime_get_seconds() +
				  lnet_get_lnd_timeout();

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
	return -EHOSTUNREACH;
}
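
/*
 * ksocknal_send() is the socklnd's message-send entry point (assumption:
 * registered as the LND's lnd_send handler elsewhere in this driver).  It
 * wraps the lnet_msg payload in a tx descriptor and hands it to
 * ksocknal_launch_packet() above.
 */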
int
ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
	int type = lntmsg->msg_type;
	struct lnet_process_id target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	struct bio_vec *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	struct ksock_tx *tx;
	int desc_size;
	int rc;
	int mpflag = 0;

	/* NB 'private' is different depending on what we're sending.
	 * Just ignore it... */

	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
	LASSERT(!in_interrupt());

	if (payload_iov != NULL)
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.paged.kiov[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov != NULL) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			      lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					       payload_niov, payload_iov,
					       payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	tx->tx_msg.ksm_csum = 0;
	tx->tx_msg.ksm_type = KSOCK_MSG_LNET;
	tx->tx_msg.ksm_zc_cookies[0] = 0;
	tx->tx_msg.ksm_zc_cookies[1] = 0;

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (rc == 0)
		return 0;

	lntmsg->msg_health_status = tx->tx_hstatus;
	ksocknal_free_tx(tx);
	return -EIO;
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return 0;
}
void
ksocknal_thread_fini(void)
{
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	if (--ksocknal_data.ksnd_nthreads == 0)
		wake_up_var(&ksocknal_data.ksnd_nthreads);
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
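
/*
 * ksocknal_new_packet() returns 1 when the conn is set up to read the next
 * message header immediately, and 0 when it is still skipping 'slop'
 * (nob_to_skip bytes of a discarded payload) and must be called again.
 */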
int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];
	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (nob_to_skip == 0) {		/* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		smp_mb();		/* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare struct lnet_hdr */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/* Set up to skip as much as possible now.  If there's more left
	 * (ran out of iov entries) we'll get called again */

	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;

	} while (nob_to_skip != 0 &&	/* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}
static int
ksocknal_process_receive(struct ksock_conn *conn,
			 struct page **rx_scratch_pgs,
			 struct kvec *scratch_iov)
{
	struct lnet_hdr *lhdr;
	struct lnet_process_id *id;
	int rc;

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn, rx_scratch_pgs,
				      scratch_iov);

		if (rc <= 0) {
			struct lnet_process_id ksnp_id;

			ksnp_id = conn->ksnc_peer->ksnp_id;

			LASSERT(rc != -EAGAIN);

			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s "
				       "ip %pI4h:%d\n", conn,
				       libcfs_id2str(ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s "
				       "ip %pI4h:%d\n", conn, rc,
				       libcfs_id2str(ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 &&	/* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP Checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
					conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet(conn, 0);
			return 0;	/* NOOP is done and just return */
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;

		goto again;	/* read lnet header now */

	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			/* Userspace peer_ni */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn);	/* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 &&   /* not truncating */
		    conn->ksnc_msg.ksm_csum != 0 &&  /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}

		if (rc && conn->ksnc_lnet_msg)
			conn->ksnc_lnet_msg->msg_health_status =
				LNET_MSG_STATUS_REMOTE_ERROR;
		lnet_finalize(conn->ksnc_lnet_msg, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}
		/* Fall through */

	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0;	/* come back later */
		goto again;		/* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG();
	return -EINVAL;		/* keep gcc happy */
}
static int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	      int delayed, unsigned int niov, struct kvec *iov,
	      struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
	      unsigned int rlen)
{
	struct ksock_conn *conn = private;
	struct ksock_sched *sched = conn->ksnc_scheduler;

	LASSERT(mlen <= rlen);
	LASSERT(niov <= LNET_MAX_IOV);

	conn->ksnc_lnet_msg = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	if (mlen == 0 || iov != NULL) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov =
			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
					 niov, iov, offset, mlen);
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT(mlen ==
		lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}
static inline int
ksocknal_sched_cansleep(struct ksock_sched *sched)
{
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = (!ksocknal_data.ksnd_shuttingdown &&
	      list_empty(&sched->kss_rx_conns) &&
	      list_empty(&sched->kss_tx_conns));

	spin_unlock_bh(&sched->kss_lock);
	return rc;
}
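
/*
 * Scheduler main loop.  Each pass services at most one rx conn and one tx
 * conn to keep progress semi-fair, drops kss_lock around the actual socket
 * I/O, and sleeps on kss_waitq when both queues are empty.
 */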
int ksocknal_scheduler(void *arg)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;
	int rc;
	int nloops = 0;
	long id = (long)arg;
	struct page **rx_scratch_pgs;
	struct kvec *scratch_iov;

	sched = ksocknal_data.ksnd_schedulers[KSOCK_THREAD_CPT(id)];

	LIBCFS_CPT_ALLOC(rx_scratch_pgs, lnet_cpt_table(), sched->kss_cpt,
			 sizeof(*rx_scratch_pgs) * LNET_MAX_IOV);
	if (!rx_scratch_pgs) {
		CERROR("Unable to allocate scratch pages\n");
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(scratch_iov, lnet_cpt_table(), sched->kss_cpt,
			 sizeof(*scratch_iov) * LNET_MAX_IOV);
	if (!scratch_iov) {
		CERROR("Unable to allocate scratch iov\n");
		CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
		return -ENOMEM;
	}

	rc = cfs_cpt_bind(lnet_cpt_table(), sched->kss_cpt);
	if (rc != 0) {
		CWARN("Can't set CPU partition affinity to %d: %d\n",
		      sched->kss_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		int did_something = 0;

		/* Ensure I progress everything semi-fairly */

		if (!list_empty(&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  struct ksock_conn, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/* clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock. */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn, rx_scratch_pgs,
						      scratch_iov);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/* Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}

		if (!list_empty(&sched->kss_tx_conns)) {
			LIST_HEAD(zlist);

			list_splice_init(&sched->kss_zombie_noop_txs, &zlist);

			conn = list_entry(sched->kss_tx_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					struct ksock_tx, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete.  Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock. */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}

			rc = ksocknal_process_transmit(conn, tx, scratch_iov);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail(&conn->ksnc_tx_list,
					      &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}

		if (!did_something ||		/* nothing to do */
		    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			nloops = 0;

			if (!did_something) {	/* wait for something to do */
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT(rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	CFS_FREE_PTR_ARRAY(rx_scratch_pgs, LNET_MAX_IOV);
	CFS_FREE_PTR_ARRAY(scratch_iov, LNET_MAX_IOV);
	ksocknal_thread_fini();
	return 0;
}
/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) {	/* not being progressed */
		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}
	spin_unlock_bh(&sched->kss_lock);
}
/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(struct ksock_conn *conn)
{
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&		/* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
static const struct ksock_proto *
ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		struct lnet_magicversion *hmv;

		BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
			     offsetof(struct ksock_hello_msg, kshm_src_nid));

		hmv = (struct lnet_magicversion *)hello;

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}
int
ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	struct ksock_net *net = (struct ksock_net *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_INTERFACES_NUM);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT(conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}
static int
ksocknal_invert_type(int type)
{
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
}
int
ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
		    struct ksock_hello_msg *hello,
		    struct lnet_process_id *peerid,
		    __u64 *incarnation)
{
	/* Return < 0        fatal error
	 *        0          success
	 *        EALREADY   lost connection race
	 *        EPROTO     protocol version mismatch
	 */
	struct socket *sock = conn->ksnc_sock;
	int active = (conn->ksnc_proto != NULL);
	int timeout;
	int proto_match;
	int rc;
	const struct ksock_proto *proto;
	struct lnet_process_id recv_id;

	/* socket type set on active connections - not set on passive */
	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? lnet_get_lnd_timeout() :
			   lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic,
			    sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Bad magic(1) %#08x (%#08x expected) from "
		       "%pI4h\n", __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	rc = lnet_sock_read(sock, &hello->kshm_version,
			    sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
			/* unknown protocol from peer_ni, tell peer_ni my protocol */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

		CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
		       conn->ksnc_proto->pro_version, &conn->ksnc_ipaddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
		CERROR("Error %d reading or checking hello from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	*incarnation = hello->kshm_src_incarnation;

	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
		       &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (!active &&
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer_ni process ID from socket */
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		/* peer_ni determines type */
		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pI4h\n",
			       hello->kshm_ctype, libcfs_id2str(*peerid),
			       &conn->ksnc_ipaddr);
			return -EPROTO;
		}

		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
				   " %pI4h, but they claimed they were "
				   "%s; please check your Lustre "
				   "configuration.\n",
				   libcfs_id2str(*peerid),
				   &conn->ksnc_ipaddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
		/* Possible protocol mismatch or I lost the connection race */
		return proto_match ? EALREADY : EPROTO;
	}

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
		       conn->ksnc_type, libcfs_id2str(*peerid),
		       &conn->ksnc_ipaddr,
		       hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
}
static int
ksocknal_connect(struct ksock_route *route)
{
	LIST_HEAD(zombies);
	struct ksock_peer_ni *peer_ni = route->ksnr_peer;
	int type;
	int wanted;
	struct socket *sock;
	time64_t deadline;
	int retry_later = 0;
	int rc = 0;

	deadline = ktime_get_seconds() + lnet_get_lnd_timeout();

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	LASSERT(route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);

	route->ksnr_connecting = 1;

	for (;;) {
		wanted = ksocknal_route_mask() & ~route->ksnr_connected;

		/* stop connecting if peer_ni/route got closed under me, or
		 * route got connected while queued */
		if (peer_ni->ksnp_closing || route->ksnr_deleted ||
		    wanted == 0) {
			retry_later = 0;
			break;
		}

		/* reschedule if peer_ni is connecting to me */
		if (peer_ni->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer_ni %s(%d) already connecting to me, retry later.\n",
			       libcfs_nid2str(peer_ni->ksnp_id.nid), peer_ni->ksnp_accepting);
			retry_later = 1;
		}

		if (retry_later) /* needs reschedule */
			break;

		if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
			type = SOCKLND_CONN_ANY;
		} else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
			type = SOCKLND_CONN_CONTROL;
		} else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
			type = SOCKLND_CONN_BULK_IN;
		} else {
			LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
			type = SOCKLND_CONN_BULK_OUT;
		}

		write_unlock_bh(&ksocknal_data.ksnd_global_lock);

		if (ktime_get_seconds() >= deadline) {
			rc = -ETIMEDOUT;
			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		sock = lnet_connect(peer_ni->ksnp_id.nid,
				    route->ksnr_myipaddr,
				    route->ksnr_ipaddr, route->ksnr_port,
				    peer_ni->ksnp_ni->ni_net_ns);
		if (IS_ERR(sock)) {
			rc = PTR_ERR(sock);
			goto failed;
		}

		rc = ksocknal_create_conn(peer_ni->ksnp_ni, route, sock, type);
		if (rc < 0) {
			lnet_connect_console_error(rc, peer_ni->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		/* A +ve RC means I have to retry because I lost the connection
		 * race or I have to renegotiate protocol version */
		retry_later = (rc != 0);
		if (retry_later)
			CDEBUG(D_NET, "peer_ni %s: conn race, retry later.\n",
			       libcfs_nid2str(peer_ni->ksnp_id.nid));

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	if (retry_later) {
		/* re-queue for attention; this frees me up to handle
		 * the peer_ni's incoming connection request */

		if (rc == EALREADY ||
		    (rc == 0 && peer_ni->ksnp_accepting > 0)) {
			/* We want to introduce a delay before next
			 * attempt to connect if we lost conn race,
			 * but the race is resolved quickly usually,
			 * so min_reconnectms should be good heuristic */
			route->ksnr_retry_interval = *ksocknal_tunables.ksnd_min_reconnectms / 1000;
			route->ksnr_timeout = ktime_get_seconds() +
					      route->ksnr_retry_interval;
		}

		ksocknal_launch_connection_locked(route);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return retry_later;

 failed:
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	/* This is a retry rather than a new connection */
	route->ksnr_retry_interval *= 2;
	route->ksnr_retry_interval =
		max_t(time64_t, route->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_min_reconnectms / 1000);
	route->ksnr_retry_interval =
		min_t(time64_t, route->ksnr_retry_interval,
		      *ksocknal_tunables.ksnd_max_reconnectms / 1000);

	LASSERT(route->ksnr_retry_interval);
	route->ksnr_timeout = ktime_get_seconds() + route->ksnr_retry_interval;

	if (!list_empty(&peer_ni->ksnp_tx_queue) &&
	    peer_ni->ksnp_accepting == 0 &&
	    ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
		struct ksock_conn *conn;

		/* ksnp_tx_queue is queued on a conn on successful
		 * connection for V1.x and V2.x */
		if (!list_empty(&peer_ni->ksnp_conns)) {
			conn = list_entry(peer_ni->ksnp_conns.next,
					  struct ksock_conn, ksnc_list);
			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
		}

		/* take all the blocked packets while I've got the lock and
		 * complete below... */
		list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_peer_failed(peer_ni);
	ksocknal_txlist_done(peer_ni->ksnp_ni, &zombies, rc);
	return 0;
}
/*
 * check whether we need to create more connds.
 * It will try to create new thread if it's necessary, @timeout can
 * be updated if failed to create, so caller wouldn't keep try while
 * running out of resource.
 */
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
	char name[16];
	int rc;
	int total = ksocknal_data.ksnd_connd_starting +
		    ksocknal_data.ksnd_connd_running;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
		/* can't create more connd, or still have enough
		 * threads to handle more connecting */
		return 0;
	}

	if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
		/* no pending connecting request */
		return 0;
	}

	if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
		/* may run out of resource, retry later */
		*timeout = cfs_time_seconds(1);
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* serialize starting to avoid flood */
		return 0;
	}

	ksocknal_data.ksnd_connd_starting_stamp = sec;
	ksocknal_data.ksnd_connd_starting++;
	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

	/* NB: total is the next id */
	snprintf(name, sizeof(name), "socknal_cd%02d", total);
	rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
	if (rc == 0)
		return 1;

	/* we tried ... */
	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

	return 1;
}
/*
 * check whether current thread can exit, it will return 1 if there are too
 * many threads and no creating in past 120 seconds.
 * Also, this function may update @timeout to make caller come back
 * again to recheck these conditions.
 */
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
	int val;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
		/* still in initializing */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0) {
		/* in progress of starting new thread */
		return 0;
	}

	if (ksocknal_data.ksnd_connd_running <=
	    *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
		return 0;
	}

	/* created thread in past 120 seconds? */
	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
		    SOCKNAL_CONND_TIMEOUT - sec);

	*timeout = (val > 0) ? cfs_time_seconds(val) :
			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
	if (val > 0)
		return 0;

	/* no creating in past 120 seconds */

	return ksocknal_data.ksnd_connd_running >
	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
/* Go through connd_routes queue looking for a route that we can process
 * right now, @timeout_p can be updated if we need to come back later */
static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
	time64_t now = ktime_get_seconds();
	struct ksock_route *route;

	/* connd_routes can contain both pending and ordinary routes */
	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {
		if (route->ksnr_retry_interval == 0 ||
		    now >= route->ksnr_timeout)
			return route;

		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
		    *timeout_p > cfs_time_seconds(route->ksnr_timeout - now))
			*timeout_p = cfs_time_seconds(route->ksnr_timeout - now);
	}

	return NULL;
}
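
/*
 * connd thread body: accepts queued connection requests from the listener,
 * initiates outgoing connections via ksocknal_connect(), and cooperates
 * with check_start/check_stop above to grow or shrink the connd pool.
 */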
int
ksocknal_connd(void *arg)
{
	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
	struct ksock_connreq *cr;
	wait_queue_entry_t wait;
	int nloops = 0;
	int cons_retry = 0;

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(connd_lock);

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_running++;

	while (!ksocknal_data.ksnd_shuttingdown) {
		struct ksock_route *route = NULL;
		time64_t sec = ktime_get_real_seconds();
		long timeout = MAX_SCHEDULE_TIMEOUT;
		int dropped_lock = 0;

		if (ksocknal_connd_check_stop(sec, &timeout)) {
			/* wakeup another one to check stop */
			wake_up(&ksocknal_data.ksnd_connd_waitq);
			break;
		}

		if (ksocknal_connd_check_start(sec, &timeout)) {
			/* created new thread */
			dropped_lock = 1;
		}

		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
			/* Connection accepted by the listener */
			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
					struct ksock_connreq, ksncr_list);

			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			ksocknal_create_conn(cr->ksncr_ni, NULL,
					     cr->ksncr_sock, SOCKLND_CONN_NONE);
			lnet_ni_decref(cr->ksncr_ni);
			LIBCFS_FREE(cr, sizeof(*cr));

			spin_lock_bh(connd_lock);
		}

		/* Only handle an outgoing connection request if there
		 * is a thread left to handle incoming connections and
		 * create new connd */
		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
		    ksocknal_data.ksnd_connd_running) {
			route = ksocknal_connd_get_route_locked(&timeout);
		}
		if (route != NULL) {
			list_del(&route->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			if (ksocknal_connect(route)) {
				/* consecutive retry */
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive "
					      "re-connecting to %pI4h\n",
					      &route->ksnr_ipaddr);
					cons_retry = 0;
				}
			} else {
				cons_retry = 0;
			}

			ksocknal_route_decref(route);

			spin_lock_bh(connd_lock);
			ksocknal_data.ksnd_connd_connecting--;
		}

		if (dropped_lock) {
			if (++nloops < SOCKNAL_RESCHED)
				continue;
			spin_unlock_bh(connd_lock);
			nloops = 0;
			cond_resched();
			spin_lock_bh(connd_lock);
			continue;
		}

		/* Nothing to do for 'timeout' */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_unlock_bh(connd_lock);

		nloops = 0;
		schedule_timeout(timeout);

		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_lock_bh(connd_lock);
	}
	ksocknal_data.ksnd_connd_running--;
	spin_unlock_bh(connd_lock);

	ksocknal_thread_fini();
	return 0;
}
2319 static struct ksock_conn *
2320 ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
2322 /* We're called with a shared lock on ksnd_global_lock */
2323 struct ksock_conn *conn;
2324 struct list_head *ctmp;
2325 struct ksock_tx *tx;
2327 list_for_each(ctmp, &peer_ni->ksnp_conns) {
2330 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
2332 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
2333 LASSERT (!conn->ksnc_closing);
2335 error = conn->ksnc_sock->sk->sk_err;
2337 ksocknal_conn_addref(conn);
2341 CNETERR("A connection with %s "
2342 "(%pI4h:%d) was reset; "
2343 "it may have rebooted.\n",
2344 libcfs_id2str(peer_ni->ksnp_id),
2349 CNETERR("A connection with %s "
2350 "(%pI4h:%d) timed out; the "
2351 "network or node may be down.\n",
2352 libcfs_id2str(peer_ni->ksnp_id),
2357 CNETERR("An unexpected network error %d "
2359 "(%pI4h:%d\n", error,
2360 libcfs_id2str(peer_ni->ksnp_id),
		if (conn->ksnc_rx_started &&
		    ktime_get_seconds() >= conn->ksnc_rx_deadline) {
			/* Timed out incomplete incoming message */
			ksocknal_conn_addref(conn);
			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
				libcfs_id2str(peer_ni->ksnp_id),
				&conn->ksnc_ipaddr, conn->ksnc_port,
				conn->ksnc_rx_state,
				conn->ksnc_rx_nob_wanted,
				conn->ksnc_rx_nob_left);
			return conn;
		}

		if ((!list_empty(&conn->ksnc_tx_queue) ||
		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
		    ktime_get_seconds() >= conn->ksnc_tx_deadline) {
			/* Timed out messages queued for sending or
			 * buffered in the socket's send buffer */
			ksocknal_conn_addref(conn);
			list_for_each_entry(tx, &conn->ksnc_tx_queue, tx_list)
				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
			CNETERR("Timeout sending data to %s (%pI4h:%d); the network or that node may be down.\n",
				libcfs_id2str(peer_ni->ksnp_id),
				&conn->ksnc_ipaddr, conn->ksnc_port);
			return conn;
		}
	}

	return NULL;
}
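/*
 * Move every tx on peer_ni's queue whose deadline has passed onto a
 * private list and complete it with -ETIMEDOUT.  This takes
 * ksnd_global_lock for write, so callers walking the peer_ni table
 * under the shared lock must drop it (and hold a peer_ni ref) first.
 */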
static void
ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
{
	struct ksock_tx *tx;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while (!list_empty(&peer_ni->ksnp_tx_queue)) {
		tx = list_entry(peer_ni->ksnp_tx_queue.next,
				struct ksock_tx, tx_list);

		if (ktime_get_seconds() < tx->tx_deadline)
			break;

		tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;

		list_move_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer_ni->ksnp_ni, &stale_txs, -ETIMEDOUT);
}
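/*
 * Send a keepalive PING (a NOOP carrying the reserved zero-copy
 * cookie 1) if this peer_ni has been idle beyond the keepalive
 * interval.  Returns 0 when no keepalive was needed; any non-zero
 * return (1 on success, -ENOMEM or -EIO on failure) means
 * ksnd_global_lock was dropped and re-taken, so the caller must
 * restart its walk.
 */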
static int
ksocknal_send_keepalive_locked(struct ksock_peer_ni *peer_ni)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
	struct ksock_sched *sched;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	/* last_alive will be updated by create_conn */
	if (list_empty(&peer_ni->ksnp_conns))
		return 0;

	if (peer_ni->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    ktime_get_seconds() < peer_ni->ksnp_last_alive +
				  *ksocknal_tunables.ksnd_keepalive)
		return 0;

	if (ktime_get_seconds() < peer_ni->ksnp_send_keepalive)
		return 0;

	/* retry 10 secs later, so we don't put pressure on this
	 * peer_ni if we failed to send a keepalive this time */
	peer_ni->ksnp_send_keepalive = ktime_get_seconds() + 10;

	conn = ksocknal_find_conn_locked(peer_ni, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			/* there is a queued ACK, don't need keepalive */
			return 0;
		}

		spin_unlock_bh(&sched->kss_lock);
	}
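	/*
	 * Note (editorial): allocating and launching the NOOP may
	 * block, so the shared global lock is dropped here and re-taken
	 * on every path below; the non-zero returns tell the caller
	 * this happened.
	 */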
	read_unlock(&ksocknal_data.ksnd_global_lock);

	/* cookie = 1 is reserved for keepalive PING */
	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id) == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);

	return -EIO;
}
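/*
 * Check one bucket of the peer_ni hash table for keepalives to send,
 * timed-out connections, stale txs and unacknowledged zero-copy
 * requests.  Whenever the shared lock must be dropped to take action,
 * the bucket walk restarts from the top, because any peer_ni may have
 * been freed in the meantime.
 */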
static void
ksocknal_check_peer_timeouts(int idx)
{
	struct hlist_head *peers = &ksocknal_data.ksnd_peers[idx];
	struct ksock_peer_ni *peer_ni;
	struct ksock_conn *conn;
	struct ksock_tx *tx;

 again:
	/* NB. We expect to have a look at all the peers and not find any
	 * connections to time out, so we just use a shared lock while we
	 * take a look...
	 */
	read_lock(&ksocknal_data.ksnd_global_lock);

	hlist_for_each_entry(peer_ni, peers, ksnp_list) {
		struct ksock_tx *tx_stale;
		time64_t deadline = 0;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer_ni) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}

		conn = ksocknal_find_timed_out_conn(peer_ni);

		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			/* NB we won't find this one again, but we can't
			 * just proceed with the next peer_ni, since we
			 * dropped ksnd_global_lock and it might be dead
			 * already!
			 */
			ksocknal_conn_decref(conn);
			goto again;
		}

		/* we can't process stale txs right here because we're
		 * holding only a shared lock
		 */
		if (!list_empty(&peer_ni->ksnp_tx_queue)) {
			struct ksock_tx *tx;

			tx = list_entry(peer_ni->ksnp_tx_queue.next,
					struct ksock_tx, tx_list);
			if (ktime_get_seconds() >= tx->tx_deadline) {
				ksocknal_peer_addref(peer_ni);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer_ni);

				ksocknal_peer_decref(peer_ni);
				goto again;
			}
		}
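		/*
		 * Note (editorial): zero-copy requests sit on
		 * ksnp_zc_req_list until the peer_ni acknowledges them;
		 * one that outlives its deadline means the ACK never
		 * came back, so the connection carrying it has to be
		 * closed.
		 */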
		if (list_empty(&peer_ni->ksnp_zc_req_list))
			continue;

		tx_stale = NULL;
		spin_lock(&peer_ni->ksnp_lock);
		list_for_each_entry(tx, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
			if (ktime_get_seconds() < tx->tx_deadline)
				break;
			/* ignore the TX if connection is being closed */
			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
			if (tx_stale == NULL)
				tx_stale = tx;
		}

		if (tx_stale == NULL) {
			spin_unlock(&peer_ni->ksnp_lock);
			continue;
		}

		deadline = tx_stale->tx_deadline;
		resid = tx_stale->tx_resid;
		conn = tx_stale->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer_ni->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);

		CERROR("Total %d stale ZC_REQs for peer_ni %s detected; the oldest(%p) timed out %lld secs ago, resid: %d, wmem: %d\n",
		       n, libcfs_nid2str(peer_ni->ksnp_id.nid), tx_stale,
		       ktime_get_seconds() - deadline,
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);

		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
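/*
 * Main loop of the reaper thread: terminate connections on death row,
 * destroy zombie connections, reschedule connections that stalled on
 * ENOMEM, and periodically sweep the peer_ni table for timeouts.
 */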
int ksocknal_reaper(void *arg)
{
	wait_queue_entry_t wait;
	struct ksock_conn *conn;
	struct ksock_sched *sched;
	LIST_HEAD(enomem_conns);
	int nenomem_conns;
	time64_t timeout;
	int i;
	int peer_index = 0;
	time64_t deadline = ktime_get_seconds();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  struct ksock_conn, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		list_splice_init(&ksocknal_data.ksnd_enomem_conns,
				 &enomem_conns);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* reschedule all the connections that stalled with ENOMEM... */
		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next,
					  struct ksock_conn, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}

		/* careful with the jiffy wrap... */
		while ((timeout = deadline - ktime_get_seconds()) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = HASH_SIZE(ksocknal_data.ksnd_peers);
			unsigned int lnd_timeout;

			/* Time to check for timeouts on a few more peers: I
			 * do checks every 'p' seconds on a proportion of the
			 * peer_ni table and I need to check every connection
			 * 'n' times within a timeout interval, to ensure I
			 * detect a timeout on any connection within (n+1)/n
			 * times the timeout interval.
			 */
			lnd_timeout = lnet_get_lnd_timeout();
			if (lnd_timeout > n * p)
				chunk = (chunk * n * p) / lnd_timeout;
			if (chunk == 0)
				chunk = 1;
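			/*
			 * Worked example (editorial; illustrative values,
			 * not from the source): with n = 4, p = 1 and an
			 * LND timeout of 50s, a 128-bucket peer table
			 * gives chunk = 128 * 4 * 1 / 50 = 10, so the
			 * whole table is swept in ~13 one-second passes
			 * and every connection is checked roughly 4
			 * times per timeout interval.
			 */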
			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     HASH_SIZE(ksocknal_data.ksnd_peers);
			}

			deadline += p;
		}

		if (nenomem_conns != 0) {
			/* Reduce my timeout if I rescheduled ENOMEM conns.
			 * This also prevents me getting woken immediately
			 * if any go back on my enomem list. */
			timeout = SOCKNAL_ENOMEM_RETRY;
		}
		ksocknal_data.ksnd_reaper_waketime = ktime_get_seconds() +
						     timeout;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(cfs_time_seconds(timeout));

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}
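/*
 * For reference, a sketch (editorial, not from this file) of how the
 * reaper is spawned during module startup; the exact call site and
 * error handling live in the socklnd setup code and may differ:
 *
 *	rc = ksocknal_thread_start(ksocknal_reaper, NULL,
 *				   "socknal_reaper");
 *	if (rc != 0)
 *		CERROR("Can't spawn socknal reaper: %d\n", rc);
 */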